
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (48 commits)
  DMAENGINE: move COH901318 to arch_initcall
  dma: imx-dma: fix signedness bug
  dma/timberdale: simplify conditional
  ste_dma40: remove channel_type
  ste_dma40: remove enum for endianess
  ste_dma40: remove TIM_FOR_LINK option
  ste_dma40: move mode_opt to separate config
  ste_dma40: move channel mode to a separate field
  ste_dma40: move priority to separate field
  ste_dma40: add variable to indicate valid dma_cfg
  async_tx: make async_tx channel switching opt-in
  move async raid6 test to lib/Kconfig.debug
  dmaengine: Add Freescale i.MX1/21/27 DMA driver
  intel_mid_dma: change the slave interface
  intel_mid_dma: fix the WARN_ONs
  intel_mid_dma: Add sg list support to DMA driver
  intel_mid_dma: Allow DMAC2 to share interrupt
  intel_mid_dma: Allow IRQ sharing
  intel_mid_dma: Add runtime PM support
  DMAENGINE: define a dummy filter function for ste_dma40
  ...
hifive-unleashed-5.1
Linus Torvalds 2010-10-27 19:04:36 -07:00
commit e3e1288e86
25 changed files with 5723 additions and 1166 deletions


@@ -27,6 +27,8 @@
 #define imx_has_dma_v1()	(cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())

+#include <mach/dma.h>
+
 #define IMX_DMA_CHANNELS  16

 #define DMA_MODE_READ		0
@@ -96,12 +98,6 @@ int imx_dma_request(int channel, const char *name);

 void imx_dma_free(int channel);

-enum imx_dma_prio {
-	DMA_PRIO_HIGH = 0,
-	DMA_PRIO_MEDIUM = 1,
-	DMA_PRIO_LOW = 2
-};
-
 int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);

 #endif	/* __MACH_DMA_V1_H__ */


@@ -208,35 +208,25 @@ static struct resource dma40_resources[] = {

 /* Default configuration for physcial memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
-	.channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE |
-			 STEDMA40_LOW_PRIORITY_CHANNEL |
-			 STEDMA40_PCHAN_BASIC_MODE),
+	.mode = STEDMA40_MODE_PHYSICAL,
 	.dir = STEDMA40_MEM_TO_MEM,

-	.src_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.src_info.data_width = STEDMA40_BYTE_WIDTH,
 	.src_info.psize = STEDMA40_PSIZE_PHY_1,
 	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

-	.dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
 	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
 	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 };

 /* Default configuration for logical memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_log = {
-	.channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE |
-			 STEDMA40_LOW_PRIORITY_CHANNEL |
-			 STEDMA40_LCHAN_SRC_LOG_DST_LOG |
-			 STEDMA40_NO_TIM_FOR_LINK),
 	.dir = STEDMA40_MEM_TO_MEM,

-	.src_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.src_info.data_width = STEDMA40_BYTE_WIDTH,
 	.src_info.psize = STEDMA40_PSIZE_LOG_1,
 	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

-	.dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
 	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
 	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
@@ -269,7 +259,6 @@ static struct stedma40_platform_data dma40_plat_data = {
 	.memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
 	.memcpy_conf_phy = &dma40_memcpy_conf_phy,
 	.memcpy_conf_log = &dma40_memcpy_conf_log,
-	.llis_per_log = 8,
 	.disabled_channels = {-1},
 };


@@ -0,0 +1,67 @@
/*
* Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARCH_MXC_DMA_H__
#define __ASM_ARCH_MXC_DMA_H__
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
/*
* This enumerates peripheral types. Used for SDMA.
*/
enum sdma_peripheral_type {
IMX_DMATYPE_SSI, /* MCU domain SSI */
IMX_DMATYPE_SSI_SP, /* Shared SSI */
IMX_DMATYPE_MMC, /* MMC */
IMX_DMATYPE_SDHC, /* SDHC */
IMX_DMATYPE_UART, /* MCU domain UART */
IMX_DMATYPE_UART_SP, /* Shared UART */
IMX_DMATYPE_FIRI, /* FIRI */
IMX_DMATYPE_CSPI, /* MCU domain CSPI */
IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
IMX_DMATYPE_SIM, /* SIM */
IMX_DMATYPE_ATA, /* ATA */
IMX_DMATYPE_CCM, /* CCM */
IMX_DMATYPE_EXT, /* External peripheral */
IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
IMX_DMATYPE_DSP, /* DSP */
IMX_DMATYPE_MEMORY, /* Memory */
IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
IMX_DMATYPE_SPDIF, /* SPDIF */
IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
IMX_DMATYPE_ASRC, /* ASRC */
IMX_DMATYPE_ESAI, /* ESAI */
};
enum imx_dma_prio {
DMA_PRIO_HIGH = 0,
DMA_PRIO_MEDIUM = 1,
DMA_PRIO_LOW = 2
};
struct imx_dma_data {
int dma_request; /* DMA request line */
enum sdma_peripheral_type peripheral_type;
int priority;
};
static inline int imx_dma_is_ipu(struct dma_chan *chan)
{
return !strcmp(dev_name(chan->device->dev), "ipu-core");
}
static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
{
return !strcmp(dev_name(chan->device->dev), "imx-sdma") ||
!strcmp(dev_name(chan->device->dev), "imx-dma");
}
#endif
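
The two inline helpers above are intended for dmaengine channel filtering. As a hedged illustration (not code from this patch), a client driver might request a general-purpose i.MX channel as sketched below; the request line, peripheral type, and priority values are made-up placeholders:

#include <linux/dmaengine.h>
#include <mach/dma.h>

/* Sketch of a client filter: accept only imx-dma/imx-sdma channels and
 * hand the driver its imx_dma_data through chan->private, which imx-dma
 * reads back in device_alloc_chan_resources(). */
static bool example_imx_filter(struct dma_chan *chan, void *param)
{
	if (!imx_dma_is_general_purpose(chan))
		return false;
	chan->private = param;
	return true;
}

static struct dma_chan *example_request_channel(void)
{
	static struct imx_dma_data data = {
		.dma_request	 = 2,			/* placeholder request line */
		.peripheral_type = IMX_DMATYPE_SSI,
		.priority	 = DMA_PRIO_HIGH,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_imx_filter, &data);
}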


@@ -0,0 +1,17 @@
#ifndef __MACH_MXC_SDMA_H__
#define __MACH_MXC_SDMA_H__
/**
* struct sdma_platform_data - platform specific data for SDMA engine
*
* @sdma_version:	The version of this SDMA engine
* @cpu_name:		used to generate the firmware name
* @to_version:		CPU Tape out version
*/
struct sdma_platform_data {
int sdma_version;
char *cpu_name;
int to_version;
};
#endif /* __MACH_MXC_SDMA_H__ */
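
A board file hands this structure to the SDMA driver as platform data. The values in this sketch are illustrative assumptions only; per the comment above, cpu_name and to_version feed into the firmware image name the driver requests:

static struct sdma_platform_data example_sdma_pdata = {
	.sdma_version	= 2,		/* placeholder: engine revision */
	.cpu_name	= "imx35",	/* placeholder: firmware name stem */
	.to_version	= 1,		/* placeholder: tape-out revision */
};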


@@ -1,10 +1,8 @@
 /*
- * arch/arm/plat-nomadik/include/plat/ste_dma40.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
  */
@@ -19,37 +17,20 @@
 #define STEDMA40_DEV_DST_MEMORY (-1)
 #define STEDMA40_DEV_SRC_MEMORY (-1)

-/*
- * Description of bitfields of channel_type variable is available in
- * the info structure.
- */
-
-/* Priority */
-#define STEDMA40_INFO_PRIO_TYPE_POS 2
-#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
-#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
-
-/* Mode */
-#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
-#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-
-/* Mode options */
-#define STEDMA40_INFO_CH_MODE_OPT_POS 8
-#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-
-/* Interrupt */
-#define STEDMA40_INFO_TIM_POS 10
-#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
-#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
-
-/* End of channel_type configuration */
+enum stedma40_mode {
+	STEDMA40_MODE_LOGICAL = 0,
+	STEDMA40_MODE_PHYSICAL,
+	STEDMA40_MODE_OPERATION,
+};
+
+enum stedma40_mode_opt {
+	STEDMA40_PCHAN_BASIC_MODE = 0,
+	STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
+	STEDMA40_PCHAN_MODULO_MODE,
+	STEDMA40_PCHAN_DOUBLE_DST_MODE,
+	STEDMA40_LCHAN_SRC_PHY_DST_LOG,
+	STEDMA40_LCHAN_SRC_LOG_DST_PHY,
+};

 #define STEDMA40_ESIZE_8_BIT  0x0
 #define STEDMA40_ESIZE_16_BIT 0x1
@@ -72,16 +53,14 @@
 #define STEDMA40_PSIZE_LOG_8  STEDMA40_PSIZE_PHY_8
 #define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16

+/* Maximum number of possible physical channels */
+#define STEDMA40_MAX_PHYS 32
+
 enum stedma40_flow_ctrl {
 	STEDMA40_NO_FLOW_CTRL,
 	STEDMA40_FLOW_CTRL,
 };

-enum stedma40_endianess {
-	STEDMA40_LITTLE_ENDIAN,
-	STEDMA40_BIG_ENDIAN
-};
-
 enum stedma40_periph_data_width {
 	STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
 	STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
@@ -89,34 +68,40 @@ enum stedma40_periph_data_width {
 	STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
 };

-struct stedma40_half_channel_info {
-	enum stedma40_endianess endianess;
-	enum stedma40_periph_data_width data_width;
-	int psize;
-	enum stedma40_flow_ctrl flow_ctrl;
-};
-
 enum stedma40_xfer_dir {
-	STEDMA40_MEM_TO_MEM,
+	STEDMA40_MEM_TO_MEM = 1,
 	STEDMA40_MEM_TO_PERIPH,
 	STEDMA40_PERIPH_TO_MEM,
 	STEDMA40_PERIPH_TO_PERIPH
 };

+/**
+ * struct stedma40_half_channel_info - dst/src half channel configuration
+ *
+ * @big_endian: true if the src/dst should be read as big endian
+ * @data_width: Data width of the src/dst hardware
+ * @psize: Burst size
+ * @flow_ctrl: Flow control on/off.
+ */
+struct stedma40_half_channel_info {
+	bool big_endian;
+	enum stedma40_periph_data_width data_width;
+	int psize;
+	enum stedma40_flow_ctrl flow_ctrl;
+};
+
 /**
  * struct stedma40_chan_cfg - Structure to be filled by client drivers.
  *
  * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
- * @channel_type: priority, mode, mode options and interrupt configuration.
+ * @high_priority: true if high-priority
+ * @mode: channel mode: physical, logical, or operation
+ * @mode_opt: options for the chosen channel mode
  * @src_dev_type: Src device type
  * @dst_dev_type: Dst device type
- * @src_info: Parameters for dst half channel
+ * @src_info: Parameters for src half channel
  * @dst_info: Parameters for dst half channel
- * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
- * @pre_transfer: Callback used if needed before preparation of transfer.
- *		  Only called if device is set. size of bytes to transfer
- *		  (in case of multiple element transfer size is size of the first element).
 *
 *
  * This structure has to be filled by the client drivers.
@@ -125,15 +110,13 @@
  */
 struct stedma40_chan_cfg {
 	enum stedma40_xfer_dir			 dir;
-	unsigned int				 channel_type;
+	bool					 high_priority;
+	enum stedma40_mode			 mode;
+	enum stedma40_mode_opt			 mode_opt;
 	int					 src_dev_type;
 	int					 dst_dev_type;
 	struct stedma40_half_channel_info	 src_info;
 	struct stedma40_half_channel_info	 dst_info;
-	void					*pre_transfer_data;
-	int (*pre_transfer)			(struct dma_chan *chan,
-						 void *data,
-						 int size);
 };

 /**
@@ -146,7 +129,6 @@ struct stedma40_chan_cfg {
  * @memcpy_len: length of memcpy
  * @memcpy_conf_phy: default configuration of physical channel memcpy
  * @memcpy_conf_log: default configuration of logical channel memcpy
- * @llis_per_log: number of max linked list items per logical channel
  * @disabled_channels: A vector, ending with -1, that marks physical channels
  *		       that are for different reasons not available for the driver.
  */
@@ -158,23 +140,10 @@ struct stedma40_platform_data {
 	u32				 memcpy_len;
 	struct stedma40_chan_cfg	*memcpy_conf_phy;
 	struct stedma40_chan_cfg	*memcpy_conf_log;
-	unsigned int			 llis_per_log;
-	int				 disabled_channels[8];
+	int				 disabled_channels[STEDMA40_MAX_PHYS];
 };

-/**
- * setdma40_set_psize() - Used for changing the package size of an
- * already configured dma channel.
- *
- * @chan: dmaengine handle
- * @src_psize: new package side for src. (STEDMA40_PSIZE*)
- * @src_psize: new package side for dst. (STEDMA40_PSIZE*)
- *
- * returns 0 on ok, otherwise negative error number.
- */
-int stedma40_set_psize(struct dma_chan *chan,
-		       int src_psize,
-		       int dst_psize);
+#ifdef CONFIG_STE_DMA40

 /**
  * stedma40_filter() - Provides stedma40_chan_cfg to the
@@ -237,4 +206,21 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
 			       direction, flags);
 }

+#else
+static inline bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+	return false;
+}
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+					    dma_addr_t addr,
+					    unsigned int size,
+					    enum dma_data_direction direction,
+					    unsigned long flags)
+{
+	return NULL;
+}
+#endif
+
 #endif
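
For contrast with the removed channel_type bitfield, a client configuration written against the new fields might look like the sketch below; the destination event line and the choice of a logical UART TX channel are illustrative assumptions, not taken from the patch:

static struct stedma40_chan_cfg example_uart_tx_cfg = {
	.dir		= STEDMA40_MEM_TO_PERIPH,
	.high_priority	= false,			/* was STEDMA40_LOW_PRIORITY_CHANNEL */
	.mode		= STEDMA40_MODE_LOGICAL,	/* was STEDMA40_CHANNEL_IN_LOG_MODE */
	.mode_opt	= STEDMA40_LCHAN_SRC_LOG_DST_LOG,
	.src_dev_type	= STEDMA40_DEV_SRC_MEMORY,
	.dst_dev_type	= 13,				/* placeholder event line */
	.src_info = {
		.big_endian = false,			/* replaces .endianess */
		.data_width = STEDMA40_BYTE_WIDTH,
		.psize	    = STEDMA40_PSIZE_LOG_1,
		.flow_ctrl  = STEDMA40_NO_FLOW_CTRL,
	},
	.dst_info = {
		.big_endian = false,
		.data_width = STEDMA40_BYTE_WIDTH,
		.psize	    = STEDMA40_PSIZE_LOG_1,
		.flow_ctrl  = STEDMA40_NO_FLOW_CTRL,
	},
};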


@@ -1,137 +0,0 @@
/*
* Freescale MPC83XX / MPC85XX DMA Controller
*
* Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
#define __ARCH_POWERPC_ASM_FSLDMA_H__
#include <linux/slab.h>
#include <linux/dmaengine.h>
/*
* Definitions for the Freescale DMA controller's DMA_SLAVE implementation
*
* The Freescale DMA_SLAVE implementation was designed to handle many-to-many
* transfers. An example usage would be an accelerated copy between two
* scatterlists. Another example use would be an accelerated copy from
* multiple non-contiguous device buffers into a single scatterlist.
*
* A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
* structure contains a list of hardware addresses that should be copied
* to/from the scatterlist passed into device_prep_slave_sg(). The structure
* also has some fields to enable hardware-specific features.
*/
/**
* struct fsl_dma_hw_addr
* @entry: linked list entry
* @address: the hardware address
* @length: length to transfer
*
* Holds a single physical hardware address / length pair for use
* with the DMAEngine DMA_SLAVE API.
*/
struct fsl_dma_hw_addr {
struct list_head entry;
dma_addr_t address;
size_t length;
};
/**
* struct fsl_dma_slave
* @addresses: a linked list of struct fsl_dma_hw_addr structures
* @request_count: value for DMA request count
* @src_loop_size: setup and enable constant source-address DMA transfers
* @dst_loop_size: setup and enable constant destination address DMA transfers
* @external_start: enable externally started DMA transfers
* @external_pause: enable externally paused DMA transfers
*
* Holds a list of address / length pairs for use with the DMAEngine
* DMA_SLAVE API implementation for the Freescale DMA controller.
*/
struct fsl_dma_slave {
/* List of hardware address/length pairs */
struct list_head addresses;
/* Support for extra controller features */
unsigned int request_count;
unsigned int src_loop_size;
unsigned int dst_loop_size;
bool external_start;
bool external_pause;
};
/**
* fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
* @slave: the &struct fsl_dma_slave to add to
* @address: the hardware address to add
* @length: the length of bytes to transfer from @address
*
* Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
* success, -ERRNO otherwise.
*/
static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
dma_addr_t address, size_t length)
{
struct fsl_dma_hw_addr *addr;
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (!addr)
return -ENOMEM;
INIT_LIST_HEAD(&addr->entry);
addr->address = address;
addr->length = length;
list_add_tail(&addr->entry, &slave->addresses);
return 0;
}
/**
* fsl_dma_slave_free - free a struct fsl_dma_slave
* @slave: the struct fsl_dma_slave to free
*
* Free a struct fsl_dma_slave and all associated address/length pairs
*/
static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
{
struct fsl_dma_hw_addr *addr, *tmp;
if (slave) {
list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
list_del(&addr->entry);
kfree(addr);
}
kfree(slave);
}
}
/**
* fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
* @gfp: the flags to pass to kmalloc when allocating this structure
*
* Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
* struct fsl_dma_slave on success, or NULL on failure.
*/
static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
{
struct fsl_dma_slave *slave;
slave = kzalloc(sizeof(*slave), gfp);
if (!slave)
return NULL;
INIT_LIST_HEAD(&slave->addresses);
return slave;
}
#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
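
For reference, a client of this now-removed interface built its transfer roughly as in the sketch below; the device address is a placeholder and error handling is minimal. The replacement is the generic dma_slave_config path added to fsldma.c further down.

/* Hedged sketch of the removed usage pattern, not code from the patch. */
static struct dma_async_tx_descriptor *
example_old_fsl_prep(struct dma_chan *chan, struct scatterlist *sgl,
		     unsigned int sg_len)
{
	struct fsl_dma_slave *slave;

	slave = fsl_dma_slave_alloc(GFP_KERNEL);
	if (!slave)
		return NULL;

	/* one hardware address/length pair; 0xf0000000 is a placeholder */
	if (fsl_dma_slave_append(slave, 0xf0000000, 4096)) {
		fsl_dma_slave_free(slave);
		return NULL;
	}
	slave->external_start = true;

	/* the old fsl_dma_prep_slave_sg() fished this out of chan->private */
	chan->private = slave;
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_FROM_DEVICE, 0);
}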


@@ -24,19 +24,6 @@ config ASYNC_RAID6_RECOV
 	select ASYNC_PQ
 	select ASYNC_XOR

-config ASYNC_RAID6_TEST
-	tristate "Self test for hardware accelerated raid6 recovery"
-	depends on ASYNC_RAID6_RECOV
-	select ASYNC_MEMCPY
-	---help---
-	  This is a one-shot self test that permutes through the
-	  recovery of all the possible two disk failure scenarios for a
-	  N-disk array. Recovery is performed with the asynchronous
-	  raid6 recovery routines, and will optionally use an offload
-	  engine if one is available.
-
-	  If unsure, say N.
-
 config ASYNC_TX_DISABLE_PQ_VAL_DMA
 	bool


@@ -46,15 +46,22 @@ config INTEL_MID_DMAC

 	  If unsure, say N.

-config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool

+config AMBA_PL08X
+	bool "ARM PrimeCell PL080 or PL081 support"
+	depends on ARM_AMBA && EXPERIMENTAL
+	select DMA_ENGINE
+	help
+	  Platform has a PL08x DMAC device
+	  which can provide DMA engine support
+
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	select DCA
-	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	select ASYNC_TX_DISABLE_PQ_VAL_DMA
 	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	help
@@ -69,6 +76,7 @@ config INTEL_IOP_ADMA
 	tristate "Intel IOP ADMA support"
 	depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.

@@ -93,6 +101,7 @@ config FSL_DMA
 	tristate "Freescale Elo and Elo Plus DMA support"
 	depends on FSL_SOC
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Freescale Elo and Elo Plus DMA controllers.
 	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
@@ -109,6 +118,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.

@@ -166,6 +176,7 @@ config AMCC_PPC440SPE_ADMA
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the AMCC PPC440SPe RAID engines.

@@ -195,6 +206,22 @@ config PCH_DMA
 	help
 	  Enable support for the Topcliff PCH DMA engine.

+config IMX_SDMA
+	tristate "i.MX SDMA support"
+	depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+	select DMA_ENGINE
+	help
+	  Support the i.MX SDMA engine. This engine is integrated into
+	  Freescale i.MX25/31/35/51 chips.
+
+config IMX_DMA
+	tristate "i.MX DMA support"
+	depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+	select DMA_ENGINE
+	help
+	  Support the i.MX DMA engine. This engine is integrated into
+	  Freescale i.MX1/21/27 chips.
+
 config DMA_ENGINE
 	bool


@@ -21,7 +21,10 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o

File diff suppressed because it is too large.


@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
 {
 	return platform_driver_probe(&coh901318_driver, coh901318_probe);
 }
-subsys_initcall(coh901318_init);
+arch_initcall(coh901318_init);

 void __exit coh901318_exit(void)
 {


@@ -690,8 +690,12 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 		!device->device_prep_dma_interrupt);
+	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
+		!device->device_prep_dma_sg);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_prep_slave_sg);
+	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
+		!device->device_prep_dma_cyclic);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_control);

@@ -702,7 +706,7 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(!device->dev);

 	/* note: this only matters in the
-	 * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
+	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 	 */
 	if (device_has_all_tx_types(device))
 		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
@@ -976,7 +980,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 	struct dma_chan *chan)
 {
 	tx->chan = chan;
-#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	spin_lock_init(&tx->lock);
 #endif
 }


@@ -35,9 +35,10 @@
 #include <linux/dmapool.h>
 #include <linux/of_platform.h>

-#include <asm/fsldma.h>
 #include "fsldma.h"

+static const char msg_ld_oom[] = "No free memory for link descriptor\n";
+
 static void dma_init(struct fsldma_chan *chan)
 {
 	/* Reset the channel */
@@ -499,7 +500,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)

 	new = fsl_dma_alloc_descriptor(chan);
 	if (!new) {
-		dev_err(chan->dev, "No free memory for link descriptor\n");
+		dev_err(chan->dev, msg_ld_oom);
 		return NULL;
 	}

@@ -536,8 +537,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		/* Allocate the link descriptor from DMA pool */
 		new = fsl_dma_alloc_descriptor(chan);
 		if (!new) {
-			dev_err(chan->dev,
-					"No free memory for link descriptor\n");
+			dev_err(chan->dev, msg_ld_oom);
 			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
@@ -583,6 +583,125 @@ fail:
 	return NULL;
 }
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags)
{
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
struct fsldma_chan *chan = to_fsl_chan(dchan);
size_t dst_avail, src_avail;
dma_addr_t dst, src;
size_t len;
/* basic sanity checks */
if (dst_nents == 0 || src_nents == 0)
return NULL;
if (dst_sg == NULL || src_sg == NULL)
return NULL;
/*
* TODO: should we check that both scatterlists have the same
* TODO: number of bytes in total? Is that really an error?
*/
/* get prepared for the loop */
dst_avail = sg_dma_len(dst_sg);
src_avail = sg_dma_len(src_sg);
/* run until we are out of scatterlist entries */
while (true) {
/* create the largest transaction possible */
len = min_t(size_t, src_avail, dst_avail);
len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
if (len == 0)
goto fetch;
dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
/* allocate and populate the descriptor */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
dev_err(chan->dev, msg_ld_oom);
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
set_desc_cnt(chan, &new->hw, len);
set_desc_src(chan, &new->hw, src);
set_desc_dst(chan, &new->hw, dst);
if (!first)
first = new;
else
set_desc_next(chan, &prev->hw, new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
prev = new;
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &first->tx_list);
/* update metadata */
dst_avail -= len;
src_avail -= len;
fetch:
/* fetch the next dst scatterlist entry */
if (dst_avail == 0) {
/* no more entries: we're done */
if (dst_nents == 0)
break;
/* fetch the next entry: if there are no more: done */
dst_sg = sg_next(dst_sg);
if (dst_sg == NULL)
break;
dst_nents--;
dst_avail = sg_dma_len(dst_sg);
}
/* fetch the next src scatterlist entry */
if (src_avail == 0) {
/* no more entries: we're done */
if (src_nents == 0)
break;
/* fetch the next entry: if there are no more: done */
src_sg = sg_next(src_sg);
if (src_sg == NULL)
break;
src_nents--;
src_avail = sg_dma_len(src_sg);
}
}
new->async_tx.flags = flags; /* client is in control of this ack */
new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list */
set_ld_eol(chan, new);
return &first->async_tx;
fail:
if (!first)
return NULL;
fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
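
Callers reach this new operation through the generic dmaengine hooks once the driver advertises DMA_SG (see the probe changes below). A hedged usage sketch, assuming both scatterlists are already DMA-mapped:

static dma_cookie_t example_sg_copy(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* registration BUG_ON()s guarantee the hook exists when the cap is set */
	if (!dma_has_cap(DMA_SG, chan->device->cap_mask))
		return -ENOSYS;

	tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
					      src_sg, src_nents, 0);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);
	dma_async_issue_pending(chan);
	return cookie;
}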
/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
@@ -599,207 +718,70 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
struct fsl_dma_slave *slave;
size_t copy;
int i;
struct scatterlist *sg;
size_t sg_used;
size_t hw_used;
struct fsl_dma_hw_addr *hw;
dma_addr_t dma_dst, dma_src;
if (!dchan)
return NULL;
if (!dchan->private)
return NULL;
chan = to_fsl_chan(dchan);
slave = dchan->private;
if (list_empty(&slave->addresses))
return NULL;
hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
hw_used = 0;
-	/*
-	 * Build the hardware transaction to copy from the scatterlist to
-	 * the hardware, or from the hardware to the scatterlist
-	 *
-	 * If you are copying from the hardware to the scatterlist and it
-	 * takes two hardware entries to fill an entire page, then both
-	 * hardware entries will be coalesced into the same page
-	 *
-	 * If you are copying from the scatterlist to the hardware and a
-	 * single page can fill two hardware entries, then the data will
-	 * be read out of the page into the first hardware entry, and so on
-	 */
+	/*
+	 * This operation is not supported on the Freescale DMA controller
+	 *
+	 * However, we need to provide the function pointer to allow the
+	 * device_control() method to work.
+	 */
for_each_sg(sgl, sg, sg_len, i) {
sg_used = 0;
/* Loop until the entire scatterlist entry is used */
while (sg_used < sg_dma_len(sg)) {
/*
* If we've used up the current hardware address/length
* pair, we need to load a new one
*
* This is done in a while loop so that descriptors with
* length == 0 will be skipped
*/
while (hw_used >= hw->length) {
/*
* If the current hardware entry is the last
* entry in the list, we're finished
*/
if (list_is_last(&hw->entry, &slave->addresses))
goto finished;
/* Get the next hardware address/length pair */
hw = list_entry(hw->entry.next,
struct fsl_dma_hw_addr, entry);
hw_used = 0;
}
/* Allocate the link descriptor from DMA pool */
new = fsl_dma_alloc_descriptor(chan);
if (!new) {
dev_err(chan->dev, "No free memory for "
"link descriptor\n");
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
/*
* Calculate the maximum number of bytes to transfer,
* making sure it is less than the DMA controller limit
*/
copy = min_t(size_t, sg_dma_len(sg) - sg_used,
hw->length - hw_used);
copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
/*
* DMA_FROM_DEVICE
* from the hardware to the scatterlist
*
* DMA_TO_DEVICE
* from the scatterlist to the hardware
*/
if (direction == DMA_FROM_DEVICE) {
dma_src = hw->address + hw_used;
dma_dst = sg_dma_address(sg) + sg_used;
} else {
dma_src = sg_dma_address(sg) + sg_used;
dma_dst = hw->address + hw_used;
}
/* Fill in the descriptor */
set_desc_cnt(chan, &new->hw, copy);
set_desc_src(chan, &new->hw, dma_src);
set_desc_dst(chan, &new->hw, dma_dst);
/*
* If this is not the first descriptor, chain the
* current descriptor after the previous descriptor
*/
if (!first) {
first = new;
} else {
set_desc_next(chan, &prev->hw,
new->async_tx.phys);
}
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
prev = new;
sg_used += copy;
hw_used += copy;
/* Insert the link descriptor into the LD ring */
list_add_tail(&new->node, &first->tx_list);
}
}
finished:
/* All of the hardware address/length pairs had length == 0 */
if (!first || !new)
return NULL;
new->async_tx.flags = flags;
new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list */
set_ld_eol(chan, new);
/* Enable extra controller features */
if (chan->set_src_loop_size)
chan->set_src_loop_size(chan, slave->src_loop_size);
if (chan->set_dst_loop_size)
chan->set_dst_loop_size(chan, slave->dst_loop_size);
if (chan->toggle_ext_start)
chan->toggle_ext_start(chan, slave->external_start);
if (chan->toggle_ext_pause)
chan->toggle_ext_pause(chan, slave->external_pause);
if (chan->set_request_count)
chan->set_request_count(chan, slave->request_count);
return &first->async_tx;
fail:
/* If first was not set, then we failed to allocate the very first
* descriptor, and we're done */
if (!first)
return NULL;
/*
* First is set, so all of the descriptors we allocated have been added
* to first->tx_list, INCLUDING "first" itself. Therefore we
* must traverse the list backwards freeing each descriptor in turn
*
* We're re-using variables for the loop, oh well
*/
fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
 static int fsl_dma_device_control(struct dma_chan *dchan,
 				  enum dma_ctrl_cmd cmd, unsigned long arg)
 {
+	struct dma_slave_config *config;
 	struct fsldma_chan *chan;
 	unsigned long flags;
+	int size;

-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
 	if (!dchan)
 		return -EINVAL;

 	chan = to_fsl_chan(dchan);

-	/* Halt the DMA engine */
-	dma_halt(chan);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		/* Halt the DMA engine */
+		dma_halt(chan);

-	spin_lock_irqsave(&chan->desc_lock, flags);
+		spin_lock_irqsave(&chan->desc_lock, flags);

-	/* Remove and free all of the descriptors in the LD queue */
-	fsldma_free_desc_list(chan, &chan->ld_pending);
-	fsldma_free_desc_list(chan, &chan->ld_running);
+		/* Remove and free all of the descriptors in the LD queue */
+		fsldma_free_desc_list(chan, &chan->ld_pending);
+		fsldma_free_desc_list(chan, &chan->ld_running);

-	spin_unlock_irqrestore(&chan->desc_lock, flags);
+		spin_unlock_irqrestore(&chan->desc_lock, flags);
+		return 0;
+
+	case DMA_SLAVE_CONFIG:
+		config = (struct dma_slave_config *)arg;
+
+		/* make sure the channel supports setting burst size */
+		if (!chan->set_request_count)
+			return -ENXIO;
+
+		/* we set the controller burst size depending on direction */
+		if (config->direction == DMA_TO_DEVICE)
+			size = config->dst_addr_width * config->dst_maxburst;
+		else
+			size = config->src_addr_width * config->src_maxburst;
+
+		chan->set_request_count(chan, size);
+		return 0;
+
+	case FSLDMA_EXTERNAL_START:
+		/* make sure the channel supports external start */
+		if (!chan->toggle_ext_start)
+			return -ENXIO;
+
+		chan->toggle_ext_start(chan, arg);
+		return 0;
+
+	default:
+		return -ENXIO;
+	}

 	return 0;
 }
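
The DMA_SLAVE_CONFIG branch derives the controller request count as addr_width * maxburst; for example, 4-byte accesses with a maxburst of 8 program a 32-byte request count. A sketch of the matching client call (illustrative values, not from the patch):

static int example_fsl_slave_config(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* 4 bytes */
		.src_maxburst	= 8,				/* x8 = 32 */
	};

	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}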
@@ -1327,11 +1309,13 @@ static int __devinit fsldma_of_probe(struct platform_device *op,

 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+	dma_cap_set(DMA_SG, fdev->common.cap_mask);
 	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
 	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
+	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
 	fdev->common.device_tx_status = fsl_tx_status;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;


@@ -0,0 +1,424 @@
/*
* drivers/dma/imx-dma.c
*
* This file contains a driver for the Freescale i.MX DMA engine
* found on i.MX1/21/27
*
* Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>
struct imxdma_channel {
struct imxdma_engine *imxdma;
unsigned int channel;
unsigned int imxdma_channel;
enum dma_slave_buswidth word_size;
dma_addr_t per_address;
u32 watermark_level;
struct dma_chan chan;
spinlock_t lock;
struct dma_async_tx_descriptor desc;
dma_cookie_t last_completed;
enum dma_status status;
int dma_request;
struct scatterlist *sg_list;
};
#define MAX_DMA_CHANNELS 8
struct imxdma_engine {
struct device *dev;
struct dma_device dma_device;
struct imxdma_channel channel[MAX_DMA_CHANNELS];
};
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
return container_of(chan, struct imxdma_channel, chan);
}
static void imxdma_handle(struct imxdma_channel *imxdmac)
{
if (imxdmac->desc.callback)
imxdmac->desc.callback(imxdmac->desc.callback_param);
imxdmac->last_completed = imxdmac->desc.cookie;
}
static void imxdma_irq_handler(int channel, void *data)
{
struct imxdma_channel *imxdmac = data;
imxdmac->status = DMA_SUCCESS;
imxdma_handle(imxdmac);
}
static void imxdma_err_handler(int channel, void *data, int error)
{
struct imxdma_channel *imxdmac = data;
imxdmac->status = DMA_ERROR;
imxdma_handle(imxdmac);
}
static void imxdma_progression(int channel, void *data,
struct scatterlist *sg)
{
struct imxdma_channel *imxdmac = data;
imxdmac->status = DMA_SUCCESS;
imxdma_handle(imxdmac);
}
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct dma_slave_config *dmaengine_cfg = (void *)arg;
int ret;
unsigned int mode = 0;
switch (cmd) {
case DMA_TERMINATE_ALL:
imxdmac->status = DMA_ERROR;
imx_dma_disable(imxdmac->imxdma_channel);
return 0;
case DMA_SLAVE_CONFIG:
if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
} else {
imxdmac->per_address = dmaengine_cfg->dst_addr;
imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
imxdmac->word_size = dmaengine_cfg->dst_addr_width;
}
switch (imxdmac->word_size) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
mode = IMX_DMA_MEMSIZE_8;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
mode = IMX_DMA_MEMSIZE_16;
break;
default:
case DMA_SLAVE_BUSWIDTH_4_BYTES:
mode = IMX_DMA_MEMSIZE_32;
break;
}
ret = imx_dma_config_channel(imxdmac->imxdma_channel,
mode | IMX_DMA_TYPE_FIFO,
IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
imxdmac->dma_request, 1);
if (ret)
return ret;
imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level);
return 0;
default:
return -ENOSYS;
}
return -EINVAL;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
dma_cookie_t last_used;
enum dma_status ret;
last_used = chan->cookie;
ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
return ret;
}
static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
{
dma_cookie_t cookie = imxdma->chan.cookie;
if (++cookie < 0)
cookie = 1;
imxdma->chan.cookie = cookie;
imxdma->desc.cookie = cookie;
return cookie;
}
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
dma_cookie_t cookie;
spin_lock_irq(&imxdmac->lock);
cookie = imxdma_assign_cookie(imxdmac);
imx_dma_enable(imxdmac->imxdma_channel);
spin_unlock_irq(&imxdmac->lock);
return cookie;
}
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imx_dma_data *data = chan->private;
imxdmac->dma_request = data->dma_request;
dma_async_tx_descriptor_init(&imxdmac->desc, chan);
imxdmac->desc.tx_submit = imxdma_tx_submit;
/* txd.flags will be overwritten in prep funcs */
imxdmac->desc.flags = DMA_CTRL_ACK;
imxdmac->status = DMA_SUCCESS;
return 0;
}
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
imx_dma_disable(imxdmac->imxdma_channel);
if (imxdmac->sg_list) {
kfree(imxdmac->sg_list);
imxdmac->sg_list = NULL;
}
}
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct scatterlist *sg;
int i, ret, dma_length = 0;
unsigned int dmamode;
if (imxdmac->status == DMA_IN_PROGRESS)
return NULL;
imxdmac->status = DMA_IN_PROGRESS;
for_each_sg(sgl, sg, sg_len, i) {
dma_length += sg->length;
}
if (direction == DMA_FROM_DEVICE)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
dma_length, imxdmac->per_address, dmamode);
if (ret)
return NULL;
return &imxdmac->desc;
}
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_data_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
int i, ret;
unsigned int periods = buf_len / period_len;
unsigned int dmamode;
dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);
if (imxdmac->status == DMA_IN_PROGRESS)
return NULL;
imxdmac->status = DMA_IN_PROGRESS;
ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
imxdma_progression);
if (ret) {
dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
return NULL;
}
if (imxdmac->sg_list)
kfree(imxdmac->sg_list);
imxdmac->sg_list = kcalloc(periods + 1,
sizeof(struct scatterlist), GFP_KERNEL);
if (!imxdmac->sg_list)
return NULL;
sg_init_table(imxdmac->sg_list, periods);
for (i = 0; i < periods; i++) {
imxdmac->sg_list[i].page_link = 0;
imxdmac->sg_list[i].offset = 0;
imxdmac->sg_list[i].dma_address = dma_addr;
imxdmac->sg_list[i].length = period_len;
dma_addr += period_len;
}
/* close the loop */
imxdmac->sg_list[periods].offset = 0;
imxdmac->sg_list[periods].length = 0;
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
if (direction == DMA_FROM_DEVICE)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods,
IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode);
if (ret)
return NULL;
return &imxdmac->desc;
}
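
A client arms the ring through the new cyclic hook; the sketch below assumes a four-period buffer and a device-to-memory direction, and the returned descriptor still has to be submitted through its tx_submit() before the channel starts:

static struct dma_async_tx_descriptor *
example_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf, size_t period)
{
	/* four periods back to back; the sg ring built above makes the
	 * transfer wrap from the last period to the first forever */
	return chan->device->device_prep_dma_cyclic(chan, buf, 4 * period,
						    period, DMA_FROM_DEVICE);
}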
static void imxdma_issue_pending(struct dma_chan *chan)
{
/*
* Nothing to do. We only have a single descriptor
*/
}
static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
int ret, i;
imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
if (!imxdma)
return -ENOMEM;
INIT_LIST_HEAD(&imxdma->dma_device.channels);
/* Initialize channel parameters */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct imxdma_channel *imxdmac = &imxdma->channel[i];
imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
DMA_PRIO_MEDIUM);
if ((int)imxdmac->imxdma_channel < 0) {
ret = -ENODEV;
goto err_init;
}
imx_dma_setup_handlers(imxdmac->imxdma_channel,
imxdma_irq_handler, imxdma_err_handler, imxdmac);
imxdmac->imxdma = imxdma;
spin_lock_init(&imxdmac->lock);
dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
imxdmac->chan.device = &imxdma->dma_device;
imxdmac->chan.chan_id = i;
imxdmac->channel = i;
/* Add the channel to the DMAC list */
list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
}
imxdma->dev = &pdev->dev;
imxdma->dma_device.dev = &pdev->dev;
imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
imxdma->dma_device.device_tx_status = imxdma_tx_status;
imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
imxdma->dma_device.device_control = imxdma_control;
imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
platform_set_drvdata(pdev, imxdma);
ret = dma_async_device_register(&imxdma->dma_device);
if (ret) {
dev_err(&pdev->dev, "unable to register\n");
goto err_init;
}
return 0;
err_init:
while (--i >= 0) {
struct imxdma_channel *imxdmac = &imxdma->channel[i];
imx_dma_free(imxdmac->imxdma_channel);
}
kfree(imxdma);
return ret;
}
static int __exit imxdma_remove(struct platform_device *pdev)
{
struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
int i;
dma_async_device_unregister(&imxdma->dma_device);
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct imxdma_channel *imxdmac = &imxdma->channel[i];
imx_dma_free(imxdmac->imxdma_channel);
}
kfree(imxdma);
return 0;
}
static struct platform_driver imxdma_driver = {
.driver = {
.name = "imx-dma",
},
.remove = __exit_p(imxdma_remove),
};
static int __init imxdma_module_init(void)
{
return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");

File diff suppressed because it is too large.


@@ -25,6 +25,7 @@
  */
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 #include <linux/intel_mid_dma.h>

 #define MAX_CHAN	4 /*max ch across controllers*/
@@ -91,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size)
 	int byte_width = 0, block_ts = 0;

 	switch (tx_width) {
-	case LNW_DMA_WIDTH_8BIT:
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
 		byte_width = 1;
 		break;
-	case LNW_DMA_WIDTH_16BIT:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
 		byte_width = 2;
 		break;
-	case LNW_DMA_WIDTH_32BIT:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
 	default:
 		byte_width = 4;
 		break;
@@ -247,16 +248,17 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
 	struct middma_device *mid = to_middma_device(midc->chan.device);

 	/* channel is idle */
-	if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) {
+	if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
 		/*error*/
 		pr_err("ERR_MDMA: channel is busy in start\n");
 		/* The tasklet will hopefully advance the queue... */
 		return;
 	}
+	midc->busy = true;
 	/*write registers and en*/
 	iowrite32(first->sar, midc->ch_regs + SAR);
 	iowrite32(first->dar, midc->ch_regs + DAR);
+	iowrite32(first->lli_phys, midc->ch_regs + LLP);
 	iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
 	iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
 	iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
@@ -264,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
 	pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
 		(int)first->sar, (int)first->dar, first->cfg_hi,
 		first->cfg_lo, first->ctl_hi, first->ctl_lo);
-	first->status = DMA_IN_PROGRESS;

 	iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
+	first->status = DMA_IN_PROGRESS;
 }
/**
@@ -283,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 {
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
 	dma_async_tx_callback callback_txd = NULL;
+	struct intel_mid_dma_lli	*llitem;
 	void *param_txd = NULL;

 	midc->completed = txd->cookie;
 	callback_txd = txd->callback;
 	param_txd = txd->callback_param;

-	list_move(&desc->desc_node, &midc->free_list);
-
+	if (desc->lli != NULL) {
+		/*clear the DONE bit of completed LLI in memory*/
+		llitem = desc->lli + desc->current_lli;
+		llitem->ctl_hi &= CLEAR_DONE;
+		if (desc->current_lli < desc->lli_length-1)
+			(desc->current_lli)++;
+		else
+			desc->current_lli = 0;
+	}
 	spin_unlock_bh(&midc->lock);
 	if (callback_txd) {
 		pr_debug("MDMA: TXD callback set ... calling\n");
 		callback_txd(param_txd);
-		spin_lock_bh(&midc->lock);
-		return;
+	}
+	if (midc->raw_tfr) {
+		desc->status = DMA_SUCCESS;
+		if (desc->lli != NULL) {
+			pci_pool_free(desc->lli_pool, desc->lli,
+				desc->lli_phys);
+			pci_pool_destroy(desc->lli_pool);
+		}
+		list_move(&desc->desc_node, &midc->free_list);
+		midc->busy = false;
 	}
 	spin_lock_bh(&midc->lock);

@@ -317,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid,

 	/*tx is complete*/
 	list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
-		if (desc->status == DMA_IN_PROGRESS) {
-			desc->status = DMA_SUCCESS;
+		if (desc->status == DMA_IN_PROGRESS)
 			midc_descriptor_complete(midc, desc);
-		}
 	}
 	return;
 }
/**
* midc_lli_fill_sg - Helper function to convert
* SG list to Linked List Items.
*@midc: Channel
*@desc: DMA descriptor
*@sglist: Pointer to SG list
*@sglen: SG list length
*@flags: DMA transaction flags
*
* Walk through the SG list and convert the SG list into Linked
* List Items (LLI).
*/
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
struct intel_mid_dma_desc *desc,
struct scatterlist *sglist,
unsigned int sglen,
unsigned int flags)
{
struct intel_mid_dma_slave *mids;
struct scatterlist *sg;
dma_addr_t lli_next, sg_phy_addr;
struct intel_mid_dma_lli *lli_bloc_desc;
union intel_mid_dma_ctl_lo ctl_lo;
union intel_mid_dma_ctl_hi ctl_hi;
int i;
pr_debug("MDMA: Entered midc_lli_fill_sg\n");
mids = midc->mid_slave;
lli_bloc_desc = desc->lli;
lli_next = desc->lli_phys;
ctl_lo.ctl_lo = desc->ctl_lo;
ctl_hi.ctl_hi = desc->ctl_hi;
for_each_sg(sglist, sg, sglen, i) {
/*Populate CTL_LOW and LLI values*/
if (i != sglen - 1) {
lli_next = lli_next +
sizeof(struct intel_mid_dma_lli);
} else {
/*Check for circular list, otherwise terminate LLI to ZERO*/
if (flags & DMA_PREP_CIRCULAR_LIST) {
pr_debug("MDMA: LLI is configured in circular mode\n");
lli_next = desc->lli_phys;
} else {
lli_next = 0;
ctl_lo.ctlx.llp_dst_en = 0;
ctl_lo.ctlx.llp_src_en = 0;
}
}
/*Populate CTL_HI values*/
ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
desc->width,
midc->dma->block_size);
/*Populate SAR and DAR values*/
sg_phy_addr = sg_phys(sg);
if (desc->dirn == DMA_TO_DEVICE) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
} else if (desc->dirn == DMA_FROM_DEVICE) {
lli_bloc_desc->sar = mids->dma_slave.src_addr;
lli_bloc_desc->dar = sg_phy_addr;
}
/*Copy values into block descriptor in system memory*/
lli_bloc_desc->llp = lli_next;
lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;
lli_bloc_desc++;
}
/*Copy very first LLI values to descriptor*/
desc->ctl_lo = desc->lli->ctl_lo;
desc->ctl_hi = desc->lli->ctl_hi;
desc->sar = desc->lli->sar;
desc->dar = desc->lli->dar;
return 0;
}
/*****************************************************************************
DMA engine callback Functions*/
/**
@@ -349,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	desc->txd.cookie = cookie;

-	if (list_empty(&midc->active_list)) {
-		midc_dostart(midc, desc);
+	if (list_empty(&midc->active_list))
 		list_add_tail(&desc->desc_node, &midc->active_list);
-	} else {
+	else
 		list_add_tail(&desc->desc_node, &midc->queue);
-	}
+
+	midc_dostart(midc, desc);
 	spin_unlock_bh(&midc->lock);

 	return cookie;
@@ -414,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 	return ret;
 }
static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
struct dma_slave_config *slave = (struct dma_slave_config *)arg;
struct intel_mid_dma_slave *mid_slave;
BUG_ON(!midc);
BUG_ON(!slave);
pr_debug("MDMA: slave control called\n");
mid_slave = to_intel_mid_dma_slave(slave);
BUG_ON(!mid_slave);
midc->mid_slave = mid_slave;
return 0;
}
/** /**
* intel_mid_dma_device_control - DMA device control * intel_mid_dma_device_control - DMA device control
* @chan: chan for DMA control * @chan: chan for DMA control
@ -428,49 +538,41 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
struct middma_device *mid = to_middma_device(chan->device); struct middma_device *mid = to_middma_device(chan->device);
struct intel_mid_dma_desc *desc, *_desc; struct intel_mid_dma_desc *desc, *_desc;
LIST_HEAD(list); union intel_mid_dma_cfg_lo cfg_lo;
if (cmd == DMA_SLAVE_CONFIG)
return dma_slave_control(chan, arg);
if (cmd != DMA_TERMINATE_ALL) if (cmd != DMA_TERMINATE_ALL)
return -ENXIO; return -ENXIO;
spin_lock_bh(&midc->lock); spin_lock_bh(&midc->lock);
if (midc->in_use == false) { if (midc->busy == false) {
spin_unlock_bh(&midc->lock); spin_unlock_bh(&midc->lock);
return 0; return 0;
} }
list_splice_init(&midc->free_list, &list); /*Suspend and disable the channel*/
midc->descs_allocated = 0; cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
midc->slave = NULL; cfg_lo.cfgx.ch_susp = 1;
iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
midc->busy = false;
/* Disable interrupts */ /* Disable interrupts */
disable_dma_interrupt(midc); disable_dma_interrupt(midc);
midc->descs_allocated = 0;
spin_unlock_bh(&midc->lock); spin_unlock_bh(&midc->lock);
list_for_each_entry_safe(desc, _desc, &list, desc_node) { list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
pr_debug("MDMA: freeing descriptor %p\n", desc); if (desc->lli != NULL) {
pci_pool_free(mid->dma_pool, desc, desc->txd.phys); pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
}
list_move(&desc->desc_node, &midc->free_list);
} }
return 0; return 0;
} }
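
DMA_TERMINATE_ALL now suspends the channel through CFG_LOW, disables it in DMA_CHAN_EN, destroys any per-descriptor LLI pool and moves the descriptors back to the free list. Clients reach it through the dmaengine_terminate_all() wrapper this series adds to dmaengine.h; a hedged sketch of the usual abort-on-timeout pattern, with hypothetical names:

    #include <linux/completion.h>
    #include <linux/dmaengine.h>

    /* Sketch: give the hardware 100 ms, then tear everything down. */
    static void my_abort_if_stuck(struct dma_chan *chan,
                                  struct completion *done)
    {
        if (!wait_for_completion_timeout(done, msecs_to_jiffies(100)))
            dmaengine_terminate_all(chan);  /* DMA_TERMINATE_ALL path above */
    }
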
/**
* intel_mid_dma_prep_slave_sg - Prep slave sg txn
* @chan: chan for DMA transfer
* @sgl: scatter gather list
* @sg_len: length of sg txn
* @direction: DMA transfer direction
* @flags: DMA flags
*
* Do DMA sg txn: NOT supported now
*/
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
/*not supported now*/
return NULL;
}
/** /**
* intel_mid_dma_prep_memcpy - Prep memcpy txn * intel_mid_dma_prep_memcpy - Prep memcpy txn
@ -495,23 +597,24 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
union intel_mid_dma_ctl_hi ctl_hi; union intel_mid_dma_ctl_hi ctl_hi;
union intel_mid_dma_cfg_lo cfg_lo; union intel_mid_dma_cfg_lo cfg_lo;
union intel_mid_dma_cfg_hi cfg_hi; union intel_mid_dma_cfg_hi cfg_hi;
enum intel_mid_dma_width width = 0; enum dma_slave_buswidth width;
pr_debug("MDMA: Prep for memcpy\n"); pr_debug("MDMA: Prep for memcpy\n");
WARN_ON(!chan); BUG_ON(!chan);
if (!len) if (!len)
return NULL; return NULL;
mids = chan->private;
WARN_ON(!mids);
midc = to_intel_mid_dma_chan(chan); midc = to_intel_mid_dma_chan(chan);
WARN_ON(!midc); BUG_ON(!midc);
mids = midc->mid_slave;
BUG_ON(!mids);
pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
midc->dma->pci_id, midc->ch_id, len); midc->dma->pci_id, midc->ch_id, len);
pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); mids->cfg_mode, mids->dma_slave.direction,
mids->hs_mode, mids->dma_slave.src_addr_width);
/*calculate CFG_LO*/ /*calculate CFG_LO*/
if (mids->hs_mode == LNW_DMA_SW_HS) { if (mids->hs_mode == LNW_DMA_SW_HS) {
@ -530,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
if (midc->dma->pimr_mask) { if (midc->dma->pimr_mask) {
cfg_hi.cfgx.protctl = 0x0; /*default value*/ cfg_hi.cfgx.protctl = 0x0; /*default value*/
cfg_hi.cfgx.fifo_mode = 1; cfg_hi.cfgx.fifo_mode = 1;
if (mids->dirn == DMA_TO_DEVICE) { if (mids->dma_slave.direction == DMA_TO_DEVICE) {
cfg_hi.cfgx.src_per = 0; cfg_hi.cfgx.src_per = 0;
if (mids->device_instance == 0) if (mids->device_instance == 0)
cfg_hi.cfgx.dst_per = 3; cfg_hi.cfgx.dst_per = 3;
if (mids->device_instance == 1) if (mids->device_instance == 1)
cfg_hi.cfgx.dst_per = 1; cfg_hi.cfgx.dst_per = 1;
} else if (mids->dirn == DMA_FROM_DEVICE) { } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
if (mids->device_instance == 0) if (mids->device_instance == 0)
cfg_hi.cfgx.src_per = 2; cfg_hi.cfgx.src_per = 2;
if (mids->device_instance == 1) if (mids->device_instance == 1)
@ -552,7 +655,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
/*calculate CTL_HI*/ /*calculate CTL_HI*/
ctl_hi.ctlx.reser = 0; ctl_hi.ctlx.reser = 0;
width = mids->src_width; ctl_hi.ctlx.done = 0;
width = mids->dma_slave.src_addr_width;
ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
pr_debug("MDMA:calc len %d for block size %d\n", pr_debug("MDMA:calc len %d for block size %d\n",
@ -560,21 +664,21 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
/*calculate CTL_LO*/ /*calculate CTL_LO*/
ctl_lo.ctl_lo = 0; ctl_lo.ctl_lo = 0;
ctl_lo.ctlx.int_en = 1; ctl_lo.ctlx.int_en = 1;
ctl_lo.ctlx.dst_tr_width = mids->dst_width; ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
ctl_lo.ctlx.src_tr_width = mids->src_width; ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
ctl_lo.ctlx.dst_msize = mids->src_msize; ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
ctl_lo.ctlx.src_msize = mids->dst_msize; ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;
if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
ctl_lo.ctlx.tt_fc = 0; ctl_lo.ctlx.tt_fc = 0;
ctl_lo.ctlx.sinc = 0; ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 0; ctl_lo.ctlx.dinc = 0;
} else { } else {
if (mids->dirn == DMA_TO_DEVICE) { if (mids->dma_slave.direction == DMA_TO_DEVICE) {
ctl_lo.ctlx.sinc = 0; ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 2; ctl_lo.ctlx.dinc = 2;
ctl_lo.ctlx.tt_fc = 1; ctl_lo.ctlx.tt_fc = 1;
} else if (mids->dirn == DMA_FROM_DEVICE) { } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
ctl_lo.ctlx.sinc = 2; ctl_lo.ctlx.sinc = 2;
ctl_lo.ctlx.dinc = 0; ctl_lo.ctlx.dinc = 0;
ctl_lo.ctlx.tt_fc = 2; ctl_lo.ctlx.tt_fc = 2;
@ -597,7 +701,10 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
desc->ctl_lo = ctl_lo.ctl_lo; desc->ctl_lo = ctl_lo.ctl_lo;
desc->ctl_hi = ctl_hi.ctl_hi; desc->ctl_hi = ctl_hi.ctl_hi;
desc->width = width; desc->width = width;
desc->dirn = mids->dirn; desc->dirn = mids->dma_slave.direction;
desc->lli_phys = 0;
desc->lli = NULL;
desc->lli_pool = NULL;
return &desc->txd; return &desc->txd;
err_desc_get: err_desc_get:
@ -605,6 +712,85 @@ err_desc_get:
midc_desc_put(midc, desc); midc_desc_put(midc, desc);
return NULL; return NULL;
} }
/**
* intel_mid_dma_prep_slave_sg - Prep slave sg txn
* @chan: chan for DMA transfer
* @sgl: scatter gather list
* @sg_len: length of sg txn
* @direction: DMA transfer direction
* @flags: DMA flags
*
* Prepares an LLI-based peripheral transfer
*/
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct intel_mid_dma_chan *midc = NULL;
struct intel_mid_dma_slave *mids = NULL;
struct intel_mid_dma_desc *desc = NULL;
struct dma_async_tx_descriptor *txd = NULL;
union intel_mid_dma_ctl_lo ctl_lo;
pr_debug("MDMA: Prep for slave SG\n");
if (!sg_len) {
pr_err("MDMA: Invalid SG length\n");
return NULL;
}
midc = to_intel_mid_dma_chan(chan);
BUG_ON(!midc);
mids = midc->mid_slave;
BUG_ON(!mids);
if (!midc->dma->pimr_mask) {
pr_debug("MDMA: SG list is not supported by this controller\n");
return NULL;
}
pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
sg_len, direction, flags);
txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
if (NULL == txd) {
pr_err("MDMA: Prep memcpy failed\n");
return NULL;
}
desc = to_intel_mid_dma_desc(txd);
desc->dirn = direction;
ctl_lo.ctl_lo = desc->ctl_lo;
ctl_lo.ctlx.llp_dst_en = 1;
ctl_lo.ctlx.llp_src_en = 1;
desc->ctl_lo = ctl_lo.ctl_lo;
desc->lli_length = sg_len;
desc->current_lli = 0;
/* DMA coherent memory pool for LLI descriptors*/
desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
midc->dma->pdev,
(sizeof(struct intel_mid_dma_lli)*sg_len),
32, 0);
if (NULL == desc->lli_pool) {
pr_err("MID_DMA:LLI pool create failed\n");
return NULL;
}
desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
if (!desc->lli) {
pr_err("MID_DMA: LLI alloc failed\n");
pci_pool_destroy(desc->lli_pool);
return NULL;
}
midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
if (flags & DMA_PREP_INTERRUPT) {
iowrite32(UNMASK_INTR_REG(midc->ch_id),
midc->dma_base + MASK_BLOCK);
pr_debug("MDMA:Enabled Block interrupt\n");
}
return &desc->txd;
}
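
Note that the implementation bootstraps the descriptor through intel_mid_dma_prep_memcpy() (null addresses, only the first sg entry's length) and then converts it into an LLI chain with llp_src_en/llp_dst_en set. A hedged client-side sketch of driving this path; the callback is hypothetical:

    static void my_dma_done(void *arg)
    {
        /* hypothetical completion handler */
    }

    static int my_start_sg(struct dma_chan *chan, struct scatterlist *sgl,
                           unsigned int sg_len)
    {
        struct dma_async_tx_descriptor *txd;

        txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                 DMA_TO_DEVICE,
                                                 DMA_PREP_INTERRUPT);
        if (!txd)
            return -EBUSY;

        txd->callback = my_dma_done;
        txd->callback_param = NULL;
        dmaengine_submit(txd);          /* inline helper from this series */
        dma_async_issue_pending(chan);
        return 0;
    }
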
/** /**
* intel_mid_dma_free_chan_resources - Frees dma resources * intel_mid_dma_free_chan_resources - Frees dma resources
@ -618,11 +804,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
struct middma_device *mid = to_middma_device(chan->device); struct middma_device *mid = to_middma_device(chan->device);
struct intel_mid_dma_desc *desc, *_desc; struct intel_mid_dma_desc *desc, *_desc;
if (true == midc->in_use) { if (true == midc->busy) {
/*trying to free ch in use!!!!!*/ /*trying to free ch in use!!!!!*/
pr_err("ERR_MDMA: trying to free ch in use\n"); pr_err("ERR_MDMA: trying to free ch in use\n");
} }
pm_runtime_put(&mid->pdev->dev);
spin_lock_bh(&midc->lock); spin_lock_bh(&midc->lock);
midc->descs_allocated = 0; midc->descs_allocated = 0;
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@ -639,6 +825,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
} }
spin_unlock_bh(&midc->lock); spin_unlock_bh(&midc->lock);
midc->in_use = false; midc->in_use = false;
midc->busy = false;
/* Disable CH interrupts */ /* Disable CH interrupts */
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
@ -659,11 +846,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
dma_addr_t phys; dma_addr_t phys;
int i = 0; int i = 0;
pm_runtime_get_sync(&mid->pdev->dev);
if (mid->state == SUSPENDED) {
if (dma_resume(mid->pdev)) {
pr_err("ERR_MDMA: resume failed");
return -EFAULT;
}
}
/* ASSERT: channel is idle */ /* ASSERT: channel is idle */
if (test_ch_en(mid->dma_base, midc->ch_id)) { if (test_ch_en(mid->dma_base, midc->ch_id)) {
/*ch is not idle*/ /*ch is not idle*/
pr_err("ERR_MDMA: ch not idle\n"); pr_err("ERR_MDMA: ch not idle\n");
pm_runtime_put(&mid->pdev->dev);
return -EIO; return -EIO;
} }
midc->completed = chan->cookie = 1; midc->completed = chan->cookie = 1;
@ -674,6 +870,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
if (!desc) { if (!desc) {
pr_err("ERR_MDMA: desc failed\n"); pr_err("ERR_MDMA: desc failed\n");
pm_runtime_put(&mid->pdev->dev);
return -ENOMEM; return -ENOMEM;
/*check*/ /*check*/
} }
@ -686,7 +883,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &midc->free_list); list_add_tail(&desc->desc_node, &midc->free_list);
} }
spin_unlock_bh(&midc->lock); spin_unlock_bh(&midc->lock);
midc->in_use = false; midc->in_use = true;
midc->busy = false;
pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
return i; return i;
} }
@ -715,7 +913,7 @@ static void dma_tasklet(unsigned long data)
{ {
struct middma_device *mid = NULL; struct middma_device *mid = NULL;
struct intel_mid_dma_chan *midc = NULL; struct intel_mid_dma_chan *midc = NULL;
u32 status; u32 status, raw_tfr, raw_block;
int i; int i;
mid = (struct middma_device *)data; mid = (struct middma_device *)data;
@ -724,8 +922,9 @@ static void dma_tasklet(unsigned long data)
return; return;
} }
pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
status = ioread32(mid->dma_base + RAW_TFR); raw_tfr = ioread32(mid->dma_base + RAW_TFR);
pr_debug("MDMA:RAW_TFR %x\n", status); raw_block = ioread32(mid->dma_base + RAW_BLOCK);
status = raw_tfr | raw_block;
status &= mid->intr_mask; status &= mid->intr_mask;
while (status) { while (status) {
/*txn interrupt*/ /*txn interrupt*/
@ -741,15 +940,23 @@ static void dma_tasklet(unsigned long data)
} }
pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
status, midc->ch_id, i); status, midc->ch_id, i);
midc->raw_tfr = raw_tfr;
midc->raw_block = raw_block;
spin_lock_bh(&midc->lock);
/*clearing these interrupts first*/ /*clearing these interrupts first*/
iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK); if (raw_block) {
iowrite32((1 << midc->ch_id),
spin_lock_bh(&midc->lock); mid->dma_base + CLEAR_BLOCK);
}
midc_scan_descriptors(mid, midc); midc_scan_descriptors(mid, midc);
pr_debug("MDMA:Scan of desc... complete, unmasking\n"); pr_debug("MDMA:Scan of desc... complete, unmasking\n");
iowrite32(UNMASK_INTR_REG(midc->ch_id), iowrite32(UNMASK_INTR_REG(midc->ch_id),
mid->dma_base + MASK_TFR); mid->dma_base + MASK_TFR);
if (raw_block) {
iowrite32(UNMASK_INTR_REG(midc->ch_id),
mid->dma_base + MASK_BLOCK);
}
spin_unlock_bh(&midc->lock); spin_unlock_bh(&midc->lock);
} }
@ -804,9 +1011,14 @@ static void dma_tasklet2(unsigned long data)
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{ {
struct middma_device *mid = data; struct middma_device *mid = data;
u32 status; u32 tfr_status, err_status;
int call_tasklet = 0; int call_tasklet = 0;
tfr_status = ioread32(mid->dma_base + RAW_TFR);
err_status = ioread32(mid->dma_base + RAW_ERR);
if (!tfr_status && !err_status)
return IRQ_NONE;
/*DMA Interrupt*/ /*DMA Interrupt*/
pr_debug("MDMA:Got an interrupt on irq %d\n", irq); pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
if (!mid) { if (!mid) {
@ -814,19 +1026,18 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
return -EINVAL; return -EINVAL;
} }
status = ioread32(mid->dma_base + RAW_TFR); pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask); tfr_status &= mid->intr_mask;
status &= mid->intr_mask; if (tfr_status) {
if (status) {
/*need to disable intr*/ /*need to disable intr*/
iowrite32((status << 8), mid->dma_base + MASK_TFR); iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
pr_debug("MDMA: Calling tasklet %x\n", status); iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
call_tasklet = 1; call_tasklet = 1;
} }
status = ioread32(mid->dma_base + RAW_ERR); err_status &= mid->intr_mask;
status &= mid->intr_mask; if (err_status) {
if (status) { iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR);
iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR);
call_tasklet = 1; call_tasklet = 1;
} }
if (call_tasklet) if (call_tasklet)
@ -856,7 +1067,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
{ {
struct middma_device *dma = pci_get_drvdata(pdev); struct middma_device *dma = pci_get_drvdata(pdev);
int err, i; int err, i;
unsigned int irq_level;
/* DMA coherent memory pool for DMA descriptor allocations */ /* DMA coherent memory pool for DMA descriptor allocations */
dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
@ -884,6 +1094,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan);
/*init CH structures*/ /*init CH structures*/
dma->intr_mask = 0; dma->intr_mask = 0;
dma->state = RUNNING;
for (i = 0; i < dma->max_chan; i++) { for (i = 0; i < dma->max_chan; i++) {
struct intel_mid_dma_chan *midch = &dma->ch[i]; struct intel_mid_dma_chan *midch = &dma->ch[i];
@ -943,7 +1154,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
/*register irq */ /*register irq */
if (dma->pimr_mask) { if (dma->pimr_mask) {
irq_level = IRQF_SHARED;
pr_debug("MDMA:Requesting irq shared for DMAC1\n"); pr_debug("MDMA:Requesting irq shared for DMAC1\n");
err = request_irq(pdev->irq, intel_mid_dma_interrupt1, err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
IRQF_SHARED, "INTEL_MID_DMAC1", dma); IRQF_SHARED, "INTEL_MID_DMAC1", dma);
@ -951,10 +1161,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
goto err_irq; goto err_irq;
} else { } else {
dma->intr_mask = 0x03; dma->intr_mask = 0x03;
irq_level = 0;
pr_debug("MDMA:Requesting irq for DMAC2\n"); pr_debug("MDMA:Requesting irq for DMAC2\n");
err = request_irq(pdev->irq, intel_mid_dma_interrupt2, err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
0, "INTEL_MID_DMAC2", dma); IRQF_SHARED, "INTEL_MID_DMAC2", dma);
if (0 != err) if (0 != err)
goto err_irq; goto err_irq;
} }
@ -1070,6 +1279,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
if (err) if (err)
goto err_dma; goto err_dma;
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0; return 0;
err_dma: err_dma:
@ -1104,6 +1316,85 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
pci_disable_device(pdev); pci_disable_device(pdev);
} }
/* Power Management */
/*
* dma_suspend - PCI suspend function
*
* @pci: PCI device structure
* @state: PM message
*
* This function is called by OS when a power event occurs
*/
int dma_suspend(struct pci_dev *pci, pm_message_t state)
{
int i;
struct middma_device *device = pci_get_drvdata(pci);
pr_debug("MDMA: dma_suspend called\n");
for (i = 0; i < device->max_chan; i++) {
if (device->ch[i].in_use)
return -EAGAIN;
}
device->state = SUSPENDED;
pci_set_drvdata(pci, device);
pci_save_state(pci);
pci_disable_device(pci);
pci_set_power_state(pci, PCI_D3hot);
return 0;
}
/**
* dma_resume - PCI resume function
*
* @pci: PCI device structure
*
* This function is called by OS when a power event occurs
*/
int dma_resume(struct pci_dev *pci)
{
int ret;
struct middma_device *device = pci_get_drvdata(pci);
pr_debug("MDMA: dma_resume called\n");
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
ret = pci_enable_device(pci);
if (ret) {
pr_err("MDMA: device cant be enabled for %x\n", pci->device);
return ret;
}
device->state = RUNNING;
iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
pci_set_drvdata(pci, device);
return 0;
}
static int dma_runtime_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
return dma_suspend(pci_dev, PMSG_SUSPEND);
}
static int dma_runtime_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
return dma_resume(pci_dev);
}
static int dma_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct middma_device *device = pci_get_drvdata(pdev);
int i;
for (i = 0; i < device->max_chan; i++) {
if (device->ch[i].in_use)
return -EAGAIN;
}
return pm_schedule_suspend(dev, 0);
}
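
The runtime PM pieces balance like this: alloc_chan_resources takes a reference with pm_runtime_get_sync() (and resumes a SUSPENDED controller by hand), free_chan_resources drops it with pm_runtime_put(), and the idle callback above refuses to power down while any channel is marked in_use. A minimal sketch of the same get/put pattern, all names hypothetical:

    #include <linux/pm_runtime.h>

    /* Illustrative get/put balance around a burst of DMA activity. */
    static int my_use_dmac(struct device *dev)
    {
        int ret = pm_runtime_get_sync(dev); /* wake device, hold reference */

        if (ret < 0)
            return ret;
        /* ... issue and wait for transfers ... */
        pm_runtime_put(dev);    /* runtime_idle may now schedule suspend */
        return 0;
    }
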
/****************************************************************************** /******************************************************************************
* PCI stuff * PCI stuff
*/ */
@ -1116,11 +1407,24 @@ static struct pci_device_id intel_mid_dma_ids[] = {
}; };
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
static const struct dev_pm_ops intel_mid_dma_pm = {
.runtime_suspend = dma_runtime_suspend,
.runtime_resume = dma_runtime_resume,
.runtime_idle = dma_runtime_idle,
};
static struct pci_driver intel_mid_dma_pci = { static struct pci_driver intel_mid_dma_pci = {
.name = "Intel MID DMA", .name = "Intel MID DMA",
.id_table = intel_mid_dma_ids, .id_table = intel_mid_dma_ids,
.probe = intel_mid_dma_probe, .probe = intel_mid_dma_probe,
.remove = __devexit_p(intel_mid_dma_remove), .remove = __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
.suspend = dma_suspend,
.resume = dma_resume,
.driver = {
.pm = &intel_mid_dma_pm,
},
#endif
}; };
static int __init intel_mid_dma_init(void) static int __init intel_mid_dma_init(void)


@ -29,11 +29,12 @@
#include <linux/dmapool.h> #include <linux/dmapool.h>
#include <linux/pci_ids.h> #include <linux/pci_ids.h>
#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5" #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
#define REG_BIT0 0x00000001 #define REG_BIT0 0x00000001
#define REG_BIT8 0x00000100 #define REG_BIT8 0x00000100
#define INT_MASK_WE 0x8
#define CLEAR_DONE 0xFFFFEFFF
#define UNMASK_INTR_REG(chan_num) \ #define UNMASK_INTR_REG(chan_num) \
((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
@ -41,6 +42,9 @@
#define ENABLE_CHANNEL(chan_num) \ #define ENABLE_CHANNEL(chan_num) \
((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
#define DISABLE_CHANNEL(chan_num) \
(REG_BIT8 << chan_num)
#define DESCS_PER_CHANNEL 16 #define DESCS_PER_CHANNEL 16
/*DMA Registers*/ /*DMA Registers*/
/*registers associated with channel programming*/ /*registers associated with channel programming*/
@ -50,6 +54,7 @@
/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
#define SAR 0x00 /* Source Address Register*/ #define SAR 0x00 /* Source Address Register*/
#define DAR 0x08 /* Destination Address Register*/ #define DAR 0x08 /* Destination Address Register*/
#define LLP 0x10 /* Linked List Pointer Register*/
#define CTL_LOW 0x18 /* Control Register*/ #define CTL_LOW 0x18 /* Control Register*/
#define CTL_HIGH 0x1C /* Control Register*/ #define CTL_HIGH 0x1C /* Control Register*/
#define CFG_LOW 0x40 /* Configuration Register Low*/ #define CFG_LOW 0x40 /* Configuration Register Low*/
@ -112,8 +117,8 @@ union intel_mid_dma_ctl_lo {
union intel_mid_dma_ctl_hi { union intel_mid_dma_ctl_hi {
struct { struct {
u32 block_ts:12; /*block transfer size*/ u32 block_ts:12; /*block transfer size*/
/*configured by DMAC*/ u32 done:1; /*Done - updated by DMAC*/
u32 reser:20; u32 reser:19; /*configured by DMAC*/
} ctlx; } ctlx;
u32 ctl_hi; u32 ctl_hi;
@ -152,6 +157,7 @@ union intel_mid_dma_cfg_hi {
u32 cfg_hi; u32 cfg_hi;
}; };
/** /**
* struct intel_mid_dma_chan - internal mid representation of a DMA channel * struct intel_mid_dma_chan - internal mid representation of a DMA channel
* @chan: dma_chan structure representation for mid chan * @chan: dma_chan structure representation for mid chan
@ -166,7 +172,10 @@ union intel_mid_dma_cfg_hi {
* @slave: dma slave structure * @slave: dma slave structure
* @descs_allocated: total number of descriptors allocated * @descs_allocated: total number of descriptors allocated
* @dma: dma device structure pointer * @dma: dma device structure pointer
* @busy: bool representing if ch is busy (active txn) or not
* @in_use: bool representing if ch is in use or not * @in_use: bool representing if ch is in use or not
* @raw_tfr: raw tfr interrupt received
* @raw_block: raw block interrupt received
*/ */
struct intel_mid_dma_chan { struct intel_mid_dma_chan {
struct dma_chan chan; struct dma_chan chan;
@ -178,10 +187,13 @@ struct intel_mid_dma_chan {
struct list_head active_list; struct list_head active_list;
struct list_head queue; struct list_head queue;
struct list_head free_list; struct list_head free_list;
struct intel_mid_dma_slave *slave;
unsigned int descs_allocated; unsigned int descs_allocated;
struct middma_device *dma; struct middma_device *dma;
bool busy;
bool in_use; bool in_use;
u32 raw_tfr;
u32 raw_block;
struct intel_mid_dma_slave *mid_slave;
}; };
static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
@ -190,6 +202,10 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
return container_of(chan, struct intel_mid_dma_chan, chan); return container_of(chan, struct intel_mid_dma_chan, chan);
} }
enum intel_mid_dma_state {
RUNNING = 0,
SUSPENDED,
};
/** /**
* struct middma_device - internal representation of a DMA device * struct middma_device - internal representation of a DMA device
* @pdev: PCI device * @pdev: PCI device
@ -205,6 +221,7 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
* @max_chan: max number of chs supported (from drv_data) * @max_chan: max number of chs supported (from drv_data)
* @block_size: Block size of DMA transfer supported (from drv_data) * @block_size: Block size of DMA transfer supported (from drv_data)
* @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data) * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
* @state: dma PM device state
*/ */
struct middma_device { struct middma_device {
struct pci_dev *pdev; struct pci_dev *pdev;
@ -220,6 +237,7 @@ struct middma_device {
int max_chan; int max_chan;
int block_size; int block_size;
unsigned int pimr_mask; unsigned int pimr_mask;
enum intel_mid_dma_state state;
}; };
static inline struct middma_device *to_middma_device(struct dma_device *common) static inline struct middma_device *to_middma_device(struct dma_device *common)
@ -238,14 +256,27 @@ struct intel_mid_dma_desc {
u32 cfg_lo; u32 cfg_lo;
u32 ctl_lo; u32 ctl_lo;
u32 ctl_hi; u32 ctl_hi;
struct pci_pool *lli_pool;
struct intel_mid_dma_lli *lli;
dma_addr_t lli_phys;
unsigned int lli_length;
unsigned int current_lli;
dma_addr_t next; dma_addr_t next;
enum dma_data_direction dirn; enum dma_data_direction dirn;
enum dma_status status; enum dma_status status;
enum intel_mid_dma_width width; /*width of DMA txn*/ enum dma_slave_buswidth width; /*width of DMA txn*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
}; };
struct intel_mid_dma_lli {
dma_addr_t sar;
dma_addr_t dar;
dma_addr_t llp;
u32 ctl_lo;
u32 ctl_hi;
} __attribute__ ((packed));
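
Each LLI carries the source and destination addresses, the link pointer the DMAC dereferences to fetch the next block, and both control words; the chain must therefore live in DMA-coherent memory, which is why prep_slave_sg creates a pci_pool per descriptor. A sketch of the link arithmetic, assuming (as the driver's fill loop does) that the entries sit contiguously in one pool allocation:

    /* Sketch: chain n contiguous LLIs; a zero llp terminates the list. */
    static void my_link_llis(struct intel_mid_dma_lli *lli,
                             dma_addr_t lli_phys, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            lli[i].llp = (i == n - 1) ? 0 :
                lli_phys + (i + 1) * sizeof(*lli);
    }
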
static inline int test_ch_en(void __iomem *dma, u32 ch_no) static inline int test_ch_en(void __iomem *dma, u32 ch_no)
{ {
u32 en_reg = ioread32(dma + DMA_CHAN_EN); u32 en_reg = ioread32(dma + DMA_CHAN_EN);
@ -257,4 +288,14 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
{ {
return container_of(txd, struct intel_mid_dma_desc, txd); return container_of(txd, struct intel_mid_dma_desc, txd);
} }
static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
(struct dma_slave_config *slave)
{
return container_of(slave, struct intel_mid_dma_slave, dma_slave);
}
int dma_resume(struct pci_dev *pci);
#endif /*__INTEL_MID_DMAC_REGS_H__*/ #endif /*__INTEL_MID_DMAC_REGS_H__*/

File diff suppressed because it is too large


@ -1,10 +1,8 @@
/* /*
* driver/dma/ste_dma40_ll.c * Copyright (C) ST-Ericsson SA 2007-2010
* * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
* Copyright (C) ST-Ericsson 2007-2010 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
* License terms: GNU General Public License (GPL) version 2 * License terms: GNU General Public License (GPL) version 2
* Author: Per Friden <per.friden@stericsson.com>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
@ -39,16 +37,13 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
cfg->dir == STEDMA40_PERIPH_TO_PERIPH) cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
*lcsp1 = l1; *lcsp1 = l1;
*lcsp3 = l3; *lcsp3 = l3;
@ -113,13 +108,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= 1 << D40_SREG_CFG_LOG_GIM_POS; src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
} }
if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) { if (cfg->high_priority) {
src |= 1 << D40_SREG_CFG_PRI_POS; src |= 1 << D40_SREG_CFG_PRI_POS;
dst |= 1 << D40_SREG_CFG_PRI_POS; dst |= 1 << D40_SREG_CFG_PRI_POS;
} }
src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS; if (cfg->src_info.big_endian)
dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS; src |= 1 << D40_SREG_CFG_LBE_POS;
if (cfg->dst_info.big_endian)
dst |= 1 << D40_SREG_CFG_LBE_POS;
*src_cfg = src; *src_cfg = src;
*dst_cfg = dst; *dst_cfg = dst;
@ -197,8 +194,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
dma_addr_t lli_phys, dma_addr_t lli_phys,
u32 reg_cfg, u32 reg_cfg,
u32 data_width, u32 data_width,
int psize, int psize)
bool term_int)
{ {
int total_size = 0; int total_size = 0;
int i; int i;
@ -238,7 +234,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
} }
return total_size; return total_size;
err: err:
return err; return err;
} }
@ -271,11 +267,59 @@ void d40_phy_lli_write(void __iomem *virtbase,
/* DMA logical lli operations */ /* DMA logical lli operations */
static void d40_log_lli_link(struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
int next)
{
u32 slos = 0;
u32 dlos = 0;
if (next != -EINVAL) {
slos = next * 2;
dlos = next * 2 + 1;
} else {
lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
}
lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
(slos << D40_MEM_LCSP1_SLOS_POS);
lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
(dlos << D40_MEM_LCSP1_SLOS_POS);
}
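
The offsets index LCLA entries, which are consumed in src/dst pairs, hence the *2 and *2+1 encoding. A worked example, as far as the code shows:

    /*
     * next = 3       -> slos = 6, dlos = 7 (the fourth src/dst pair)
     * next = -EINVAL -> no link: the TIM (terminal interrupt) and DTCP
     *                   bits are set instead, ending the chain.
     */
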
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
int next)
{
d40_log_lli_link(lli_dst, lli_src, next);
writel(lli_src->lcsp02, &lcpa[0].lcsp0);
writel(lli_src->lcsp13, &lcpa[0].lcsp1);
writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
}
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
int next)
{
d40_log_lli_link(lli_dst, lli_src, next);
writel(lli_src->lcsp02, &lcla[0].lcsp02);
writel(lli_src->lcsp13, &lcla[0].lcsp13);
writel(lli_dst->lcsp02, &lcla[1].lcsp02);
writel(lli_dst->lcsp13, &lcla[1].lcsp13);
}
void d40_log_fill_lli(struct d40_log_lli *lli, void d40_log_fill_lli(struct d40_log_lli *lli,
dma_addr_t data, u32 data_size, dma_addr_t data, u32 data_size,
u32 lli_next_off, u32 reg_cfg, u32 reg_cfg,
u32 data_width, u32 data_width,
bool term_int, bool addr_inc) bool addr_inc)
{ {
lli->lcsp13 = reg_cfg; lli->lcsp13 = reg_cfg;
@ -290,165 +334,69 @@ void d40_log_fill_lli(struct d40_log_lli *lli,
if (addr_inc) if (addr_inc)
lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK; lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
/* If this scatter list entry is the last one, no next link */
lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
D40_MEM_LCSP1_SLOS_MASK;
if (term_int)
lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
else
lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
} }
int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, int d40_log_sg_to_dev(struct scatterlist *sg,
struct scatterlist *sg,
int sg_len, int sg_len,
struct d40_log_lli_bidir *lli, struct d40_log_lli_bidir *lli,
struct d40_def_lcsp *lcsp, struct d40_def_lcsp *lcsp,
u32 src_data_width, u32 src_data_width,
u32 dst_data_width, u32 dst_data_width,
enum dma_data_direction direction, enum dma_data_direction direction,
bool term_int, dma_addr_t dev_addr, int max_len, dma_addr_t dev_addr)
int llis_per_log)
{ {
int total_size = 0; int total_size = 0;
struct scatterlist *current_sg = sg; struct scatterlist *current_sg = sg;
int i; int i;
u32 next_lli_off_dst = 0;
u32 next_lli_off_src = 0;
for_each_sg(sg, current_sg, sg_len, i) { for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg); total_size += sg_dma_len(current_sg);
/*
* If this scatter list entry is the last one or
* max length, terminate link.
*/
if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
next_lli_off_src = 0;
next_lli_off_dst = 0;
} else {
if (next_lli_off_dst == 0 &&
next_lli_off_src == 0) {
/* The first lli will be at next_lli_off */
next_lli_off_dst = (lcla->dst_id *
llis_per_log + 1);
next_lli_off_src = (lcla->src_id *
llis_per_log + 1);
} else {
next_lli_off_dst++;
next_lli_off_src++;
}
}
if (direction == DMA_TO_DEVICE) { if (direction == DMA_TO_DEVICE) {
d40_log_fill_lli(&lli->src[i], d40_log_fill_lli(&lli->src[i],
sg_phys(current_sg), sg_phys(current_sg),
sg_dma_len(current_sg), sg_dma_len(current_sg),
next_lli_off_src,
lcsp->lcsp1, src_data_width, lcsp->lcsp1, src_data_width,
false,
true); true);
d40_log_fill_lli(&lli->dst[i], d40_log_fill_lli(&lli->dst[i],
dev_addr, dev_addr,
sg_dma_len(current_sg), sg_dma_len(current_sg),
next_lli_off_dst,
lcsp->lcsp3, dst_data_width, lcsp->lcsp3, dst_data_width,
/* No next == terminal interrupt */
term_int && !next_lli_off_dst,
false); false);
} else { } else {
d40_log_fill_lli(&lli->dst[i], d40_log_fill_lli(&lli->dst[i],
sg_phys(current_sg), sg_phys(current_sg),
sg_dma_len(current_sg), sg_dma_len(current_sg),
next_lli_off_dst,
lcsp->lcsp3, dst_data_width, lcsp->lcsp3, dst_data_width,
/* No next == terminal interrupt */
term_int && !next_lli_off_dst,
true); true);
d40_log_fill_lli(&lli->src[i], d40_log_fill_lli(&lli->src[i],
dev_addr, dev_addr,
sg_dma_len(current_sg), sg_dma_len(current_sg),
next_lli_off_src,
lcsp->lcsp1, src_data_width, lcsp->lcsp1, src_data_width,
false,
false); false);
} }
} }
return total_size; return total_size;
} }
int d40_log_sg_to_lli(int lcla_id, int d40_log_sg_to_lli(struct scatterlist *sg,
struct scatterlist *sg,
int sg_len, int sg_len,
struct d40_log_lli *lli_sg, struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/ u32 lcsp13, /* src or dst*/
u32 data_width, u32 data_width)
bool term_int, int max_len, int llis_per_log)
{ {
int total_size = 0; int total_size = 0;
struct scatterlist *current_sg = sg; struct scatterlist *current_sg = sg;
int i; int i;
u32 next_lli_off = 0;
for_each_sg(sg, current_sg, sg_len, i) { for_each_sg(sg, current_sg, sg_len, i) {
total_size += sg_dma_len(current_sg); total_size += sg_dma_len(current_sg);
/*
* If this scatter list entry is the last one or
* max length, terminate link.
*/
if (sg_len - 1 == i || ((i+1) % max_len == 0))
next_lli_off = 0;
else {
if (next_lli_off == 0)
/* The first lli will be at next_lli_off */
next_lli_off = lcla_id * llis_per_log + 1;
else
next_lli_off++;
}
d40_log_fill_lli(&lli_sg[i], d40_log_fill_lli(&lli_sg[i],
sg_phys(current_sg), sg_phys(current_sg),
sg_dma_len(current_sg), sg_dma_len(current_sg),
next_lli_off,
lcsp13, data_width, lcsp13, data_width,
term_int && !next_lli_off,
true); true);
} }
return total_size; return total_size;
} }
int d40_log_lli_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lcla_src,
struct d40_log_lli *lcla_dst,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
int llis_per_log)
{
u32 slos;
u32 dlos;
int i;
writel(lli_src->lcsp02, &lcpa->lcsp0);
writel(lli_src->lcsp13, &lcpa->lcsp1);
writel(lli_dst->lcsp02, &lcpa->lcsp2);
writel(lli_dst->lcsp13, &lcpa->lcsp3);
slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
}
return i;
}


@ -1,10 +1,8 @@
/* /*
* driver/dma/ste_dma40_ll.h * Copyright (C) ST-Ericsson SA 2007-2010
* * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson SA
* Copyright (C) ST-Ericsson 2007-2010 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson SA
* License terms: GNU General Public License (GPL) version 2 * License terms: GNU General Public License (GPL) version 2
* Author: Per Friden <per.friden@stericsson.com>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/ */
#ifndef STE_DMA40_LL_H #ifndef STE_DMA40_LL_H
#define STE_DMA40_LL_H #define STE_DMA40_LL_H
@ -132,6 +130,13 @@
#define D40_DREG_PRMSO 0x014 #define D40_DREG_PRMSO 0x014
#define D40_DREG_PRMOE 0x018 #define D40_DREG_PRMOE 0x018
#define D40_DREG_PRMOO 0x01C #define D40_DREG_PRMOO 0x01C
#define D40_DREG_PRMO_PCHAN_BASIC 0x1
#define D40_DREG_PRMO_PCHAN_MODULO 0x2
#define D40_DREG_PRMO_PCHAN_DOUBLE_DST 0x3
#define D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG 0x1
#define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY 0x2
#define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG 0x3
#define D40_DREG_LCPA 0x020 #define D40_DREG_LCPA 0x020
#define D40_DREG_LCLA 0x024 #define D40_DREG_LCLA 0x024
#define D40_DREG_ACTIVE 0x050 #define D40_DREG_ACTIVE 0x050
@ -163,6 +168,9 @@
#define D40_DREG_PERIPHID0 0xFE0 #define D40_DREG_PERIPHID0 0xFE0
#define D40_DREG_PERIPHID1 0xFE4 #define D40_DREG_PERIPHID1 0xFE4
#define D40_DREG_PERIPHID2 0xFE8 #define D40_DREG_PERIPHID2 0xFE8
#define D40_DREG_PERIPHID2_REV_POS 4
#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
#define D40_DREG_PERIPHID3 0xFEC #define D40_DREG_PERIPHID3 0xFEC
#define D40_DREG_CELLID0 0xFF0 #define D40_DREG_CELLID0 0xFF0
#define D40_DREG_CELLID1 0xFF4 #define D40_DREG_CELLID1 0xFF4
@ -199,8 +207,6 @@ struct d40_phy_lli {
* *
* @src: Register settings for src channel. * @src: Register settings for src channel.
* @dst: Register settings for dst channel. * @dst: Register settings for dst channel.
* @dst_addr: Physical destination address.
* @src_addr: Physical source address.
* *
* All DMA transfers have a source and a destination. * All DMA transfers have a source and a destination.
*/ */
@ -208,8 +214,6 @@ struct d40_phy_lli {
struct d40_phy_lli_bidir { struct d40_phy_lli_bidir {
struct d40_phy_lli *src; struct d40_phy_lli *src;
struct d40_phy_lli *dst; struct d40_phy_lli *dst;
dma_addr_t dst_addr;
dma_addr_t src_addr;
}; };
@ -271,29 +275,16 @@ struct d40_def_lcsp {
u32 lcsp1; u32 lcsp1;
}; };
/**
* struct d40_lcla_elem - Info for one LCA element.
*
* @src_id: logical channel src id
* @dst_id: logical channel dst id
* @src: LCPA formated src parameters
* @dst: LCPA formated dst parameters
*
*/
struct d40_lcla_elem {
int src_id;
int dst_id;
struct d40_log_lli *src;
struct d40_log_lli *dst;
};
/* Physical channels */ /* Physical channels */
void d40_phy_cfg(struct stedma40_chan_cfg *cfg, void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
u32 *src_cfg, u32 *dst_cfg, bool is_log); u32 *src_cfg,
u32 *dst_cfg,
bool is_log);
void d40_log_cfg(struct stedma40_chan_cfg *cfg, void d40_log_cfg(struct stedma40_chan_cfg *cfg,
u32 *lcsp1, u32 *lcsp2); u32 *lcsp1,
u32 *lcsp2);
int d40_phy_sg_to_lli(struct scatterlist *sg, int d40_phy_sg_to_lli(struct scatterlist *sg,
int sg_len, int sg_len,
@ -302,8 +293,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
dma_addr_t lli_phys, dma_addr_t lli_phys,
u32 reg_cfg, u32 reg_cfg,
u32 data_width, u32 data_width,
int psize, int psize);
bool term_int);
int d40_phy_fill_lli(struct d40_phy_lli *lli, int d40_phy_fill_lli(struct d40_phy_lli *lli,
dma_addr_t data, dma_addr_t data,
@ -323,35 +313,35 @@ void d40_phy_lli_write(void __iomem *virtbase,
/* Logical channels */ /* Logical channels */
void d40_log_fill_lli(struct d40_log_lli *lli, void d40_log_fill_lli(struct d40_log_lli *lli,
dma_addr_t data, u32 data_size, dma_addr_t data,
u32 lli_next_off, u32 reg_cfg, u32 data_size,
u32 reg_cfg,
u32 data_width, u32 data_width,
bool term_int, bool addr_inc); bool addr_inc);
int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, int d40_log_sg_to_dev(struct scatterlist *sg,
struct scatterlist *sg,
int sg_len, int sg_len,
struct d40_log_lli_bidir *lli, struct d40_log_lli_bidir *lli,
struct d40_def_lcsp *lcsp, struct d40_def_lcsp *lcsp,
u32 src_data_width, u32 src_data_width,
u32 dst_data_width, u32 dst_data_width,
enum dma_data_direction direction, enum dma_data_direction direction,
bool term_int, dma_addr_t dev_addr, int max_len, dma_addr_t dev_addr);
int llis_per_log);
int d40_log_lli_write(struct d40_log_lli_full *lcpa, int d40_log_sg_to_lli(struct scatterlist *sg,
struct d40_log_lli *lcla_src,
struct d40_log_lli *lcla_dst,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
int llis_per_log);
int d40_log_sg_to_lli(int lcla_id,
struct scatterlist *sg,
int sg_len, int sg_len,
struct d40_log_lli *lli_sg, struct d40_log_lli *lli_sg,
u32 lcsp13, /* src or dst*/ u32 lcsp13, /* src or dst*/
u32 data_width, u32 data_width);
bool term_int, int max_len, int llis_per_log);
void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
int next);
void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
struct d40_log_lli *lli_dst,
struct d40_log_lli *lli_src,
int next);
#endif /* STE_DMA40_LLI_H */ #endif /* STE_DMA40_LLI_H */


@ -759,7 +759,7 @@ static int __devinit td_probe(struct platform_device *pdev)
pdata->channels + i; pdata->channels + i;
/* even channels are RX, odd are TX */ /* even channels are RX, odd are TX */
if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) { if ((i % 2) == pchan->rx) {
dev_err(&pdev->dev, "Wrong channel configuration\n"); dev_err(&pdev->dev, "Wrong channel configuration\n");
err = -EINVAL; err = -EINVAL;
goto err_tasklet_kill; goto err_tasklet_kill;


@ -0,0 +1,222 @@
/*
* linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
*
* Copyright (C) 2005 ARM Ltd
* Copyright (C) 2010 ST-Ericsson SA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* pl08x information required by platform code
*
* Please credit ARM.com
* Documentation: ARM DDI 0196D
*
*/
#ifndef AMBA_PL08X_H
#define AMBA_PL08X_H
/* We need sizes of structs from this header */
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
/**
* struct pl08x_channel_data - data structure to pass info between
* platform and PL08x driver regarding channel configuration
* @bus_id: name of this device channel, not just a device name since
* devices may have more than one channel e.g. "foo_tx"
* @min_signal: the minimum DMA signal number to be muxed in for this
* channel (for platforms supporting muxed signals). If you have
* static assignments, make sure this is set to the assigned signal
* number; the PL08x has 16 possible signals, numbered 0 thru 15, so
* when these are not enough they often get muxed (in hardware)
* disabling simultaneous use of the same channel for two devices.
* @max_signal: the maximum DMA signal number to be muxed in for
* the channel. Set to the same as min_signal for
* devices with static assignments
* @muxval: a number usually used to poke into some mux register to
* mux in the signal to this channel
* @cctl_opt: default options for the channel control register
* @addr: source/target address in physical memory for this DMA channel,
* can be the address of a FIFO register for burst requests for example.
* This can be left undefined if the PrimeCell API is used for configuring
* this.
* @circular_buffer: whether the buffer passed in is circular and
* shall simply be looped round round (like a record baby round
* round round round)
* @single: the device connected to this channel will request single
* DMA transfers, not bursts. (Bursts are default.)
*/
struct pl08x_channel_data {
char *bus_id;
int min_signal;
int max_signal;
u32 muxval;
u32 cctl;
u32 ccfg;
dma_addr_t addr;
bool circular_buffer;
bool single;
};
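
A hedged example of board code filling in one slave channel; every value below is invented for illustration:

    static struct pl08x_channel_data board_slave_channels[] = {
        {
            .bus_id = "uart0_tx",   /* matched when requesting the chan */
            .min_signal = 4,        /* statically assigned signal */
            .max_signal = 4,
            .muxval = 0,
            .cctl = 0,
            .ccfg = 0,
            .addr = 0x80120000,     /* peripheral TX FIFO (made up) */
            .single = true,         /* device requests single transfers */
        },
    };
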
/**
* struct pl08x_bus_data - information on the source or destination
* busses for a transfer
* @addr: current address
* @maxwidth: the maximum width of a transfer on this bus
* @buswidth: the width of this bus in bytes: 1, 2 or 4
* @fill_bytes: bytes required to fill to the next bus memory
* boundary
*/
struct pl08x_bus_data {
dma_addr_t addr;
u8 maxwidth;
u8 buswidth;
u32 fill_bytes;
};
/**
* struct pl08x_phy_chan - holder for the physical channels
* @id: physical index to this channel
* @lock: a lock to use when altering an instance of this struct
* @signal: the physical signal (aka channel) serving this
* physical channel right now
* @serving: the virtual channel currently being served by this
* physical channel
*/
struct pl08x_phy_chan {
unsigned int id;
void __iomem *base;
spinlock_t lock;
int signal;
struct pl08x_dma_chan *serving;
u32 csrc;
u32 cdst;
u32 clli;
u32 cctl;
u32 ccfg;
};
/**
* struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
* @llis_bus: DMA memory address (physical) start for the LLIs
* @llis_va: virtual memory address start for the LLIs
*/
struct pl08x_txd {
struct dma_async_tx_descriptor tx;
struct list_head node;
enum dma_data_direction direction;
struct pl08x_bus_data srcbus;
struct pl08x_bus_data dstbus;
int len;
dma_addr_t llis_bus;
void *llis_va;
struct pl08x_channel_data *cd;
bool active;
/*
* Settings to be put into the physical channel when we
* trigger this txd
*/
u32 csrc;
u32 cdst;
u32 clli;
u32 cctl;
};
/**
* struct pl08x_dma_chan_state - holds the PL08x specific virtual
* channel states
* @PL08X_CHAN_IDLE: the channel is idle
* @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
* channel and is running a transfer on it
* @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
* channel, but the transfer is currently paused
* @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
* channel to become available (only pertains to memcpy channels)
*/
enum pl08x_dma_chan_state {
PL08X_CHAN_IDLE,
PL08X_CHAN_RUNNING,
PL08X_CHAN_PAUSED,
PL08X_CHAN_WAITING,
};
/**
* struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
* @chan: wrapped abstract channel
* @phychan: the physical channel utilized by this channel, if there is one
* @tasklet: tasklet scheduled by the IRQ to handle actual work etc
* @name: name of channel
* @cd: channel platform data
* @runtime_addr: address for RX/TX according to the runtime config
* @runtime_direction: current direction of this channel according to
* runtime config
* @lc: last completed transaction on this channel
* @desc_list: queued transactions pending on this channel
* @at: active transaction on this channel
* @lockflags: sometimes we let a lock last between two function calls,
* especially prep/submit, and then we need to store the IRQ flags
* in the channel state, here
* @lock: a lock for this channel data
* @host: a pointer to the host (internal use)
* @state: whether the channel is idle, paused, running etc
* @slave: whether this channel is a device (slave) or for memcpy
* @waiting: a TX descriptor on this channel which is waiting for
* a physical channel to become available
*/
struct pl08x_dma_chan {
struct dma_chan chan;
struct pl08x_phy_chan *phychan;
struct tasklet_struct tasklet;
char *name;
struct pl08x_channel_data *cd;
dma_addr_t runtime_addr;
enum dma_data_direction runtime_direction;
atomic_t last_issued;
dma_cookie_t lc;
struct list_head desc_list;
struct pl08x_txd *at;
unsigned long lockflags;
spinlock_t lock;
void *host;
enum pl08x_dma_chan_state state;
bool slave;
struct pl08x_txd *waiting;
};
/**
* struct pl08x_platform_data - the platform configuration for the
* PL08x PrimeCells.
* @slave_channels: the channels defined for the different devices on the
* platform, all inclusive, including multiplexed channels. The available
* physical channels will be multiplexed around these signals as they
* are requested, just enumerate all possible channels.
* @get_signal: request a physical signal to be used for a DMA
* transfer immediately: if there is some multiplexing or similar blocking
* the use of the channel the transfer can be denied by returning
* less than zero, else it returns the allocated signal number
* @put_signal: indicate to the platform that this physical signal is not
* running any DMA transfer and multiplexing can be recycled
* @bus_bit_lli: bit 0 of the address indicates which AHB bus master the
* LLI addresses are on: 0 selects master 1, 1 selects master 2.
*/
struct pl08x_platform_data {
struct pl08x_channel_data *slave_channels;
unsigned int num_slave_channels;
struct pl08x_channel_data memcpy_channel;
int (*get_signal)(struct pl08x_dma_chan *);
void (*put_signal)(struct pl08x_dma_chan *);
};
#ifdef CONFIG_AMBA_PL08X
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id);
#else
static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
return false;
}
#endif
#endif /* AMBA_PL08X_H */
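
Clients find a PL08x channel by passing the bus_id from the platform data through the generic channel-request machinery, assuming pl08x_filter_id() matches on that string; a sketch reusing the invented "uart0_tx" name from above:

    static struct dma_chan *my_get_chan(void)
    {
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        /* NULL means no match; the caller would fall back to PIO */
        return dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
    }
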


@ -64,13 +64,15 @@ enum dma_transaction_type {
DMA_PQ_VAL, DMA_PQ_VAL,
DMA_MEMSET, DMA_MEMSET,
DMA_INTERRUPT, DMA_INTERRUPT,
DMA_SG,
DMA_PRIVATE, DMA_PRIVATE,
DMA_ASYNC_TX, DMA_ASYNC_TX,
DMA_SLAVE, DMA_SLAVE,
DMA_CYCLIC,
}; };
/* last transaction type for creation of the capabilities mask */ /* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_SLAVE + 1) #define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
/** /**
@ -119,12 +121,15 @@ enum dma_ctrl_flags {
* configuration data in statically from the platform). An additional * configuration data in statically from the platform). An additional
* argument of struct dma_slave_config must be passed in with this * argument of struct dma_slave_config must be passed in with this
* command. * command.
* @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
* into external start mode.
*/ */
enum dma_ctrl_cmd { enum dma_ctrl_cmd {
DMA_TERMINATE_ALL, DMA_TERMINATE_ALL,
DMA_PAUSE, DMA_PAUSE,
DMA_RESUME, DMA_RESUME,
DMA_SLAVE_CONFIG, DMA_SLAVE_CONFIG,
FSLDMA_EXTERNAL_START,
}; };
/** /**
@ -316,14 +321,14 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback; dma_async_tx_callback callback;
void *callback_param; void *callback_param;
#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent; struct dma_async_tx_descriptor *parent;
spinlock_t lock; spinlock_t lock;
#endif #endif
}; };
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd) static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{ {
} }
@ -422,6 +427,9 @@ struct dma_tx_state {
* @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation * @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
* be called after period_len bytes have been transferred.
* @device_control: manipulate all pending operations on a channel, returns * @device_control: manipulate all pending operations on a channel, returns
* zero or error code * zero or error code
* @device_tx_status: poll for transaction completion, the optional * @device_tx_status: poll for transaction completion, the optional
@ -473,11 +481,19 @@ struct dma_device {
unsigned long flags); unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags); struct dma_chan *chan, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)( struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl, struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction, unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags); unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_data_direction direction);
int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg); unsigned long arg);
@ -487,6 +503,40 @@ struct dma_device {
void (*device_issue_pending)(struct dma_chan *chan); void (*device_issue_pending)(struct dma_chan *chan);
}; };
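
device_prep_dma_cyclic targets audio-style transfers: one buffer of buf_len bytes replayed endlessly, with the callback firing after every period_len bytes. No inline wrapper is added for it here, so a client would invoke the op directly; a hedged sketch (a real user would also set txd->callback to its period-elapsed handler):

    /* Sketch of starting a cyclic (audio) transfer; names hypothetical. */
    static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
                               size_t buf_len, size_t period_len)
    {
        struct dma_async_tx_descriptor *txd;

        txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
                                                   period_len,
                                                   DMA_TO_DEVICE);
        if (!txd)
            return -EBUSY;
        dmaengine_submit(txd);
        dma_async_issue_pending(chan);
        return 0;
    }
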
static inline int dmaengine_device_control(struct dma_chan *chan,
enum dma_ctrl_cmd cmd,
unsigned long arg)
{
return chan->device->device_control(chan, cmd, arg);
}
static inline int dmaengine_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
(unsigned long)config);
}
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
}
static inline int dmaengine_pause(struct dma_chan *chan)
{
return dmaengine_device_control(chan, DMA_PAUSE, 0);
}
static inline int dmaengine_resume(struct dma_chan *chan)
{
return dmaengine_device_control(chan, DMA_RESUME, 0);
}
static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
return desc->tx_submit(desc);
}
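
These wrappers give slave drivers one obvious call per dma_ctrl_cmd instead of open-coded device_control() invocations. For example, pausing a channel around a reconfiguration, where the controller supports it:

    /* Sketch: controllers without DMA_PAUSE support simply return an error. */
    static void my_requeue(struct dma_chan *chan)
    {
        if (dmaengine_pause(chan) == 0) {
            /* ... adjust the dma_slave_config, requeue work ... */
            dmaengine_resume(chan);
        }
    }
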
static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{ {
size_t mask; size_t mask;
@ -606,11 +656,11 @@ static inline void net_dmaengine_put(void)
#ifdef CONFIG_ASYNC_TX_DMA #ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get() dmaengine_get() #define async_dmaengine_get() dmaengine_get()
#define async_dmaengine_put() dmaengine_put() #define async_dmaengine_put() dmaengine_put()
#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
#else #else
#define async_dma_find_channel(type) dma_find_channel(type) #define async_dma_find_channel(type) dma_find_channel(type)
#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */ #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
#else #else
static inline void async_dmaengine_get(void) static inline void async_dmaengine_get(void)
{ {


@ -27,14 +27,7 @@
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
/*DMA transaction width, src and dstn width would be same #define DMA_PREP_CIRCULAR_LIST (1 << 10)
The DMA length must be width aligned,
for 32 bit width the length must be 32 bit (4bytes) aligned only*/
enum intel_mid_dma_width {
LNW_DMA_WIDTH_8BIT = 0x0,
LNW_DMA_WIDTH_16BIT = 0x1,
LNW_DMA_WIDTH_32BIT = 0x2,
};
/*DMA mode configurations*/ /*DMA mode configurations*/
enum intel_mid_dma_mode { enum intel_mid_dma_mode {
@ -69,18 +62,15 @@ enum intel_mid_dma_msize {
* @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem) * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
* @src_msize: Source DMA burst size * @src_msize: Source DMA burst size
* @dst_msize: Dst DMA burst size * @dst_msize: Dst DMA burst size
* @per_addr: Peripheral address
* @device_instance: DMA peripheral device instance; we can have multiple * @device_instance: DMA peripheral device instance; we can have multiple
* peripheral devices connected to a single DMAC * peripheral devices connected to a single DMAC
*/ */
struct intel_mid_dma_slave { struct intel_mid_dma_slave {
enum dma_data_direction dirn;
enum intel_mid_dma_width src_width; /*width of DMA src txn*/
enum intel_mid_dma_width dst_width; /*width of DMA dst txn*/
enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/ enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
enum intel_mid_dma_msize src_msize; /*size if src burst*/
enum intel_mid_dma_msize dst_msize; /*size of dst burst*/
unsigned int device_instance; /*0, 1 for peripheral instance*/ unsigned int device_instance; /*0, 1 for peripheral instance*/
struct dma_slave_config dma_slave;
}; };
#endif /*__INTEL_MID_DMA_H__*/ #endif /*__INTEL_MID_DMA_H__*/


@ -1217,6 +1217,19 @@ config ATOMIC64_SELFTEST
If unsure, say N. If unsure, say N.
config ASYNC_RAID6_TEST
tristate "Self test for hardware accelerated raid6 recovery"
depends on ASYNC_RAID6_RECOV
select ASYNC_MEMCPY
---help---
This is a one-shot self test that permutes through the
recovery of all the possible two-disk failure scenarios for an
N-disk array. Recovery is performed with the asynchronous
raid6 recovery routines, and will optionally use an offload
engine if one is available.
If unsure, say N.
source "samples/Kconfig" source "samples/Kconfig"
source "lib/Kconfig.kgdb" source "lib/Kconfig.kgdb"