
Merge branch 'devel-stable' into for-next

Russell King 2014-06-05 12:36:22 +01:00
commit bd63ce27d9
7 changed files with 218 additions and 9 deletions

arch/arm/include/asm/dma-mapping.h

@@ -58,21 +58,37 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
if (dev)
pfn -= dev->dma_pfn_offset;
return (dma_addr_t)__pfn_to_bus(pfn);
}
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
- return __bus_to_pfn(addr);
+ unsigned long pfn = __bus_to_pfn(addr);
+ if (dev)
+ pfn += dev->dma_pfn_offset;
+ return pfn;
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
if (dev) {
unsigned long pfn = dma_to_pfn(dev, addr);
return phys_to_virt(__pfn_to_phys(pfn));
}
return (void *)__bus_to_virt((unsigned long)addr);
}
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
if (dev)
return pfn_to_dma(dev, virt_to_pfn(addr));
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
@@ -105,6 +121,13 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#define dma_max_pfn(dev) dma_max_pfn(dev)
static inline int set_arch_dma_coherent_ops(struct device *dev)
{
set_dma_ops(dev, &arm_coherent_dma_ops);
return 0;
}
#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
unsigned int offset = paddr & ~PAGE_MASK;
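
Illustrative sketch, not part of this commit: the patched pfn_to_dma()/dma_to_pfn() pair above amounts to a constant pfn-sized shift between the CPU and bus views of memory. The standalone program below reproduces that arithmetic with a made-up dma_pfn_offset and an identity __pfn_to_bus()/__bus_to_pfn() mapping; both are assumptions chosen only to keep the example self-contained.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel types and macros used above. */
typedef uint64_t dma_addr_t;
#define PAGE_SHIFT 12
#define __pfn_to_bus(pfn)  ((dma_addr_t)(pfn) << PAGE_SHIFT)	/* identity bus mapping assumed */
#define __bus_to_pfn(addr) ((unsigned long)((addr) >> PAGE_SHIFT))

struct device { unsigned long dma_pfn_offset; };

/* Same arithmetic as the patched helpers above. */
static dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return __pfn_to_bus(pfn);
}

static unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long pfn = __bus_to_pfn(addr);

	if (dev)
		pfn += dev->dma_pfn_offset;
	return pfn;
}

int main(void)
{
	/* Example: RAM starting at CPU address 0x80000000 is visible to the
	 * device at bus address 0x0, so dma_pfn_offset = 0x80000000 >> 12. */
	struct device dev = { .dma_pfn_offset = 0x80000 };
	unsigned long cpu_pfn = 0x80123;
	dma_addr_t bus = pfn_to_dma(&dev, cpu_pfn);

	printf("cpu pfn %#lx -> bus address %#llx\n",
	       cpu_pfn, (unsigned long long)bus);
	assert(dma_to_pfn(&dev, bus) == cpu_pfn);	/* round trip is lossless */
	return 0;
}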

arch/arm/mm/dma-mapping.c

@@ -885,7 +885,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
- unsigned long paddr;
+ phys_addr_t paddr;
dma_cache_maint_page(page, off, size, dir, dmac_map_area);
@@ -901,7 +901,7 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
- unsigned long paddr = page_to_phys(page) + off;
+ phys_addr_t paddr = page_to_phys(page) + off;
/* FIXME: non-speculating: not required */
/* in any case, don't bother invalidating if DMA to device */
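
Aside, not part of the diff: the unsigned long to phys_addr_t switch matters on 32-bit ARM with LPAE, where phys_addr_t is 64 bits wide while unsigned long is 32, so the old type could silently truncate physical addresses above 4 GiB. A minimal userspace illustration, using uint32_t to stand in for a 32-bit unsigned long:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t paddr = 0x100000000ULL + 0x2000;	/* a page just above 4 GiB */
	uint32_t truncated = (uint32_t)paddr;		/* what a 32-bit unsigned long keeps */

	printf("phys_addr_t view:          %#" PRIx64 "\n", paddr);
	printf("32-bit unsigned long view: %#" PRIx32 "   (top bits lost)\n", truncated);
	return 0;
}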

drivers/of/address.c

@@ -721,3 +721,113 @@ void __iomem *of_iomap(struct device_node *np, int index)
return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);
/**
* of_dma_get_range - Get DMA range info
* @np: device node to get DMA range info
* @dma_addr: pointer to store initial DMA address of DMA range
* @paddr: pointer to store initial CPU address of DMA range
* @size: pointer to store size of DMA range
*
* Look in the bottom-up direction for the first "dma-ranges" property
* and parse it.
* dma-ranges format:
* DMA addr (dma_addr) : naddr cells
* CPU addr (phys_addr_t) : pna cells
* size : nsize cells
*
* It returns -ENODEV if the "dma-ranges" property was not found
* for this device in DT.
*/
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
struct device_node *node = of_node_get(np);
const __be32 *ranges = NULL;
int len, naddr, nsize, pna;
int ret = 0;
u64 dmaaddr;
if (!node)
return -EINVAL;
while (1) {
naddr = of_n_addr_cells(node);
nsize = of_n_size_cells(node);
node = of_get_next_parent(node);
if (!node)
break;
ranges = of_get_property(node, "dma-ranges", &len);
/* Ignore empty ranges, they imply no translation required */
if (ranges && len > 0)
break;
/*
* At least an empty dma-ranges property has to be defined for the
* parent node if DMA is supported
*/
if (!ranges)
break;
}
if (!ranges) {
pr_debug("%s: no dma-ranges found for node(%s)\n",
__func__, np->full_name);
ret = -ENODEV;
goto out;
}
len /= sizeof(u32);
pna = of_n_addr_cells(node);
/* dma-ranges format:
* DMA addr : naddr cells
* CPU addr : pna cells
* size : nsize cells
*/
dmaaddr = of_read_number(ranges, naddr);
*paddr = of_translate_dma_address(np, ranges);
if (*paddr == OF_BAD_ADDR) {
pr_err("%s: translation of DMA address(%pad) to CPU address failed node(%s)\n",
__func__, dma_addr, np->full_name);
ret = -EINVAL;
goto out;
}
*dma_addr = dmaaddr;
*size = of_read_number(ranges + naddr + pna, nsize);
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
*dma_addr, *paddr, *size);
out:
of_node_put(node);
return ret;
}
EXPORT_SYMBOL_GPL(of_dma_get_range);
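
Illustrative sketch, not part of this commit: the standalone program below reads a made-up dma-ranges property, <0x0 0x80000000 0x40000000> with one cell each for the child (DMA) address, parent (CPU) address and size, the same way of_dma_get_range() walks its cells. The real code obtains the CPU address via of_translate_dma_address(); with a single bus level, as assumed here, that translation is the identity, so a direct read of the parent-address cells gives the same result.

#include <arpa/inet.h>	/* ntohl()/htonl(): device-tree cells are big-endian */
#include <stdint.h>
#include <stdio.h>

/* Read 'n' consecutive big-endian 32-bit cells as one number, like the
 * kernel's of_read_number(). */
static uint64_t read_number(const uint32_t *cells, int n)
{
	uint64_t r = 0;

	while (n--)
		r = (r << 32) | ntohl(*cells++);
	return r;
}

int main(void)
{
	/* Hypothetical property: dma-ranges = <0x0 0x80000000 0x40000000>;
	 * with naddr = pna = nsize = 1 cell. */
	const uint32_t ranges[] = {
		htonl(0x00000000),	/* DMA (bus) address */
		htonl(0x80000000),	/* CPU address       */
		htonl(0x40000000),	/* size: 1 GiB       */
	};
	int naddr = 1, pna = 1, nsize = 1;
	uint64_t dma_addr = read_number(ranges, naddr);
	uint64_t paddr    = read_number(ranges + naddr, pna);
	uint64_t size     = read_number(ranges + naddr + pna, nsize);

	printf("dma_addr(%#llx) cpu_addr(%#llx) size(%#llx)\n",
	       (unsigned long long)dma_addr, (unsigned long long)paddr,
	       (unsigned long long)size);
	return 0;
}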
/**
* of_dma_is_coherent - Check if device is coherent
* @np: device node
*
* It returns true if a "dma-coherent" property was found
* for this device in DT.
*/
bool of_dma_is_coherent(struct device_node *np)
{
struct device_node *node = of_node_get(np);
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
of_node_put(node);
return true;
}
node = of_get_next_parent(node);
}
of_node_put(node);
return false;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);
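
Illustrative sketch, not part of this commit: of_dma_is_coherent() walks from the device node towards the root, so a "dma-coherent" property on any ancestor (typically the bus node) marks the device as coherent. The toy program below mimics that upward walk over a hand-built parent-linked structure; it leaves out the of_node_get()/of_get_next_parent() reference counting of the real code, and the node names are invented.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for struct device_node: a parent link plus a flag saying
 * whether the node carries a "dma-coherent" property. */
struct node {
	const char *name;
	struct node *parent;
	bool has_dma_coherent;
};

/* Same walk as of_dma_is_coherent(): the property on any ancestor wins. */
static bool is_dma_coherent(const struct node *np)
{
	for (; np; np = np->parent)
		if (np->has_dma_coherent)
			return true;
	return false;
}

int main(void)
{
	struct node soc = { "soc", NULL, true };	/* bus node has dma-coherent */
	struct node eth = { "ethernet", &soc, false };	/* the device itself does not */

	printf("%s coherent? %s\n", eth.name, is_dma_coherent(&eth) ? "yes" : "no");
	return 0;
}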

drivers/of/platform.c

@@ -188,6 +188,64 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
EXPORT_SYMBOL(of_device_alloc);
/**
* of_dma_configure - Setup DMA configuration
* @dev: Device to apply DMA configuration
*
* Try to get the device's DMA configuration from DT and update it
* accordingly.
*
* If platform code needs to use its own special DMA configuration, it
* can use a platform bus notifier and handle the BUS_NOTIFY_ADD_DEVICE
* event to fix up the DMA configuration.
*/
static void of_dma_configure(struct platform_device *pdev)
{
u64 dma_addr, paddr, size;
int ret;
struct device *dev = &pdev->dev;
#if defined(CONFIG_MICROBLAZE)
pdev->archdata.dma_mask = 0xffffffffUL;
#endif
/*
* Set the default DMA mask to 32 bits. Drivers are expected to set up
* the correct supported dma_mask.
*/
dev->coherent_dma_mask = DMA_BIT_MASK(32);
/*
* Set it to coherent_dma_mask by default if the architecture
* code has not set it.
*/
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
/*
* If the dma-coherent property exists, call the arch hook to set up
* coherent DMA operations.
*/
if (of_dma_is_coherent(dev->of_node)) {
set_arch_dma_coherent_ops(dev);
dev_dbg(dev, "device is dma coherent\n");
}
/*
* If the dma-ranges property doesn't exist, just return; otherwise
* set up the DMA offset.
*/
ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size);
if (ret < 0) {
dev_dbg(dev, "no dma range information to setup\n");
return;
}
/* DMA ranges found. Calculate and set dma_pfn_offset */
dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr);
dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
}
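
Worked example, not part of this commit: for the hypothetical dma-ranges used earlier (CPU address 0x80000000 visible to the device at bus address 0x0), of_dma_configure() computes dma_pfn_offset = PFN_DOWN(0x80000000 - 0x0) = 0x80000 with 4 KiB pages, which is exactly the offset consumed by the pfn_to_dma()/dma_to_pfn() helpers in the first hunk.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption: 4 KiB pages */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	uint64_t dma_addr = 0x0;	/* bus start of the DMA window */
	uint64_t paddr = 0x80000000;	/* CPU start of the DMA window */

	printf("dma_pfn_offset = %#llx\n",
	       (unsigned long long)PFN_DOWN(paddr - dma_addr));	/* prints 0x80000 */
	return 0;
}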
/**
* of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
@@ -213,12 +271,7 @@ static struct platform_device *of_platform_device_create_pdata(
if (!dev)
return NULL;
- #if defined(CONFIG_MICROBLAZE)
- dev->archdata.dma_mask = 0xffffffffUL;
- #endif
- dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- if (!dev->dev.dma_mask)
- dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+ of_dma_configure(dev);
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;

include/linux/device.h

@@ -685,6 +685,7 @@ struct acpi_dev_node {
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
* such descriptors.
* @dma_pfn_offset: offset of the DMA memory range relative to RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
* @dma_pools: Dma pools (if dma'ble device).
@@ -750,6 +751,7 @@ struct device {
not all hardware supports
64 bit addresses for consistent
allocations such descriptors. */
unsigned long dma_pfn_offset;
struct device_dma_parameters *dma_parms;

include/linux/dma-mapping.h

@@ -123,6 +123,13 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
extern u64 dma_get_required_mask(struct device *dev);
#ifndef set_arch_dma_coherent_ops
static inline int set_arch_dma_coherent_ops(struct device *dev)
{
return 0;
}
#endif
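
Illustrative sketch, not part of this commit: this fallback relies on the kernel's usual override-by-macro pattern, the same one the ARM header above uses with "#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)". The standalone program below (hypothetical struct device and all) shows how the #ifndef compiles out the generic no-op when an arch override is present; delete the macro and the fallback takes over.

#include <stdio.h>

struct device { const char *name; };

/* Hypothetical arch header: provides the real implementation. */
static inline int set_arch_dma_coherent_ops(struct device *dev)
{
	printf("%s: installing arch coherent DMA ops\n", dev->name);
	return 0;
}
/* Self-referential function-like macro: its existence defeats the #ifndef
 * below, and at call sites it expands back to the function above. */
#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)

/* Generic header: fallback compiled only when no arch override exists. */
#ifndef set_arch_dma_coherent_ops
static inline int set_arch_dma_coherent_ops(struct device *dev)
{
	return 0;	/* no-op default */
}
#endif

int main(void)
{
	struct device d = { "eth0" };

	return set_arch_dma_coherent_ops(&d);	/* resolves to the arch version */
}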
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;

include/linux/of_address.h

@@ -63,6 +63,9 @@ extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
u64 *paddr, u64 *size);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
static inline struct device_node *of_find_matching_node_by_address(
struct device_node *from,
@@ -90,6 +93,17 @@ static inline struct of_pci_range *of_pci_range_parser_one(
{
return NULL;
}
static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr,
u64 *paddr, u64 *size)
{
return -ENODEV;
}
static inline bool of_dma_is_coherent(struct device_node *np)
{
return false;
}
#endif /* CONFIG_OF_ADDRESS */
#ifdef CONFIG_OF