OpenRISC updates for 4.19

Just one change for 4.19:
  - Refactors from Christoph Hellwig to use generic DMA facilities
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJbfS0+AAoJEMOzHC1eZifkq9YP/1L9gyp8eolVe0q90rWdNIBf
 BxqfLZv2kyMJZsj0GxycXHMnob4q+cJbPxQlxJLk02tGO97l+ze9FfH4FL0gqYeM
 Ei2HeOJMxFX6w3/vHIw2UzzYXHoBpe06Kp1Za9GyJlyxiJ/MYvwASbC/LNo1/HxX
 exg7epXOAZFxmivWAPJiUcyNnMRLqSgzOGlg7HLfX7CTmjPkqDqDLNzwY/CgLxCW
 LuzEAFC/bPFadzfGOlMnww5j7zzobgjC9Zj+dOdZBnVmnIO8NdhFmiDTnKXVQsUr
 C+34Tf4LfiL/BigTAo9bjPJeY5fCsvcfbqnTCHXgCTBp80suXWjv8YZjfXOd6stI
 MU72RMwx5pFo6rsbcWAUULzHnAoEAeeyMWNbca/TJEMV5X1ift6R7qxd3ojrsD+R
 +I4wd00BJn0THrS9CUeGbqIOc8HYhpJialz8ZR2ucPVFGNFJoaGK6RUsPNZFjuq6
 ErxOG4IcHZM9jJYCZJnw8uYNgDoZN00j3LBvpfC0QLHTMyDOkJ+eiN5e5V46RxIZ
 EPKgVX5SYp8L4sYQ8qR0HJsjgxNZzUSS4ccmt+hC2e7r4p/5L2CkeOFWArg4ZQX2
 n27SUcy6RU/8X8YSjbS+w1zNue4uoU7JnTaMhE5GVOzhaHPTiHq78IL+QnkSUkUj
 m5z8iBp3w4QZ3FezP7Hj
 =C60y
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://github.com/openrisc/linux

Pull OpenRISC update from Stafford Horne:
 "Just one change for 4.19: refactoring from Christoph Hellwig to use
  generic DMA facilities"

* tag 'for-linus' of git://github.com/openrisc/linux:
  openrisc: use generic dma_noncoherent_ops
  openrisc: fix cache maintainance the the sync_single_for_device DMA operation
  openrisc: remove the no-op unmap_page and unmap_sg DMA operations
  openrisc: remove the sync_single_for_cpu DMA operation
Linus Torvalds 2018-08-23 14:09:37 -07:00
commit 2ab054fd1f
4 changed files with 12 additions and 133 deletions
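For context (an editorial aside, not part of the original commit message): with DMA_NONCOHERENT_OPS and ARCH_HAS_SYNC_DMA_FOR_DEVICE selected, the common kernel code owns the dma_map_ops plumbing and the architecture only supplies arch_dma_alloc(), arch_dma_free(), and arch_sync_dma_for_device(), which is what the dma.c diff below reduces to. The stand-alone C sketch that follows merely models that split; every type and the dma_map_single() wrapper are stubbed stand-ins with simplified signatures, not the kernel's actual implementation.

```c
/*
 * Illustrative model of the dma_noncoherent_ops split: a "generic" layer
 * owns the mapping path and calls back into the few hooks the arch still
 * implements after this series (arch_dma_alloc, arch_dma_free,
 * arch_sync_dma_for_device). All types below are stand-ins; signatures
 * are simplified relative to the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

typedef uintptr_t dma_addr_t;
typedef uintptr_t phys_addr_t;
enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };
struct device { const char *name; };

/* --- arch hooks: the only pieces the architecture provides --- */

static void *arch_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle)
{
	/* A real port would also remap the pages uncached here. */
	void *va = malloc(size);

	*dma_handle = (dma_addr_t)va;	/* stand-in for a bus address */
	return va;
}

static void arch_dma_free(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle)
{
	free(vaddr);
}

static void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr,
				     size_t size, enum dma_data_direction dir)
{
	/* OpenRISC flushes or invalidates dcache lines here (SPR_DCBFR/SPR_DCBIR). */
	printf("sync %zu bytes at %#lx for device (dir=%d)\n",
	       size, (unsigned long)addr, (int)dir);
}

/* --- "generic" layer: what the arch no longer has to implement --- */

static dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
				 enum dma_data_direction dir)
{
	dma_addr_t addr = (dma_addr_t)ptr;	/* stand-in for virt_to_phys() */

	/* Non-coherent device: the generic code does the cache maintenance. */
	arch_sync_dma_for_device(dev, (phys_addr_t)addr, size, dir);
	return addr;
}

int main(void)
{
	struct device dev = { .name = "demo" };
	dma_addr_t handle;
	void *buf = arch_dma_alloc(&dev, 64, &handle);

	memset(buf, 0, 64);
	dma_map_single(&dev, buf, 64, DMA_TO_DEVICE);
	arch_dma_free(&dev, 64, buf, handle);
	return 0;
}
```

This is why the no-op unmap_page/unmap_sg callbacks and the per-arch dma_map_ops table can simply be deleted: the generic layer provides them.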

arch/openrisc/Kconfig

@@ -6,6 +6,8 @@
config OPENRISC
def_bool y
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select DMA_NONCOHERENT_OPS
select OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN

arch/openrisc/include/asm/Kbuild

@@ -7,6 +7,7 @@ generic-y += current.h
generic-y += device.h
generic-y += div64.h
generic-y += dma.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h

arch/openrisc/include/asm/dma-mapping.h

@@ -1,35 +0,0 @@
/*
* OpenRISC Linux
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* OpenRISC implementation:
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __ASM_OPENRISC_DMA_MAPPING_H
#define __ASM_OPENRISC_DMA_MAPPING_H
/*
* See Documentation/DMA-API-HOWTO.txt and
* Documentation/DMA-API.txt for documentation.
*/
#include <linux/dma-debug.h>
#include <linux/dma-mapping.h>
extern const struct dma_map_ops or1k_dma_map_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
return &or1k_dma_map_ops;
}
#endif /* __ASM_OPENRISC_DMA_MAPPING_H */

arch/openrisc/kernel/dma.c

@@ -19,9 +19,7 @@
* the only thing implemented properly. The rest need looking into...
*/
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/dma-noncoherent.h>
#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
@@ -80,10 +78,9 @@ page_clear_nocache(pte_t *pte, unsigned long addr,
* is being ignored for now; uncached but write-combined memory is a
* missing feature of the OR1K.
*/
static void *
or1k_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs)
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
unsigned long va;
void *page;
@@ -115,9 +112,9 @@ or1k_dma_alloc(struct device *dev, size_t size,
return (void *)va;
}
static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long va = (unsigned long)vaddr;
struct mm_walk walk = {
@@ -133,19 +130,12 @@ or1k_dma_free(struct device *dev, size_t size, void *vaddr,
free_pages_exact(vaddr, size);
}
static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
enum dma_data_direction dir)
{
unsigned long cl;
dma_addr_t addr = page_to_phys(page) + offset;
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return addr;
switch (dir) {
case DMA_TO_DEVICE:
/* Flush the dcache for the requested range */
@@ -167,83 +157,4 @@
*/
break;
}
return addr;
}
static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
/* Nothing special to do here... */
}
static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
s->length, dir, 0);
}
return nents;
}
static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
}
}
static void
or1k_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
unsigned long cl;
dma_addr_t addr = dma_handle;
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
/* Invalidate the dcache for the requested range */
for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBIR, cl);
}
static void
or1k_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
unsigned long cl;
dma_addr_t addr = dma_handle;
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
/* Flush the dcache for the requested range */
for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBFR, cl);
}
const struct dma_map_ops or1k_dma_map_ops = {
.alloc = or1k_dma_alloc,
.free = or1k_dma_free,
.map_page = or1k_map_page,
.unmap_page = or1k_unmap_page,
.map_sg = or1k_map_sg,
.unmap_sg = or1k_unmap_sg,
.sync_single_for_cpu = or1k_sync_single_for_cpu,
.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);