
sh: introduce a sh_cacheop_vaddr helper

And use it in the maple bus code to avoid a dma API dependency.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Yoshinori Sato <ysato@users.sourceforge.jp>
Branch: hifive-unleashed-5.1
Christoph Hellwig 2018-04-18 08:53:46 +02:00
parent b2fcb677d4
commit 47fcae0d2a
3 changed files with 12 additions and 8 deletions

arch/sh/include/asm/cacheflush.h

@@ -101,5 +101,12 @@ void kunmap_coherent(void *kvaddr);
 
 void cpu_cache_init(void);
 
+static inline void *sh_cacheop_vaddr(void *vaddr)
+{
+	if (__in_29bit_mode())
+		vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+	return vaddr;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
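
For background: in 29-bit mode, sh maps physical memory both through a cached segment and an uncached alias, and the cache maintenance primitives must be handed the cached address; CAC_ADDR performs that translation, while in 32-bit mode the helper simply returns the pointer unchanged. A minimal, hypothetical caller (example_writeback_for_device is made up for illustration):

#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical caller: write back and discard the cache lines covering
 * buf before a device accesses the buffer.  sh_cacheop_vaddr() rewrites
 * the pointer to the cached alias in 29-bit mode, which is the form the
 * sh cache operations expect.
 */
static void example_writeback_for_device(void *buf, size_t len)
{
	__flush_purge_region(sh_cacheop_vaddr(buf), len);
}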

arch/sh/mm/consistent.c

@@ -74,10 +74,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 void sh_sync_dma_for_device(void *vaddr, size_t size,
 			    enum dma_data_direction direction)
 {
-	void *addr;
-
-	addr = __in_29bit_mode() ?
-	       (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
+	void *addr = sh_cacheop_vaddr(vaddr);
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:	/* invalidate only */
@@ -93,7 +90,6 @@ void sh_sync_dma_for_device(void *vaddr, size_t size,
 		BUG();
 	}
 }
-EXPORT_SYMBOL(sh_sync_dma_for_device);
 
 static int __init memchunk_setup(char *str)
 {
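
With the helper in place, sh_sync_dma_for_device reduces to a pointer fixup plus the existing direction switch, and the export can go since the maple bus code no longer calls it. A sketch of the resulting function, assuming the standard sh cache primitives (only the DMA_FROM_DEVICE case is visible in the hunk above):

#include <linux/bug.h>
#include <linux/dma-direction.h>
#include <asm/cacheflush.h>

void sh_sync_dma_for_device(void *vaddr, size_t size,
			    enum dma_data_direction direction)
{
	void *addr = sh_cacheop_vaddr(vaddr);

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(addr, size);
		break;
	default:
		BUG();
	}
}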

drivers/sh/maple/maple.c

@@ -300,8 +300,8 @@ static void maple_send(void)
 	mutex_unlock(&maple_wlist_lock);
 	if (maple_packets > 0) {
 		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
-			sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
-					       PAGE_SIZE, DMA_BIDIRECTIONAL);
+			__flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
+					     PAGE_SIZE);
 	}
 
 finish:
@@ -642,7 +642,8 @@ static void maple_dma_handler(struct work_struct *work)
 		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
 			mdev = mq->dev;
 			recvbuf = mq->recvbuf->buf;
-			sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
+			__flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
+						  0x400);
 			code = recvbuf[0];
 			kfree(mq->sendbuf);
 			list_del_init(&mq->list);
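
Note the asymmetry in the maple conversion, which preserves what the old DMA-direction arguments encoded: the send path previously asked for DMA_BIDIRECTIONAL, which maps to a purge (writeback plus invalidate), so it now calls __flush_purge_region directly, while the receive path only needs to discard stale lines before the CPU reads what the device wrote, hence __flush_invalidate_region on the cached alias. A condensed, hypothetical illustration of the two cases (example_maple_sync is made up; maple itself open-codes this as shown above):

#include <linux/types.h>
#include <asm/cacheflush.h>

static void example_maple_sync(void *sendbuf, void *recvbuf, size_t len)
{
	/* CPU filled sendbuf: flush the dirty lines out to memory and
	 * drop them before the device accesses the buffer. */
	__flush_purge_region(sendbuf, len);

	/* Device filled recvbuf: discard stale cached lines (via the
	 * cached alias) before the CPU reads the fresh data. */
	__flush_invalidate_region(sh_cacheop_vaddr(recvbuf), len);
}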