sh: Optimized copy_{to,from}_user_page() for SH-4.

This moves copy_{to,from}_user_page() out-of-line on SH-4 and
converts them to the kmap_coherent() API. Based on the MIPS
implementation.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt, 2007-11-05 16:18:16 +09:00
branch hifive-unleashed-5.1
parent 7747b9a493
commit ba1789efea
2 changed files with 43 additions and 27 deletions
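
Background for the diffs below: the SH-4 data cache is virtually indexed and each way covers more than one page, so two virtual mappings of the same physical page only share cache lines when they agree in the index bits above the page offset. CACHE_ALIAS masks exactly those bits, and both hunks build on that test. A minimal userspace sketch of the check, with an assumed CACHE_ALIAS value (16KiB ways with 4KiB pages would give 0x3000; the kernel derives the real value at CPU probe time):

#include <assert.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define CACHE_ALIAS	0x3000UL	/* assumed for illustration */

/*
 * Non-zero when the two virtual addresses would occupy different
 * cache lines for the same physical page -- the test the new
 * clear_user_page() uses to decide whether a writeback is needed.
 */
static int aliases(unsigned long uvaddr, unsigned long kvaddr)
{
	return (((uvaddr & PAGE_MASK) ^ kvaddr) & CACHE_ALIAS) != 0;
}

int main(void)
{
	assert(!aliases(0x00401000, 0x88401000));	/* same colour */
	assert( aliases(0x00402000, 0x88401000));	/* would alias */
	return 0;
}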

arch/sh/mm/pg-sh4.c

@@ -52,33 +52,39 @@ static inline void kunmap_coherent(struct page *page)
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
 	__set_bit(PG_mapped, &page->flags);
-	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
-		clear_page(to);
-	else {
-		void *vto = kmap_coherent(page, address);
-		__clear_user_page(vto, to);
-		kunmap_coherent(vto);
-	}
+
+	clear_page(to);
+	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
+		__flush_wback_region(to, PAGE_SIZE);
 }
 
-/*
- * copy_user_page
- * @to: P1 address
- * @from: P1 address
- * @address: U0 address to be mapped
- * @page: page (virt_to_page(to))
- */
-void copy_user_page(void *to, void *from, unsigned long address,
-		    struct page *page)
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, const void *src,
+		       unsigned long len)
 {
+	void *vto;
+
 	__set_bit(PG_mapped, &page->flags);
-	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
-		copy_page(to, from);
-	else {
-		void *vfrom = kmap_coherent(page, address);
-		__copy_user_page(vfrom, from, to);
-		kunmap_coherent(vfrom);
-	}
+
+	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	memcpy(vto, src, len);
+	kunmap_coherent(vto);
+
+	if (vma->vm_flags & VM_EXEC)
+		flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+			 unsigned long vaddr, void *dst, const void *src,
+			 unsigned long len)
+{
+	void *vfrom;
+
+	__set_bit(PG_mapped, &page->flags);
+
+	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	memcpy(dst, vfrom, len);
+	kunmap_coherent(vfrom);
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
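
The point of the kmap_coherent() conversion above: the temporary kernel mapping is chosen so its CACHE_ALIAS bits match the user virtual address, meaning the memcpy() through it touches the very cache lines the user mapping indexes, so no flush is needed on the read side, and only a flush_cache_page() for executable mappings on the write side to keep the icache coherent. The `+ (vaddr & ~PAGE_MASK)` term re-applies the byte offset within the page, since kmap_coherent() maps the whole page. A userspace illustration of the colour-matched placement (not the real SH-4 implementation; the base address is a stand-in for the kernel's fixed mapping window and must itself be alias-aligned):

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define CACHE_ALIAS	0x3000UL	/* assumed, as in the sketch above */

/* Place the temporary mapping at base + colour of the user address. */
static unsigned long coherent_addr(unsigned long base, unsigned long vaddr)
{
	return base + (vaddr & CACHE_ALIAS);	/* base & CACHE_ALIAS == 0 */
}

int main(void)
{
	unsigned long user = 0x00403000UL;
	unsigned long vto  = coherent_addr(0x7f000000UL, user);

	/* Same colour: the copy and the user access hit the same lines,
	 * so the expression below evaluates to 0 (no alias). */
	return (((user & PAGE_MASK) ^ vto) & CACHE_ALIAS) != 0;
}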

include/asm-sh/cacheflush.h

@@ -43,21 +43,31 @@ extern void __flush_purge_region(void *start, int size);
 extern void __flush_invalidate_region(void *start, int size);
 #endif
 
-#define flush_cache_vmap(start, end)		flush_cache_all()
-#define flush_cache_vunmap(start, end)		flush_cache_all()
+#ifdef CONFIG_CPU_SH4
+extern void copy_to_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+extern void copy_from_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
+#else
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 	} while (0)
+#endif
+
+#define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vunmap(start, end)		flush_cache_all()
 
 #define HAVE_ARCH_UNMAPPED_AREA
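
Design note on the header change: the generic fallback is a macro with exactly the same call signature as the new SH-4 extern functions, so no caller changes are needed; generic code keeps calling copy_to_user_page() the same way, while other SH variants retain the unconditional flush-around-memcpy behaviour. For illustration, a caller sketch loosely modelled on the access_process_vm() pattern in mm/memory.c (names and error handling simplified, so treat the details as assumptions):

/*
 * Write 'len' bytes of 'buf' into a page of another task's address
 * space, keeping caches coherent via the arch hook.
 */
static void write_to_task_page(struct vm_area_struct *vma, struct page *page,
			       unsigned long addr, const void *buf,
			       unsigned long len)
{
	void *maddr = kmap(page);

	copy_to_user_page(vma, page, addr,
			  maddr + (addr & ~PAGE_MASK), buf, len);
	set_page_dirty_lock(page);
	kunmap(page);
}

On SH-4 the call above now reaches the out-of-line function from pg-sh4.c, which flushes only for VM_EXEC mappings; everywhere else it expands inline to the flush_cache_page()/memcpy()/flush_icache_user_range() sequence as before.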