
nds32 patches for 4.17

Here is the nds32 patch set based on 4.17-rc6.
 It contains bug fixes and build error fixes for nds32.
 
 These are the LTP20170427 test results. hackbench01 may fail intermittently;
 we are still investigating this issue.
 
 Total Tests: 1902
 Total Skipped Tests: 593
 Total Failures: 420
 Kernel Version: 4.17.0-rc6-00018-ga30e7d1e37e8
 Machine Architecture: nds32
 
 Total Tests: 1902
 Total Skipped Tests: 593
 Total Failures: 419
 Kernel Version: 4.17.0-rc5-00018-g27288975a735
 Machine Architecture: nds32
 
 Signed-off-by: Greentime Hu <greentime@andestech.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.17 (GNU/Linux)
 
 iQIcBAABAgAGBQJbC2uYAAoJEHfB0l0b2JxEpToP/2JaTE+YC+0wiri4ljYB/z9G
 z9cG81tD6R6tCHoc01MdxL8xoUNoum3DMesp0M4l27wLLln+KOjs7UL3ry6CjqmO
 E3LSISkjy2JAllX2CUxN4xWm70a2QDSFKUmndw71vxljLfkvKjvEpErQyfvzU24S
 PcQ/AfOBeeMQTzePILxLkCg/atSdXMOjhVGTxB2np8B6EjrIIELjy5rwr7CD2Fw6
 +KEgKKW+/4BkVMn4liJeAX7ywA8V81yj3AWq4omEbb+hT3ugckZ36XxnmhdT0P+8
 fXQvxoVrlb2kpBhuKU6yu8aQcMJ/oMa/uUvbPrG8PhSDAllrw05s+Bn9r9d8iqfR
 EE9As6tAjrApYbxQ2K+q3qiZULKV0nhwTmhjaj30S+UnC2j+yJbpGvSyJBic9mC5
 PA2/+jaeoLwb3pfrxzaoMKc5UKPBZKZhuk81CMTK2SjUoZWF9fp/ansquBLZU1IY
 34wuKroIjsDHLgd/Xt4lCQTIg8tU/IO3Hg0jnYuvwnRwi2ocGnjW8To9e8Q5w7NV
 OKFvtIUeSBHo02OhvsmOthN2ZQTSFQDAHUromr+XZ0W8s97lN8e1WqOJYMJuGE21
 ExJ9jbpTnni4Q5Yjl0FvnwntJFgXRfAYV18W2Icn0nIrgeNFCBZ4pmH7HQAPvHBs
 conjeO3e+wJKMHj96fgP
 =qlw2
 -----END PGP SIGNATURE-----

Merge tag 'nds32-for-linus-4.17-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux

Pull nds32 fixes from Greentime Hu:
 "Bug fixes and build error fixes for nds32"

* tag 'nds32-for-linus-4.17-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux:
  nds32: Fix compiler warning, Wstringop-overflow, in vdso.c
  nds32: Disable local irq before calling cpu_dcache_wb_page in copy_user_highpage
  nds32: Flush the cache of the page at vmaddr instead of kaddr in flush_anon_page
  nds32: Correct flush_dcache_page function
  nds32: Fix the unaligned access handler
  nds32: Renaming the file for unaligned access
  nds32: To fix a cache inconsistency issue by setting correct cacheability of NTC
  nds32: To refine readability of INT_MASK_INITAIAL_VAL
  nds32: Fix the virtual address may map too much range by tlbop issue.
  nds32: Fix the allmodconfig build. To make sure CONFIG_CPU_LITTLE_ENDIAN is default y
  nds32: Fix build failed because arch_trace_hardirqs_off is changed to trace_hardirqs_off.
  nds32: Fix the unknown type u8 issue.
  nds32: Fix the symbols undefined issue by exporting them.
  nds32: Fix xfs_buf built failed by export invalidate_kernel_vmap_range and flush_kernel_vmap_range
  nds32: Fix drivers/gpu/drm/udl/udl_fb.c building error by defining PAGE_SHARED
  nds32: Fix building error of crypto/xor.c by adding xor.h
  nds32: Fix building error when CONFIG_FREEZE is enabled.
  nds32: lib: To use generic lib instead of libgcc to prevent the symbol undefined issue.
Linus Torvalds 2018-05-28 05:25:57 -07:00
commit 786b71f5b7
18 changed files with 132 additions and 36 deletions

View File

@ -9,6 +9,12 @@ config NDS32
select CLKSRC_MMIO
select CLONE_BACKWARDS
select COMMON_CLK
select GENERIC_ASHLDI3
select GENERIC_ASHRDI3
select GENERIC_LSHRDI3
select GENERIC_CMPDI2
select GENERIC_MULDI3
select GENERIC_UCMPDI2
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
@ -82,6 +88,7 @@ endmenu
menu "Kernel Features"
source "kernel/Kconfig.preempt"
source "kernel/Kconfig.freezer"
source "mm/Kconfig"
source "kernel/Kconfig.hz"
endmenu

View File

@ -1,10 +1,11 @@
comment "Processor Features"
config CPU_BIG_ENDIAN
bool "Big endian"
def_bool !CPU_LITTLE_ENDIAN
config CPU_LITTLE_ENDIAN
def_bool !CPU_BIG_ENDIAN
bool "Little endian"
default y
config HWZOL
bool "hardware zero overhead loop support"

View File

@ -23,9 +23,6 @@ export TEXTADDR
# If we have a machine-specific directory, then include it in the build.
core-y += arch/nds32/kernel/ arch/nds32/mm/
libs-y += arch/nds32/lib/
LIBGCC_PATH := \
$(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
libs-y += $(LIBGCC_PATH)
ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""'
BUILTIN_DTB := y
@ -35,8 +32,12 @@ endif
ifdef CONFIG_CPU_LITTLE_ENDIAN
KBUILD_CFLAGS += $(call cc-option, -EL)
KBUILD_AFLAGS += $(call cc-option, -EL)
LDFLAGS += $(call cc-option, -EL)
else
KBUILD_CFLAGS += $(call cc-option, -EB)
KBUILD_AFLAGS += $(call cc-option, -EB)
LDFLAGS += $(call cc-option, -EB)
endif
boot := arch/nds32/boot
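
For context on the libgcc removal above and the GENERIC_ASHLDI3/GENERIC_MULDI3 family of selects added to Kconfig: on a 32-bit target the compiler lowers 64-bit shifts, multiplies and compares into helper calls (__ashldi3, __muldi3, __ucmpdi2, ...). Once arch/nds32 stops linking libgcc, those helpers have to come from the kernel's own lib/ implementations, otherwise modules fail to load with undefined symbols. A minimal standalone sketch of the lowering, assuming a 32-bit compiler without native 64-bit shifts (the helper name is what GCC conventionally emits, not anything nds32-specific):

#include <stdio.h>

/* On 32-bit targets GCC typically turns this shift into a call to the
 * __ashldi3() helper; in the kernel that helper now comes from
 * lib/ashldi3.c (selected by GENERIC_ASHLDI3) instead of libgcc. */
static unsigned long long shift64(unsigned long long v, unsigned int n)
{
	return v << n;
}

int main(void)
{
	printf("0x%llx\n", shift64(0x12345678ULL, 8));
	return 0;
}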

View File

@ -16,6 +16,7 @@ generic-y += dma.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += export.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h
@ -49,6 +50,7 @@ generic-y += switch_to.h
generic-y += timex.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += xor.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h

View File

@ -336,7 +336,7 @@
#define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE )
#define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM )
#define INT_MASK_INITAIAL_VAL 0x10003
#define INT_MASK_INITAIAL_VAL (INT_MASK_mskDSSIM|INT_MASK_mskIDIVZE)
/******************************************************************************
* ir15: INT_PEND (Interrupt Pending Register)
@ -396,6 +396,7 @@
#define MMU_CTL_D8KB 1
#define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA )
#define MMU_CTL_CACHEABLE_NON 0
#define MMU_CTL_CACHEABLE_WB 2
#define MMU_CTL_CACHEABLE_WT 3

View File

@ -32,6 +32,8 @@ void flush_anon_page(struct vm_area_struct *vma,
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page(struct page *page);
void flush_kernel_vmap_range(void *addr, int size);
void invalidate_kernel_vmap_range(void *addr, int size);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)

View File

@ -4,6 +4,8 @@
#ifndef __ASM_NDS32_IO_H
#define __ASM_NDS32_IO_H
#include <linux/types.h>
extern void iounmap(volatile void __iomem *addr);
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)

View File

@ -27,6 +27,9 @@ extern void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *to);
void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define clear_user_highpage clear_user_highpage
#else

View File

@ -152,6 +152,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE)
#define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
#define PAGE_SHARED __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD)
#define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
#endif /* __ASSEMBLY__ */

View File

@ -118,7 +118,7 @@ common_exception_handler:
/* interrupt */
2:
#ifdef CONFIG_TRACE_IRQFLAGS
jal arch_trace_hardirqs_off
jal trace_hardirqs_off
#endif
move $r0, $sp
sethi $lp, hi20(ret_from_intr)

View File

@ -57,14 +57,32 @@ _nodtb:
isb
mtsr $r4, $L1_PPTB ! load page table pointer\n"
/* set NTC0 cacheable/writeback, mutliple page size in use */
mfsr $r3, $MMU_CTL
li $r0, #~MMU_CTL_mskNTC0
and $r3, $r3, $r0
#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0))
#ifdef CONFIG_CPU_DCACHE_DISABLE
#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON
#else
ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB)
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT
#else
#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB
#endif
#endif
/* set NTC cacheability, mutliple page size in use */
mfsr $r3, $MMU_CTL
#if CONFIG_MEMORY_START >= 0xc0000000
ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3)
#elif CONFIG_MEMORY_START >= 0x80000000
ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2)
#elif CONFIG_MEMORY_START >= 0x40000000
ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1)
#else
ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0)
#endif
#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
ori $r3, $r3, #(MMU_CTL_mskMPZIU)
#else
ori $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB)
#endif
#ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS
li $r0, #MMU_CTL_UNA
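
The #if chain above picks which NTCn field of $MMU_CTL receives the cacheability value, based on where CONFIG_MEMORY_START falls; each NTC region covers one 1 GiB quadrant of the 32-bit address space, so the selection amounts to taking the top two address bits. A small standalone sketch of that arithmetic (0xc0000000 is only an example value, not a statement about any particular board):

#include <stdio.h>

int main(void)
{
	unsigned long memory_start = 0xc0000000UL;	/* example CONFIG_MEMORY_START */
	unsigned int ntc = (memory_start >> 30) & 0x3;	/* 0..3 -> NTC0..NTC3 */

	printf("CONFIG_MEMORY_START=0x%08lx -> NTC%u\n", memory_start, ntc);
	return 0;
}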

View File

@ -293,6 +293,9 @@ void __init setup_arch(char **cmdline_p)
/* paging_init() sets up the MMU and marks all pages as reserved */
paging_init();
/* invalidate all TLB entries because the new mapping is created */
__nds32__tlbop_flua();
/* use generic way to parse */
parse_early_param();

View File

@ -9,6 +9,7 @@ void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
@ -45,3 +46,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
fpn = (unsigned long *)fpp;
}
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

View File

@ -23,7 +23,7 @@
#include <asm/vdso_timer_info.h>
#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];
extern char vdso_start, vdso_end;
extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages __ro_after_init;
static unsigned long timer_mapping_base;
@ -66,16 +66,16 @@ static int __init vdso_init(void)
int i;
struct page **vdso_pagelist;
if (memcmp(&vdso_start, "\177ELF", 4)) {
if (memcmp(vdso_start, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
return -EINVAL;
}
/* Creat a timer io mapping to get clock cycles counter */
get_timer_node_info();
vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
/* Allocate the vDSO pagelist */
vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
@ -83,7 +83,7 @@ static int __init vdso_init(void)
return -ENOMEM;
for (i = 0; i < vdso_pages; i++)
vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE);
vdso_spec[1].pages = &vdso_pagelist[0];
return 0;
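
The vdso.c change above silences -Wstringop-overflow by declaring the linker-provided symbols as incomplete arrays rather than single chars, so GCC no longer assumes memcmp() is reading past a one-byte object. A standalone sketch of the pattern, with a local array standing in for the linker-script symbols (the blob contents here are made up for the example):

#include <stdio.h>
#include <string.h>

/* Stand-ins for the vdso_start/vdso_end linker symbols; in the kernel
 * these come from the vDSO linker script. */
static char vdso_image[8] = { 0x7f, 'E', 'L', 'F' };
#define vdso_start	(vdso_image)
#define vdso_end	(vdso_image + sizeof(vdso_image))

int main(void)
{
	if (memcmp(vdso_start, "\177ELF", 4) != 0) {
		fprintf(stderr, "not an ELF blob\n");
		return 1;
	}
	printf("%td bytes of vDSO-like data\n", vdso_end - vdso_start);
	return 0;
}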

View File

@ -2,6 +2,7 @@
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/page.h>
.text
@ -16,6 +17,7 @@ ENTRY(copy_page)
popm $r2, $r10
ret
ENDPROC(copy_page)
EXPORT_SYMBOL(copy_page)
ENTRY(clear_page)
pushm $r1, $r9
@ -35,3 +37,4 @@ ENTRY(clear_page)
popm $r1, $r9
ret
ENDPROC(clear_page)
EXPORT_SYMBOL(clear_page)

View File

@ -19,7 +19,7 @@
#define RA(inst) (((inst) >> 15) & 0x1FUL)
#define RB(inst) (((inst) >> 10) & 0x1FUL)
#define SV(inst) (((inst) >> 8) & 0x3UL)
#define IMM(inst) (((inst) >> 0) & 0x3FFFUL)
#define IMM(inst) (((inst) >> 0) & 0x7FFFUL)
#define RA3(inst) (((inst) >> 3) & 0x7UL)
#define RT3(inst) (((inst) >> 6) & 0x7UL)
@ -28,6 +28,9 @@
#define RA5(inst) (((inst) >> 0) & 0x1FUL)
#define RT4(inst) (((inst) >> 5) & 0xFUL)
#define GET_IMMSVAL(imm_value) \
(((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value)
#define __get8_data(val,addr,err) \
__asm__( \
"1: lbi.bi %1, [%2], #1\n" \
@ -467,7 +470,7 @@ static inline int do_32(unsigned long inst, struct pt_regs *regs)
}
if (imm)
shift = IMM(inst) * len;
shift = GET_IMMSVAL(IMM(inst)) * len;
else
shift = *idx_to_addr(regs, RB(inst)) << SV(inst);
@ -552,7 +555,7 @@ static struct ctl_table alignment_tbl[3] = {
static struct ctl_table nds32_sysctl_table[2] = {
{
.procname = "unaligned_acess",
.procname = "unaligned_access",
.mode = 0555,
.child = alignment_tbl},
{}
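
The unaligned-access fix above widens the IMM() mask to the full 15-bit immediate field and adds GET_IMMSVAL() to sign-extend it, so negative displacements are no longer treated as large positive offsets. A quick standalone check of the arithmetic (the instruction word is a made-up value whose immediate field is all ones):

#include <stdio.h>

#define IMM(inst)		(((inst) >> 0) & 0x7FFFUL)
#define GET_IMMSVAL(imm_value)	\
	(((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value)

int main(void)
{
	unsigned long inst = 0x7FFFUL;		/* immediate field = 0x7fff */
	long simm = GET_IMMSVAL(IMM(inst));

	printf("raw 0x%04lx -> signed %ld\n", IMM(inst), simm);	/* prints -1 */
	return 0;
}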

View File

@ -147,6 +147,25 @@ void flush_cache_vunmap(unsigned long start, unsigned long end)
cpu_icache_inval_all();
}
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *to)
{
cpu_dcache_wbinval_page((unsigned long)vaddr);
cpu_icache_inval_page((unsigned long)vaddr);
copy_page(vto, vfrom);
cpu_dcache_wbinval_page((unsigned long)vto);
cpu_icache_inval_page((unsigned long)vto);
}
void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
cpu_dcache_wbinval_page((unsigned long)vaddr);
cpu_icache_inval_page((unsigned long)vaddr);
clear_page(addr);
cpu_dcache_wbinval_page((unsigned long)addr);
cpu_icache_inval_page((unsigned long)addr);
}
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
@ -156,11 +175,9 @@ void copy_user_highpage(struct page *to, struct page *from,
pto = page_to_phys(to);
pfrom = page_to_phys(from);
local_irq_save(flags);
if (aliasing(vaddr, (unsigned long)kfrom))
cpu_dcache_wb_page((unsigned long)kfrom);
if (aliasing(vaddr, (unsigned long)kto))
cpu_dcache_inval_page((unsigned long)kto);
local_irq_save(flags);
vto = kremap0(vaddr, pto);
vfrom = kremap1(vaddr, pfrom);
copy_page((void *)vto, (void *)vfrom);
@ -198,21 +215,25 @@ void flush_dcache_page(struct page *page)
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else {
int i, pc;
unsigned long vto, kaddr, flags;
unsigned long kaddr, flags;
kaddr = (unsigned long)page_address(page);
cpu_dcache_wbinval_page(kaddr);
pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
local_irq_save(flags);
for (i = 0; i < pc; i++) {
vto =
kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page));
cpu_dcache_wbinval_page(vto);
kunmap01(vto);
cpu_dcache_wbinval_page(kaddr);
if (mapping) {
unsigned long vaddr, kto;
vaddr = page->index << PAGE_SHIFT;
if (aliasing(vaddr, kaddr)) {
kto = kremap0(vaddr, page_to_phys(page));
cpu_dcache_wbinval_page(kto);
kunmap01(kto);
}
}
local_irq_restore(flags);
}
}
EXPORT_SYMBOL(flush_dcache_page);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len)
@ -251,7 +272,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr)
{
unsigned long flags;
unsigned long kaddr, flags, ktmp;
if (!PageAnon(page))
return;
@ -261,7 +282,12 @@ void flush_anon_page(struct vm_area_struct *vma,
local_irq_save(flags);
if (vma->vm_flags & VM_EXEC)
cpu_icache_inval_page(vaddr & PAGE_MASK);
cpu_dcache_wbinval_page((unsigned long)page_address(page));
kaddr = (unsigned long)page_address(page);
if (aliasing(vaddr, kaddr)) {
ktmp = kremap0(vaddr, page_to_phys(page));
cpu_dcache_wbinval_page(ktmp);
kunmap01(ktmp);
}
local_irq_restore(flags);
}
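
The flush_dcache_page() and flush_anon_page() changes above only touch the user alias when it can actually conflict with the kernel mapping in the cache. A rough standalone sketch of that kind of aliasing test, assuming an illustrative 16 KiB-per-way virtually indexed D-cache and 4 KiB pages (the real check uses the probed nds32 cache geometry, not these constants):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE	4096UL
#define DCACHE_WAY_SIZE	(16 * 1024UL)	/* illustrative, larger than a page */

/* Two mappings of the same page conflict when they select different
 * cache sets, i.e. when they differ in the index bits above the page
 * offset. */
static bool aliasing(unsigned long vaddr, unsigned long kaddr)
{
	unsigned long mask = (DCACHE_WAY_SIZE - 1) & ~(PAGE_SIZE - 1);

	return ((vaddr ^ kaddr) & mask) != 0;
}

int main(void)
{
	printf("%d\n", aliasing(0x00001000UL, 0x00003000UL));	/* 1: different colour */
	printf("%d\n", aliasing(0x00001000UL, 0x00005000UL));	/* 0: same colour */
	return 0;
}
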
@ -272,6 +298,25 @@ void flush_kernel_dcache_page(struct page *page)
cpu_dcache_wbinval_page((unsigned long)page_address(page));
local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
void flush_kernel_vmap_range(void *addr, int size)
{
unsigned long flags;
local_irq_save(flags);
cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
void invalidate_kernel_vmap_range(void *addr, int size)
{
unsigned long flags;
local_irq_save(flags);
cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
void flush_icache_range(unsigned long start, unsigned long end)
{
@ -283,6 +328,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
cpu_cache_wbinval_range(start, end, 1);
local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{

View File

@ -30,6 +30,7 @@ extern unsigned long phys_initrd_size;
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
static void __init zone_sizes_init(void)
{