
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS updates from James Hogan:
 "math-emu:
   - Add missing clearing of BLTZALL and BGEZALL emulation counters
   - Fix BC1EQZ and BC1NEZ condition handling
   - Fix BLEZL and BGTZL identification

  BPF:
   - Add JIT support for SKF_AD_HATYPE
   - Use unsigned access for unsigned SKB fields
   - Quit clobbering callee saved registers in JIT code
   - Fix multiple problems in JIT skb access helpers

  Loongson 3:
   - Select MIPS_L1_CACHE_SHIFT_6

  Octeon:
   - Remove vestiges of CONFIG_CAVIUM_OCTEON_2ND_KERNEL
   - Remove unused L2C types and macros.
   - Remove unused SLI types and macros.
   - Fix compile error when USB is not enabled.
   - Remove unused PCIERCX types and macros.
   - Clean up platform code.

  SNI:
   - Remove recursive include of cpu-feature-overrides.h

  Sibyte:
   - Export symbol periph_rev to sb1250-mac network driver.
   - Fix Kconfig warning.

  Generic platform:
   - Enable Root FS on NFS in generic_defconfig

  SMP-MT:
   - Use CPU interrupt controller IPI IRQ domain support

  UASM:
   - Add support for LHU for uasm.
   - Remove needless ISA abstraction

  mm:
   - Add 48-bit VA space and 4-level page tables for 4K pages.

  PCI:
   - Add controllers before the specified head

  irqchip driver for MIPS CPU:
   - Replace magic 0x100 with IE_SW0
   - Prepare for non-legacy IRQ domains
   - Introduce IPI IRQ domain support

  MAINTAINERS:
   - Update email-id of Rahul Bedarkar

  NET:
   - sb1250-mac: Add missing MODULE_LICENSE()

  CPUFREQ:
   - Loongson2: drop set_cpus_allowed_ptr()

  Misc:
   - Disable Werror when W= is set
   - Opt into HAVE_COPY_THREAD_TLS
   - Enable GENERIC_CPU_AUTOPROBE
   - Use common outgoing-CPU-notification code
   - Remove dead define of ST_OFF
   - Remove CONFIG_ARCH_HAS_ILOG2_U{32,64}
   - Strengthen IPI IRQ domain sanity check
   - Remove confusing else statement in __do_page_fault()
   - Don't unnecessarily include kmalloc.h into <asm/cache.h>.
   - Delete unused definition of SMP_CACHE_SHIFT.
   - Delete redundant definition of SMP_CACHE_BYTES"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (39 commits)
  MIPS: Sibyte: Fix Kconfig warning.
  MIPS: Sibyte: Export symbol periph_rev to sb1250-mac network driver.
  NET: sb1250-mac: Add missing MODULE_LICENSE()
  MAINTAINERS: Update email-id of Rahul Bedarkar
  MIPS: Remove confusing else statement in __do_page_fault()
  MIPS: Strengthen IPI IRQ domain sanity check
  MIPS: smp-mt: Use CPU interrupt controller IPI IRQ domain support
  irqchip: mips-cpu: Introduce IPI IRQ domain support
  irqchip: mips-cpu: Prepare for non-legacy IRQ domains
  irqchip: mips-cpu: Replace magic 0x100 with IE_SW0
  MIPS: Remove CONFIG_ARCH_HAS_ILOG2_U{32,64}
  MIPS: generic: Enable Root FS on NFS in generic_defconfig
  MIPS: mach-rm: Remove recursive include of cpu-feature-overrides.h
  MIPS: Opt into HAVE_COPY_THREAD_TLS
  CPUFREQ: Loongson2: drop set_cpus_allowed_ptr()
  MIPS: uasm: Remove needless ISA abstraction
  MIPS: Remove dead define of ST_OFF
  MIPS: Use common outgoing-CPU-notification code
  MIPS: math-emu: Fix BC1EQZ and BC1NEZ condition handling
  MIPS: r2-on-r6-emu: Clear BLTZALL and BGEZALL debugfs counters
  ...
Linus Torvalds 2017-05-12 09:56:30 -07:00
commit ac3c4aa248
50 changed files with 1215 additions and 10915 deletions

View File

@ -7923,7 +7923,7 @@ L: linux-man@vger.kernel.org
S: Maintained
MARDUK (CREATOR CI40) DEVICE TREE SUPPORT
M: Rahul Bedarkar <rahul.bedarkar@imgtec.com>
M: Rahul Bedarkar <rahulbedarkar89@gmail.com>
L: linux-mips@linux-mips.org
S: Maintained
F: arch/mips/boot/dts/img/pistachio_marduk.dts

View File

@ -1,7 +1,9 @@
# Fail on warnings - also for files referenced in subdirs
# -Werror can be disabled for specific files using:
# CFLAGS_<file.o> := -Wno-error
ifeq ($(W),)
subdir-ccflags-y := -Werror
endif
# platform specific definitions
include arch/mips/Kbuild.platforms

View File

@ -46,6 +46,7 @@ config MIPS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_SMP_IDLE_THREAD
select BUILDTIME_EXTABLE_SORT
select GENERIC_CPU_AUTOPROBE
select GENERIC_CLOCKEVENTS
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_CMOS_UPDATE
@ -68,6 +69,7 @@ config MIPS
select HANDLE_DOMAIN_IRQ
select HAVE_EXIT_THREAD
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_COPY_THREAD_TLS
menu "Machine selection"
@ -1039,14 +1041,6 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
config ARCH_HAS_ILOG2_U32
bool
default n
config ARCH_HAS_ILOG2_U64
bool
default n
config GENERIC_HWEIGHT
bool
default y
@ -1372,6 +1366,7 @@ config CPU_LOONGSON3
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select MIPS_PGD_C0_CONTEXT
select MIPS_L1_CACHE_SHIFT_6
select GPIOLIB
help
The Loongson 3 processor implements the MIPS64R2 instruction
@ -2120,10 +2115,13 @@ config MIPS_VA_BITS_48
bool "48 bits virtual memory"
depends on 64BIT
help
Support a maximum at least 48 bits of application virtual memory.
Default is 40 bits or less, depending on the CPU.
This option result in a small memory overhead for page tables.
This option is only supported with 16k and 64k page sizes.
Support a maximum at least 48 bits of application virtual
memory. Default is 40 bits or less, depending on the CPU.
For page sizes 16k and above, this option results in a small
memory overhead for page tables. For 4k page size, a fourth
level of page tables is added which imposes both a memory
overhead as well as slower TLB fault handling.
If unsure, say N.
choice
@ -2133,7 +2131,6 @@ choice
config PAGE_SIZE_4KB
bool "4kB"
depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
depends on !MIPS_VA_BITS_48
help
This option select the standard 4kB Linux page size. On some
R3000-family processors this is the only available page size. Using
@ -2982,6 +2979,7 @@ config HAVE_LATENCYTOP_SUPPORT
config PGTABLE_LEVELS
int
default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
default 3 if 64BIT && !PAGE_SIZE_64KB
default 2
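
With MIPS_VA_BITS_48 on a 4K-page kernel, PGTABLE_LEVELS becomes 4 and the address translation chain gains a real PUD step. For orientation only (this is not code from the series), a software walk of the new layout follows the usual four-step descent, using the pud helpers that the pgtable-64.h changes further down introduce:

/*
 * Illustrative sketch: resolve a user virtual address through the
 * 4-level layout (4K pages + MIPS_VA_BITS_48). Locking, huge pages and
 * the folded configurations are deliberately ignored.
 */
#include <linux/mm.h>

static pte_t *demo_walk_4level(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);	/* the new level for 4K + 48-bit VA */
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}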

View File

@ -82,7 +82,7 @@ config CMDLINE_OVERRIDE
config SB1XXX_CORELIS
bool "Corelis Debugger"
depends on SIBYTE_SB1xxx_SOC
select DEBUG_INFO
select DEBUG_INFO if !COMPILE_TEST
help
Select compile flags that produce code that can be processed by the
Corelis mksym utility and UDB Emulator.

View File

@ -25,15 +25,6 @@ endif # CPU_CAVIUM_OCTEON
if CAVIUM_OCTEON_SOC
config CAVIUM_OCTEON_2ND_KERNEL
bool "Build the kernel to be used as a 2nd kernel on the same chip"
default "n"
help
This option configures this kernel to be linked at a different
address and use the 2nd uart for output. This allows a kernel built
with this option to be run at the same time as one built without this
option.
config CAVIUM_OCTEON_LOCK_L2
bool "Lock often used kernel code in the L2"
default "y"

View File

@ -4,8 +4,4 @@
platform-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon/
cflags-$(CONFIG_CAVIUM_OCTEON_SOC) += \
-I$(srctree)/arch/mips/include/asm/mach-cavium-octeon
ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff84100000
else
load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff81100000
endif

View File

@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2010 Cavium Networks
* Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@ -239,6 +239,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
return counter;
@ -249,6 +250,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
return counter;
@ -259,6 +261,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
return counter;
@ -270,6 +273,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
return counter;
@ -301,7 +305,7 @@ static void fault_in(uint64_t addr, int len)
*/
CVMX_DCACHE_INVALIDATE;
while (len > 0) {
ACCESS_ONCE(*ptr);
READ_ONCE(*ptr);
len -= CVMX_CACHE_LINE_SIZE;
ptr += CVMX_CACHE_LINE_SIZE;
}
@ -375,7 +379,9 @@ int cvmx_l2c_lock_line(uint64_t addr)
if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
lckbase.s.lck_base = addr_tmp >> 7;
} else {
lckbase.s.lck_base = addr >> 7;
}
@ -435,6 +441,7 @@ void cvmx_l2c_flush(void)
/* These may look like constants, but they aren't... */
int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
for (set = 0; set < n_set; set++) {
for (assoc = 0; assoc < n_assoc; assoc++) {
address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
@ -519,89 +526,49 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
union __cvmx_l2c_tag {
uint64_t u64;
struct cvmx_l2c_tag_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:40;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:20; /* Phys mem addr (33..14) */
#else
uint64_t addr:20; /* Phys mem addr (33..14) */
uint64_t U:1; /* Use, LRU eviction */
uint64_t L:1; /* Line locked */
uint64_t D:1; /* Line dirty */
uint64_t V:1; /* Line valid */
uint64_t reserved:40;
#endif
__BITFIELD_FIELD(uint64_t reserved:40,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:20, /* Phys addr (33..14) */
;))))))
} cn50xx;
struct cvmx_l2c_tag_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:41;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:19; /* Phys mem addr (33..15) */
#else
uint64_t addr:19; /* Phys mem addr (33..15) */
uint64_t U:1; /* Use, LRU eviction */
uint64_t L:1; /* Line locked */
uint64_t D:1; /* Line dirty */
uint64_t V:1; /* Line valid */
uint64_t reserved:41;
#endif
__BITFIELD_FIELD(uint64_t reserved:41,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:19, /* Phys addr (33..15) */
;))))))
} cn30xx;
struct cvmx_l2c_tag_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:42;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:18; /* Phys mem addr (33..16) */
#else
uint64_t addr:18; /* Phys mem addr (33..16) */
uint64_t U:1; /* Use, LRU eviction */
uint64_t L:1; /* Line locked */
uint64_t D:1; /* Line dirty */
uint64_t V:1; /* Line valid */
uint64_t reserved:42;
#endif
__BITFIELD_FIELD(uint64_t reserved:42,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:18, /* Phys addr (33..16) */
;))))))
} cn31xx;
struct cvmx_l2c_tag_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:43;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:17; /* Phys mem addr (33..17) */
#else
uint64_t addr:17; /* Phys mem addr (33..17) */
uint64_t U:1; /* Use, LRU eviction */
uint64_t L:1; /* Line locked */
uint64_t D:1; /* Line dirty */
uint64_t V:1; /* Line valid */
uint64_t reserved:43;
#endif
__BITFIELD_FIELD(uint64_t reserved:43,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:17, /* Phys addr (33..17) */
;))))))
} cn38xx;
struct cvmx_l2c_tag_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:44;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:16; /* Phys mem addr (33..18) */
#else
uint64_t addr:16; /* Phys mem addr (33..18) */
uint64_t U:1; /* Use, LRU eviction */
uint64_t L:1; /* Line locked */
uint64_t D:1; /* Line dirty */
uint64_t V:1; /* Line valid */
uint64_t reserved:44;
#endif
__BITFIELD_FIELD(uint64_t reserved:44,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:16, /* Phys addr (33..18) */
;))))))
} cn58xx;
struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
@ -629,8 +596,8 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
union __cvmx_l2c_tag tag_val;
uint64_t dbg_addr = CVMX_L2C_DBG;
unsigned long flags;
union cvmx_l2c_dbg debug_val;
debug_val.u64 = 0;
/*
* For low core count parts, the core number is always small
@ -683,8 +650,8 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
union cvmx_l2c_tag tag;
tag.u64 = 0;
tag.u64 = 0;
if ((int)association >= cvmx_l2c_get_num_assoc()) {
cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
return tag;
@ -767,10 +734,12 @@ uint32_t cvmx_l2c_address_to_index(uint64_t addr)
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
union cvmx_l2c_ctl l2c_ctl;
l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
indxalias = !l2c_ctl.s.disidxalias;
} else {
union cvmx_l2c_cfg l2c_cfg;
l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
indxalias = l2c_cfg.s.idxalias;
}
@ -778,6 +747,7 @@ uint32_t cvmx_l2c_address_to_index(uint64_t addr)
if (indxalias) {
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
idx ^= idx / cvmx_l2c_get_num_sets();
idx ^= a_14_12;
} else {
@ -801,6 +771,7 @@ int cvmx_l2c_get_cache_size_bytes(void)
int cvmx_l2c_get_set_bits(void)
{
int l2_set_bits;
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
l2_set_bits = 11; /* 2048 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
@ -828,6 +799,7 @@ int cvmx_l2c_get_num_sets(void)
int cvmx_l2c_get_num_assoc(void)
{
int l2_assoc;
if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
OCTEON_IS_MODEL(OCTEON_CN52XX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX) ||
@ -869,16 +841,17 @@ int cvmx_l2c_get_num_assoc(void)
else if (mio_fus_dat3.s.l2c_crip == 1)
l2_assoc = 12;
} else {
union cvmx_l2d_fus3 val;
val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
uint64_t l2d_fus3;
l2d_fus3 = cvmx_read_csr(CVMX_L2D_FUS3);
/*
* Using shifts here, as bit position names are
* different for each model but they all mean the
* same.
*/
if ((val.u64 >> 35) & 0x1)
if ((l2d_fus3 >> 35) & 0x1)
l2_assoc = l2_assoc >> 2;
else if ((val.u64 >> 34) & 0x1)
else if ((l2d_fus3 >> 34) & 0x1)
l2_assoc = l2_assoc >> 1;
}
return l2_assoc;
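
The rewrite in this file (and in the matching headers below) replaces hand-duplicated #ifdef __BIG_ENDIAN_BITFIELD blocks with __BITFIELD_FIELD from uapi/asm/bitfield.h. The macro nests the fields so a single declaration yields the correct member order for either bitfield endianness; its shape is roughly the following sketch (see the real header for the exact definition):

/* Sketch of the idea behind __BITFIELD_FIELD: emit "field" before the
 * remaining fields on big-endian bitfield layouts and after them on
 * little-endian ones, so each union body is written only once. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define __BITFIELD_FIELD(field, more)	\
	field;				\
	more
#else
#define __BITFIELD_FIELD(field, more)	\
	more				\
	field;
#endif

Expanded, the cn50xx tag declaration above therefore produces the same two layouts that the removed #ifdef blocks spelled out by hand.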

View File

@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2010 Cavium Networks
* Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@ -63,16 +63,15 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
char pass[4];
int clock_mhz;
const char *suffix;
union cvmx_l2d_fus3 fus3;
int num_cores;
union cvmx_mio_fus_dat2 fus_dat2;
union cvmx_mio_fus_dat3 fus_dat3;
char fuse_model[10];
uint32_t fuse_data = 0;
uint64_t l2d_fus3 = 0;
fus3.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
l2d_fus3 = (cvmx_read_csr(CVMX_L2D_FUS3) >> 34) & 0x3;
fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
num_cores = cvmx_octeon_num_cores();
@ -192,7 +191,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
/* Now figure out the family, the first two digits */
switch ((chip_id >> 8) & 0xff) {
case 0: /* CN38XX, CN37XX or CN36XX */
if (fus3.cn38xx.crip_512k) {
if (l2d_fus3) {
/*
* For some unknown reason, the 16 core one is
* called 37 instead of 36.
@ -223,7 +222,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
}
break;
case 1: /* CN31XX or CN3020 */
if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
if ((chip_id & 0x10) || l2d_fus3)
family = "30";
else
family = "31";
@ -246,7 +245,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
case 2: /* CN3010 or CN3005 */
family = "30";
/* A chip with half cache is an 05 */
if (fus3.cn30xx.crip_64k)
if (l2d_fus3)
core_model = "05";
/*
* This series of chips didn't follow the standard
@ -267,7 +266,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
case 3: /* CN58XX */
family = "58";
/* Special case. 4 core, half cache (CP with half cache) */
if ((num_cores == 4) && fus3.cn58xx.crip_1024k && !strncmp(suffix, "CP", 2))
if ((num_cores == 4) && l2d_fus3 && !strncmp(suffix, "CP", 2))
core_model = "29";
/* Pass 1 uses different encodings for pass numbers */
@ -290,7 +289,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
break;
case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
if (fus_dat2.cn56xx.raid_en) {
if (fus3.cn56xx.crip_1024k)
if (l2d_fus3)
family = "55";
else
family = "57";
@ -309,7 +308,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
if (fus_dat3.cn56xx.bar2_en)
suffix = "NSPB2";
}
if (fus3.cn56xx.crip_1024k)
if (l2d_fus3)
family = "54";
else
family = "56";
@ -319,7 +318,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
family = "50";
break;
case 7: /* CN52XX */
if (fus3.cn52xx.crip_256k)
if (l2d_fus3)
family = "51";
else
family = "52";

View File

@ -3,71 +3,27 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2016 Cavium Networks
* Copyright (C) 2004-2017 Cavium, Inc.
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/usb/ehci_def.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-helper-board.h>
#ifdef CONFIG_USB
#include <linux/usb/ehci_def.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/cvmx-uctlx-defs.h>
#define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull))
#define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull))
/* Octeon Random Number Generator. */
static int __init octeon_rng_device_init(void)
{
struct platform_device *pd;
int ret = 0;
struct resource rng_resources[] = {
{
.flags = IORESOURCE_MEM,
.start = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
.end = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
}, {
.flags = IORESOURCE_MEM,
.start = cvmx_build_io_address(8, 0),
.end = cvmx_build_io_address(8, 0) + 0x7
}
};
pd = platform_device_alloc("octeon_rng", -1);
if (!pd) {
ret = -ENOMEM;
goto out;
}
ret = platform_device_add_resources(pd, rng_resources,
ARRAY_SIZE(rng_resources));
if (ret)
goto fail;
ret = platform_device_add(pd);
if (ret)
goto fail;
return ret;
fail:
platform_device_put(pd);
out:
return ret;
}
device_initcall(octeon_rng_device_init);
#ifdef CONFIG_USB
static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
static int octeon2_usb_clock_start_cnt;
@ -440,8 +396,49 @@ device_initcall(octeon_ohci_device_init);
#endif /* CONFIG_USB */
/* Octeon Random Number Generator. */
static int __init octeon_rng_device_init(void)
{
struct platform_device *pd;
int ret = 0;
static struct of_device_id __initdata octeon_ids[] = {
struct resource rng_resources[] = {
{
.flags = IORESOURCE_MEM,
.start = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
.end = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
}, {
.flags = IORESOURCE_MEM,
.start = cvmx_build_io_address(8, 0),
.end = cvmx_build_io_address(8, 0) + 0x7
}
};
pd = platform_device_alloc("octeon_rng", -1);
if (!pd) {
ret = -ENOMEM;
goto out;
}
ret = platform_device_add_resources(pd, rng_resources,
ARRAY_SIZE(rng_resources));
if (ret)
goto fail;
ret = platform_device_add(pd);
if (ret)
goto fail;
return ret;
fail:
platform_device_put(pd);
out:
return ret;
}
device_initcall(octeon_rng_device_init);
const struct of_device_id octeon_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "cavium,octeon-6335-uctl", },
{ .compatible = "cavium,octeon-5750-usbn", },
@ -481,6 +478,7 @@ static void __init octeon_fdt_set_phy(int eth, int phy_addr)
alt_phy_handle = fdt_getprop(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
if (alt_phy_handle) {
u32 alt_phandle = be32_to_cpup(alt_phy_handle);
alt_phy = fdt_node_offset_by_phandle(initial_boot_params, alt_phandle);
} else {
alt_phy = -1;
@ -579,6 +577,7 @@ static void __init octeon_fdt_rm_ethernet(int node)
if (phy_handle) {
u32 ph = be32_to_cpup(phy_handle);
int p = fdt_node_offset_by_phandle(initial_boot_params, ph);
if (p >= 0)
fdt_nop_node(initial_boot_params, p);
}
@ -728,6 +727,7 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 2; i++) {
int mgmt;
snprintf(name_buffer, sizeof(name_buffer),
"mix%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
@ -743,6 +743,7 @@ int __init octeon_prune_device_tree(void)
name_buffer);
} else {
int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);
octeon_fdt_set_phy(mgmt, phy_addr);
}
}
@ -751,6 +752,7 @@ int __init octeon_prune_device_tree(void)
pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
if (pip_path) {
int pip = fdt_path_offset(initial_boot_params, pip_path);
if (pip >= 0)
for (i = 0; i <= 4; i++)
octeon_fdt_pip_iface(pip, i);
@ -767,6 +769,7 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 2; i++) {
int i2c;
snprintf(name_buffer, sizeof(name_buffer),
"twsi%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
@ -797,11 +800,11 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 2; i++) {
int i2c;
snprintf(name_buffer, sizeof(name_buffer),
"smi%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
name_buffer, NULL);
if (alias_prop) {
i2c = fdt_path_offset(initial_boot_params, alias_prop);
if (i2c < 0)
@ -824,6 +827,7 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 3; i++) {
int uart;
snprintf(name_buffer, sizeof(name_buffer),
"uart%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
@ -863,6 +867,7 @@ int __init octeon_prune_device_tree(void)
int len;
int cf = fdt_path_offset(initial_boot_params, alias_prop);
base_ptr = 0;
if (octeon_bootinfo->major_version == 1
&& octeon_bootinfo->minor_version >= 1) {
@ -912,6 +917,7 @@ int __init octeon_prune_device_tree(void)
fdt_nop_property(initial_boot_params, cf, "cavium,dma-engine-handle");
if (!is_16bit) {
__be32 width = cpu_to_be32(8);
fdt_setprop_inplace(initial_boot_params, cf,
"cavium,bus-width", &width, sizeof(width));
}
@ -1004,6 +1010,7 @@ end_led:
;
}
#ifdef CONFIG_USB
/* OHCI/UHCI USB */
alias_prop = fdt_getprop(initial_boot_params, aliases,
"uctl", NULL);
@ -1036,6 +1043,7 @@ end_led:
} else {
__be32 new_f[1];
enum cvmx_helper_board_usb_clock_types c;
c = __cvmx_helper_board_usb_get_clock_type();
switch (c) {
case USB_CLOCK_TYPE_REF_48:
@ -1052,6 +1060,7 @@ end_led:
}
}
}
#endif
return 0;
}

View File

@ -374,14 +374,8 @@ void octeon_write_lcd(const char *s)
*/
int octeon_get_boot_uart(void)
{
int uart;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
uart = 1;
#else
uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
1 : 0;
#endif
return uart;
}
/**
@ -901,14 +895,10 @@ void __init prom_init(void)
}
if (strstr(arcs_cmdline, "console=") == NULL) {
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
strcat(arcs_cmdline, " console=ttyS0,115200");
#else
if (octeon_uart == 1)
strcat(arcs_cmdline, " console=ttyS1,115200");
else
strcat(arcs_cmdline, " console=ttyS0,115200");
#endif
}
mips_hpt_frequency = octeon_get_clock_rate();

View File

@ -36,6 +36,8 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_NETFILTER=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
@ -80,6 +82,7 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y

View File

@ -9,14 +9,9 @@
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H
#include <kmalloc.h>
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_SHIFT L1_CACHE_SHIFT
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif /* _ASM_CACHE_H */

View File

@ -12,10 +12,9 @@
#ifndef __ASM_CPU_INFO_H
#define __ASM_CPU_INFO_H
#include <linux/cache.h>
#include <linux/types.h>
#include <asm/cache.h>
/*
* Descriptor for a cache
*/

View File

@ -0,0 +1,26 @@
/*
* CPU feature definitions for module loading, used by
* module_cpu_feature_match(), see uapi/asm/hwcap.h for MIPS CPU features.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H
#include <uapi/asm/hwcap.h>
#include <asm/elf.h>
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
static inline bool cpu_have_feature(unsigned int num)
{
return elf_hwcap & (1UL << num);
}
#endif /* __ASM_CPUFEATURE_H */
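
This header is what lets the newly selected GENERIC_CPU_AUTOPROBE drive module autoloading from elf_hwcap. A hedged usage sketch follows; the MIPS_MSA hwcap name is taken from uapi/asm/hwcap.h and used here purely for illustration:

/* Hypothetical module that should only load on MSA-capable CPUs. */
#include <linux/module.h>
#include <linux/cpufeature.h>

static int __init msa_demo_init(void)
{
	/* re-check the hwcap bit at init time as well */
	if (!cpu_have_feature(cpu_feature(MIPS_MSA)))
		return -ENODEV;
	return 0;
}

/* emits the device table udev uses to autoload the module */
module_cpu_feature_match(MIPS_MSA, msa_demo_init);
MODULE_LICENSE("GPL");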

View File

@ -10,8 +10,6 @@
#ifndef __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H
#include <cpu-feature-overrides.h>
#define cpu_has_tlb 1
#define cpu_has_4kex 1
#define cpu_has_4k_cache 1

File diff suppressed because it is too large

View File

@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2010 Cavium Networks
* Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@ -33,48 +33,39 @@
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__
#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */
#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
#include <uapi/asm/bitfield.h>
#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro */
#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro */
#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro */
#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
/* Based on 128 byte cache line size */
#define CVMX_L2C_IDX_ADDR_SHIFT 7
#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
/* Defines for index aliasing computations */
#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + \
cvmx_l2c_get_set_bits())
#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
/* Defines for Virtualizations, valid only from Octeon II onwards. */
#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 64 : 0)
#define CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 32 : 0)
union cvmx_l2c_tag {
uint64_t u64;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved:28;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:32; /* Phys mem (not all bits valid) */
#else
uint64_t addr:32; /* Phys mem (not all bits valid) */
uint64_t U:1; /* Use, LRU eviction */
uint64_t L:1; /* Line locked */
uint64_t D:1; /* Line dirty */
uint64_t V:1; /* Line valid */
uint64_t reserved:28;
#endif
} s;
};
#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
#define CVMX_L2C_TADS 1
/* L2C Performance Counter events. */
union cvmx_l2c_tag {
uint64_t u64;
struct {
__BITFIELD_FIELD(uint64_t reserved:28,
__BITFIELD_FIELD(uint64_t V:1,
__BITFIELD_FIELD(uint64_t D:1,
__BITFIELD_FIELD(uint64_t L:1,
__BITFIELD_FIELD(uint64_t U:1,
__BITFIELD_FIELD(uint64_t addr:32,
;))))))
} s;
};
/* L2C Performance Counter events. */
enum cvmx_l2c_event {
CVMX_L2C_EVENT_CYCLES = 0,
CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
@ -175,7 +166,8 @@ enum cvmx_l2c_tad_event {
*
* @note The routine does not clear the counter.
*/
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, uint32_t clear_on_read);
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
uint32_t clear_on_read);
/**
* Read the given L2 Cache performance counter. The counter must be configured
@ -307,8 +299,11 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
/* Wrapper providing a deprecated old function name */
static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index) __attribute__((deprecated));
static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index)
static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
uint32_t index)
__attribute__((deprecated));
static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
uint32_t index)
{
return cvmx_l2c_get_tag(association, index);
}
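
For context, the counter events declared in this header pair with cvmx_l2c_config_perf() and cvmx_l2c_read_perf() from the .c file above; a minimal sketch of the intended flow, assuming counter 0 is otherwise unused:

/* Sketch: count L2 instruction misses around a workload of interest. */
#include <linux/printk.h>
#include <asm/octeon/cvmx-l2c.h>

static void l2c_miss_sample(void)
{
	uint64_t misses;

	/* program counter 0; do not clear it on each read */
	cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_INSTRUCTION_MISS, 0);

	/* ... run the workload ... */

	misses = cvmx_l2c_read_perf(0);
	pr_info("L2 instruction misses: %llu\n",
		(unsigned long long)misses);
}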

View File

@ -1,526 +0,0 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_L2D_DEFS_H__
#define __CVMX_L2D_DEFS_H__
#define CVMX_L2D_BST0 (CVMX_ADD_IO_SEG(0x0001180080000780ull))
#define CVMX_L2D_BST1 (CVMX_ADD_IO_SEG(0x0001180080000788ull))
#define CVMX_L2D_BST2 (CVMX_ADD_IO_SEG(0x0001180080000790ull))
#define CVMX_L2D_BST3 (CVMX_ADD_IO_SEG(0x0001180080000798ull))
#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
#define CVMX_L2D_FADR (CVMX_ADD_IO_SEG(0x0001180080000018ull))
#define CVMX_L2D_FSYN0 (CVMX_ADD_IO_SEG(0x0001180080000020ull))
#define CVMX_L2D_FSYN1 (CVMX_ADD_IO_SEG(0x0001180080000028ull))
#define CVMX_L2D_FUS0 (CVMX_ADD_IO_SEG(0x00011800800007A0ull))
#define CVMX_L2D_FUS1 (CVMX_ADD_IO_SEG(0x00011800800007A8ull))
#define CVMX_L2D_FUS2 (CVMX_ADD_IO_SEG(0x00011800800007B0ull))
#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
union cvmx_l2d_bst0 {
uint64_t u64;
struct cvmx_l2d_bst0_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_35_63:29;
uint64_t ftl:1;
uint64_t q0stat:34;
#else
uint64_t q0stat:34;
uint64_t ftl:1;
uint64_t reserved_35_63:29;
#endif
} s;
struct cvmx_l2d_bst0_s cn30xx;
struct cvmx_l2d_bst0_s cn31xx;
struct cvmx_l2d_bst0_s cn38xx;
struct cvmx_l2d_bst0_s cn38xxp2;
struct cvmx_l2d_bst0_s cn50xx;
struct cvmx_l2d_bst0_s cn52xx;
struct cvmx_l2d_bst0_s cn52xxp1;
struct cvmx_l2d_bst0_s cn56xx;
struct cvmx_l2d_bst0_s cn56xxp1;
struct cvmx_l2d_bst0_s cn58xx;
struct cvmx_l2d_bst0_s cn58xxp1;
};
union cvmx_l2d_bst1 {
uint64_t u64;
struct cvmx_l2d_bst1_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q1stat:34;
#else
uint64_t q1stat:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_bst1_s cn30xx;
struct cvmx_l2d_bst1_s cn31xx;
struct cvmx_l2d_bst1_s cn38xx;
struct cvmx_l2d_bst1_s cn38xxp2;
struct cvmx_l2d_bst1_s cn50xx;
struct cvmx_l2d_bst1_s cn52xx;
struct cvmx_l2d_bst1_s cn52xxp1;
struct cvmx_l2d_bst1_s cn56xx;
struct cvmx_l2d_bst1_s cn56xxp1;
struct cvmx_l2d_bst1_s cn58xx;
struct cvmx_l2d_bst1_s cn58xxp1;
};
union cvmx_l2d_bst2 {
uint64_t u64;
struct cvmx_l2d_bst2_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q2stat:34;
#else
uint64_t q2stat:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_bst2_s cn30xx;
struct cvmx_l2d_bst2_s cn31xx;
struct cvmx_l2d_bst2_s cn38xx;
struct cvmx_l2d_bst2_s cn38xxp2;
struct cvmx_l2d_bst2_s cn50xx;
struct cvmx_l2d_bst2_s cn52xx;
struct cvmx_l2d_bst2_s cn52xxp1;
struct cvmx_l2d_bst2_s cn56xx;
struct cvmx_l2d_bst2_s cn56xxp1;
struct cvmx_l2d_bst2_s cn58xx;
struct cvmx_l2d_bst2_s cn58xxp1;
};
union cvmx_l2d_bst3 {
uint64_t u64;
struct cvmx_l2d_bst3_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q3stat:34;
#else
uint64_t q3stat:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_bst3_s cn30xx;
struct cvmx_l2d_bst3_s cn31xx;
struct cvmx_l2d_bst3_s cn38xx;
struct cvmx_l2d_bst3_s cn38xxp2;
struct cvmx_l2d_bst3_s cn50xx;
struct cvmx_l2d_bst3_s cn52xx;
struct cvmx_l2d_bst3_s cn52xxp1;
struct cvmx_l2d_bst3_s cn56xx;
struct cvmx_l2d_bst3_s cn56xxp1;
struct cvmx_l2d_bst3_s cn58xx;
struct cvmx_l2d_bst3_s cn58xxp1;
};
union cvmx_l2d_err {
uint64_t u64;
struct cvmx_l2d_err_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t bmhclsel:1;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t bmhclsel:1;
uint64_t reserved_6_63:58;
#endif
} s;
struct cvmx_l2d_err_s cn30xx;
struct cvmx_l2d_err_s cn31xx;
struct cvmx_l2d_err_s cn38xx;
struct cvmx_l2d_err_s cn38xxp2;
struct cvmx_l2d_err_s cn50xx;
struct cvmx_l2d_err_s cn52xx;
struct cvmx_l2d_err_s cn52xxp1;
struct cvmx_l2d_err_s cn56xx;
struct cvmx_l2d_err_s cn56xxp1;
struct cvmx_l2d_err_s cn58xx;
struct cvmx_l2d_err_s cn58xxp1;
};
union cvmx_l2d_fadr {
uint64_t u64;
struct cvmx_l2d_fadr_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_19_63:45;
uint64_t fadru:1;
uint64_t fowmsk:4;
uint64_t fset:3;
uint64_t fadr:11;
#else
uint64_t fadr:11;
uint64_t fset:3;
uint64_t fowmsk:4;
uint64_t fadru:1;
uint64_t reserved_19_63:45;
#endif
} s;
struct cvmx_l2d_fadr_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t reserved_13_13:1;
uint64_t fset:2;
uint64_t reserved_9_10:2;
uint64_t fadr:9;
#else
uint64_t fadr:9;
uint64_t reserved_9_10:2;
uint64_t fset:2;
uint64_t reserved_13_13:1;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn30xx;
struct cvmx_l2d_fadr_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t reserved_13_13:1;
uint64_t fset:2;
uint64_t reserved_10_10:1;
uint64_t fadr:10;
#else
uint64_t fadr:10;
uint64_t reserved_10_10:1;
uint64_t fset:2;
uint64_t reserved_13_13:1;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn31xx;
struct cvmx_l2d_fadr_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t fset:3;
uint64_t fadr:11;
#else
uint64_t fadr:11;
uint64_t fset:3;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn38xx;
struct cvmx_l2d_fadr_cn38xx cn38xxp2;
struct cvmx_l2d_fadr_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t fset:3;
uint64_t reserved_8_10:3;
uint64_t fadr:8;
#else
uint64_t fadr:8;
uint64_t reserved_8_10:3;
uint64_t fset:3;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn50xx;
struct cvmx_l2d_fadr_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t fset:3;
uint64_t reserved_10_10:1;
uint64_t fadr:10;
#else
uint64_t fadr:10;
uint64_t reserved_10_10:1;
uint64_t fset:3;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn52xx;
struct cvmx_l2d_fadr_cn52xx cn52xxp1;
struct cvmx_l2d_fadr_s cn56xx;
struct cvmx_l2d_fadr_s cn56xxp1;
struct cvmx_l2d_fadr_s cn58xx;
struct cvmx_l2d_fadr_s cn58xxp1;
};
union cvmx_l2d_fsyn0 {
uint64_t u64;
struct cvmx_l2d_fsyn0_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t fsyn_ow1:10;
uint64_t fsyn_ow0:10;
#else
uint64_t fsyn_ow0:10;
uint64_t fsyn_ow1:10;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_l2d_fsyn0_s cn30xx;
struct cvmx_l2d_fsyn0_s cn31xx;
struct cvmx_l2d_fsyn0_s cn38xx;
struct cvmx_l2d_fsyn0_s cn38xxp2;
struct cvmx_l2d_fsyn0_s cn50xx;
struct cvmx_l2d_fsyn0_s cn52xx;
struct cvmx_l2d_fsyn0_s cn52xxp1;
struct cvmx_l2d_fsyn0_s cn56xx;
struct cvmx_l2d_fsyn0_s cn56xxp1;
struct cvmx_l2d_fsyn0_s cn58xx;
struct cvmx_l2d_fsyn0_s cn58xxp1;
};
union cvmx_l2d_fsyn1 {
uint64_t u64;
struct cvmx_l2d_fsyn1_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t fsyn_ow3:10;
uint64_t fsyn_ow2:10;
#else
uint64_t fsyn_ow2:10;
uint64_t fsyn_ow3:10;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_l2d_fsyn1_s cn30xx;
struct cvmx_l2d_fsyn1_s cn31xx;
struct cvmx_l2d_fsyn1_s cn38xx;
struct cvmx_l2d_fsyn1_s cn38xxp2;
struct cvmx_l2d_fsyn1_s cn50xx;
struct cvmx_l2d_fsyn1_s cn52xx;
struct cvmx_l2d_fsyn1_s cn52xxp1;
struct cvmx_l2d_fsyn1_s cn56xx;
struct cvmx_l2d_fsyn1_s cn56xxp1;
struct cvmx_l2d_fsyn1_s cn58xx;
struct cvmx_l2d_fsyn1_s cn58xxp1;
};
union cvmx_l2d_fus0 {
uint64_t u64;
struct cvmx_l2d_fus0_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q0fus:34;
#else
uint64_t q0fus:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_fus0_s cn30xx;
struct cvmx_l2d_fus0_s cn31xx;
struct cvmx_l2d_fus0_s cn38xx;
struct cvmx_l2d_fus0_s cn38xxp2;
struct cvmx_l2d_fus0_s cn50xx;
struct cvmx_l2d_fus0_s cn52xx;
struct cvmx_l2d_fus0_s cn52xxp1;
struct cvmx_l2d_fus0_s cn56xx;
struct cvmx_l2d_fus0_s cn56xxp1;
struct cvmx_l2d_fus0_s cn58xx;
struct cvmx_l2d_fus0_s cn58xxp1;
};
union cvmx_l2d_fus1 {
uint64_t u64;
struct cvmx_l2d_fus1_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q1fus:34;
#else
uint64_t q1fus:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_fus1_s cn30xx;
struct cvmx_l2d_fus1_s cn31xx;
struct cvmx_l2d_fus1_s cn38xx;
struct cvmx_l2d_fus1_s cn38xxp2;
struct cvmx_l2d_fus1_s cn50xx;
struct cvmx_l2d_fus1_s cn52xx;
struct cvmx_l2d_fus1_s cn52xxp1;
struct cvmx_l2d_fus1_s cn56xx;
struct cvmx_l2d_fus1_s cn56xxp1;
struct cvmx_l2d_fus1_s cn58xx;
struct cvmx_l2d_fus1_s cn58xxp1;
};
union cvmx_l2d_fus2 {
uint64_t u64;
struct cvmx_l2d_fus2_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q2fus:34;
#else
uint64_t q2fus:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_fus2_s cn30xx;
struct cvmx_l2d_fus2_s cn31xx;
struct cvmx_l2d_fus2_s cn38xx;
struct cvmx_l2d_fus2_s cn38xxp2;
struct cvmx_l2d_fus2_s cn50xx;
struct cvmx_l2d_fus2_s cn52xx;
struct cvmx_l2d_fus2_s cn52xxp1;
struct cvmx_l2d_fus2_s cn56xx;
struct cvmx_l2d_fus2_s cn56xxp1;
struct cvmx_l2d_fus2_s cn58xx;
struct cvmx_l2d_fus2_s cn58xxp1;
};
union cvmx_l2d_fus3 {
uint64_t u64;
struct cvmx_l2d_fus3_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
uint64_t ema_ctl:3;
uint64_t reserved_34_36:3;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t reserved_34_36:3;
uint64_t ema_ctl:3;
uint64_t reserved_40_63:24;
#endif
} s;
struct cvmx_l2d_fus3_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_35_63:29;
uint64_t crip_64k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_64k:1;
uint64_t reserved_35_63:29;
#endif
} cn30xx;
struct cvmx_l2d_fus3_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_35_63:29;
uint64_t crip_128k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_128k:1;
uint64_t reserved_35_63:29;
#endif
} cn31xx;
struct cvmx_l2d_fus3_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_36_63:28;
uint64_t crip_256k:1;
uint64_t crip_512k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_512k:1;
uint64_t crip_256k:1;
uint64_t reserved_36_63:28;
#endif
} cn38xx;
struct cvmx_l2d_fus3_cn38xx cn38xxp2;
struct cvmx_l2d_fus3_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
uint64_t ema_ctl:3;
uint64_t reserved_36_36:1;
uint64_t crip_32k:1;
uint64_t crip_64k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_64k:1;
uint64_t crip_32k:1;
uint64_t reserved_36_36:1;
uint64_t ema_ctl:3;
uint64_t reserved_40_63:24;
#endif
} cn50xx;
struct cvmx_l2d_fus3_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
uint64_t ema_ctl:3;
uint64_t reserved_36_36:1;
uint64_t crip_128k:1;
uint64_t crip_256k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_256k:1;
uint64_t crip_128k:1;
uint64_t reserved_36_36:1;
uint64_t ema_ctl:3;
uint64_t reserved_40_63:24;
#endif
} cn52xx;
struct cvmx_l2d_fus3_cn52xx cn52xxp1;
struct cvmx_l2d_fus3_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
uint64_t ema_ctl:3;
uint64_t reserved_36_36:1;
uint64_t crip_512k:1;
uint64_t crip_1024k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_1024k:1;
uint64_t crip_512k:1;
uint64_t reserved_36_36:1;
uint64_t ema_ctl:3;
uint64_t reserved_40_63:24;
#endif
} cn56xx;
struct cvmx_l2d_fus3_cn56xx cn56xxp1;
struct cvmx_l2d_fus3_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_39_63:25;
uint64_t ema_ctl:2;
uint64_t reserved_36_36:1;
uint64_t crip_512k:1;
uint64_t crip_1024k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_1024k:1;
uint64_t crip_512k:1;
uint64_t reserved_36_36:1;
uint64_t ema_ctl:2;
uint64_t reserved_39_63:25;
#endif
} cn58xx;
struct cvmx_l2d_fus3_cn58xx cn58xxp1;
};
#endif

View File

@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
* Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@ -28,210 +28,116 @@
#ifndef __CVMX_L2T_DEFS_H__
#define __CVMX_L2T_DEFS_H__
#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
#include <uapi/asm/bitfield.h>
#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
union cvmx_l2t_err {
uint64_t u64;
struct cvmx_l2t_err_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
uint64_t fadru:1;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t fset:3;
uint64_t fadr:10;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:10;
uint64_t fset:3;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t fadru:1;
uint64_t reserved_29_63:35;
#endif
__BITFIELD_FIELD(uint64_t reserved_29_63:35,
__BITFIELD_FIELD(uint64_t fadru:1,
__BITFIELD_FIELD(uint64_t lck_intena2:1,
__BITFIELD_FIELD(uint64_t lckerr2:1,
__BITFIELD_FIELD(uint64_t lck_intena:1,
__BITFIELD_FIELD(uint64_t lckerr:1,
__BITFIELD_FIELD(uint64_t fset:3,
__BITFIELD_FIELD(uint64_t fadr:10,
__BITFIELD_FIELD(uint64_t fsyn:6,
__BITFIELD_FIELD(uint64_t ded_err:1,
__BITFIELD_FIELD(uint64_t sec_err:1,
__BITFIELD_FIELD(uint64_t ded_intena:1,
__BITFIELD_FIELD(uint64_t sec_intena:1,
__BITFIELD_FIELD(uint64_t ecc_ena:1,
;))))))))))))))
} s;
struct cvmx_l2t_err_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t reserved_23_23:1;
uint64_t fset:2;
uint64_t reserved_19_20:2;
uint64_t fadr:8;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:8;
uint64_t reserved_19_20:2;
uint64_t fset:2;
uint64_t reserved_23_23:1;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
__BITFIELD_FIELD(uint64_t reserved_28_63:36,
__BITFIELD_FIELD(uint64_t lck_intena2:1,
__BITFIELD_FIELD(uint64_t lckerr2:1,
__BITFIELD_FIELD(uint64_t lck_intena:1,
__BITFIELD_FIELD(uint64_t lckerr:1,
__BITFIELD_FIELD(uint64_t reserved_23_23:1,
__BITFIELD_FIELD(uint64_t fset:2,
__BITFIELD_FIELD(uint64_t reserved_19_20:2,
__BITFIELD_FIELD(uint64_t fadr:8,
__BITFIELD_FIELD(uint64_t fsyn:6,
__BITFIELD_FIELD(uint64_t ded_err:1,
__BITFIELD_FIELD(uint64_t sec_err:1,
__BITFIELD_FIELD(uint64_t ded_intena:1,
__BITFIELD_FIELD(uint64_t sec_intena:1,
__BITFIELD_FIELD(uint64_t ecc_ena:1,
;)))))))))))))))
} cn30xx;
struct cvmx_l2t_err_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t reserved_23_23:1;
uint64_t fset:2;
uint64_t reserved_20_20:1;
uint64_t fadr:9;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:9;
uint64_t reserved_20_20:1;
uint64_t fset:2;
uint64_t reserved_23_23:1;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
__BITFIELD_FIELD(uint64_t reserved_28_63:36,
__BITFIELD_FIELD(uint64_t lck_intena2:1,
__BITFIELD_FIELD(uint64_t lckerr2:1,
__BITFIELD_FIELD(uint64_t lck_intena:1,
__BITFIELD_FIELD(uint64_t lckerr:1,
__BITFIELD_FIELD(uint64_t reserved_23_23:1,
__BITFIELD_FIELD(uint64_t fset:2,
__BITFIELD_FIELD(uint64_t reserved_20_20:1,
__BITFIELD_FIELD(uint64_t fadr:9,
__BITFIELD_FIELD(uint64_t fsyn:6,
__BITFIELD_FIELD(uint64_t ded_err:1,
__BITFIELD_FIELD(uint64_t sec_err:1,
__BITFIELD_FIELD(uint64_t ded_intena:1,
__BITFIELD_FIELD(uint64_t sec_intena:1,
__BITFIELD_FIELD(uint64_t ecc_ena:1,
;)))))))))))))))
} cn31xx;
struct cvmx_l2t_err_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t fset:3;
uint64_t fadr:10;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:10;
uint64_t fset:3;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
__BITFIELD_FIELD(uint64_t reserved_28_63:36,
__BITFIELD_FIELD(uint64_t lck_intena2:1,
__BITFIELD_FIELD(uint64_t lckerr2:1,
__BITFIELD_FIELD(uint64_t lck_intena:1,
__BITFIELD_FIELD(uint64_t lckerr:1,
__BITFIELD_FIELD(uint64_t fset:3,
__BITFIELD_FIELD(uint64_t fadr:10,
__BITFIELD_FIELD(uint64_t fsyn:6,
__BITFIELD_FIELD(uint64_t ded_err:1,
__BITFIELD_FIELD(uint64_t sec_err:1,
__BITFIELD_FIELD(uint64_t ded_intena:1,
__BITFIELD_FIELD(uint64_t sec_intena:1,
__BITFIELD_FIELD(uint64_t ecc_ena:1,
;)))))))))))))
} cn38xx;
struct cvmx_l2t_err_cn38xx cn38xxp2;
struct cvmx_l2t_err_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t fset:3;
uint64_t reserved_18_20:3;
uint64_t fadr:7;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:7;
uint64_t reserved_18_20:3;
uint64_t fset:3;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
__BITFIELD_FIELD(uint64_t reserved_28_63:36,
__BITFIELD_FIELD(uint64_t lck_intena2:1,
__BITFIELD_FIELD(uint64_t lckerr2:1,
__BITFIELD_FIELD(uint64_t lck_intena:1,
__BITFIELD_FIELD(uint64_t lckerr:1,
__BITFIELD_FIELD(uint64_t fset:3,
__BITFIELD_FIELD(uint64_t reserved_18_20:3,
__BITFIELD_FIELD(uint64_t fadr:7,
__BITFIELD_FIELD(uint64_t fsyn:6,
__BITFIELD_FIELD(uint64_t ded_err:1,
__BITFIELD_FIELD(uint64_t sec_err:1,
__BITFIELD_FIELD(uint64_t ded_intena:1,
__BITFIELD_FIELD(uint64_t sec_intena:1,
__BITFIELD_FIELD(uint64_t ecc_ena:1,
;))))))))))))))
} cn50xx;
struct cvmx_l2t_err_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t fset:3;
uint64_t reserved_20_20:1;
uint64_t fadr:9;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:9;
uint64_t reserved_20_20:1;
uint64_t fset:3;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
__BITFIELD_FIELD(uint64_t reserved_28_63:36,
__BITFIELD_FIELD(uint64_t lck_intena2:1,
__BITFIELD_FIELD(uint64_t lckerr2:1,
__BITFIELD_FIELD(uint64_t lck_intena:1,
__BITFIELD_FIELD(uint64_t lckerr:1,
__BITFIELD_FIELD(uint64_t fset:3,
__BITFIELD_FIELD(uint64_t reserved_20_20:1,
__BITFIELD_FIELD(uint64_t fadr:9,
__BITFIELD_FIELD(uint64_t fsyn:6,
__BITFIELD_FIELD(uint64_t ded_err:1,
__BITFIELD_FIELD(uint64_t sec_err:1,
__BITFIELD_FIELD(uint64_t ded_intena:1,
__BITFIELD_FIELD(uint64_t sec_intena:1,
__BITFIELD_FIELD(uint64_t ecc_ena:1,
;))))))))))))))
} cn52xx;
struct cvmx_l2t_err_cn52xx cn52xxp1;
struct cvmx_l2t_err_s cn56xx;

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
* Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@ -62,7 +62,6 @@ enum cvmx_mips_space {
#include <asm/octeon/cvmx-iob-defs.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-l2c-defs.h>
#include <asm/octeon/cvmx-l2d-defs.h>
#include <asm/octeon/cvmx-l2t-defs.h>
#include <asm/octeon/cvmx-led-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>

View File

@ -110,6 +110,32 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#endif
#ifndef __PAGETABLE_PUD_FOLDED
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
pud_t *pud;
pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER);
if (pud)
pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
return pud;
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
free_pages((unsigned long)pud, PUD_ORDER);
}
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
set_pgd(pgd, __pgd((unsigned long)pud));
}
#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x)
#endif /* __PAGETABLE_PUD_FOLDED */
#define check_pgt_cache() do { } while (0)
extern void pagetable_init(void);
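
The three helpers added above are what the generic pud_alloc() path expects once the PUD level is no longer folded; roughly how they chain together (illustrative only, locking and error paths trimmed):

/* Sketch: populate a missing PUD table under a pgd entry. */
static pud_t *demo_pud_install(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);

	if (pgd_none(*pgd)) {
		pud_t *new = pud_alloc_one(mm, addr);

		if (!new)
			return NULL;
		/* point the pgd slot at the freshly initialised table */
		pgd_populate(mm, pgd, new);
	}
	return pud_offset(pgd, addr);
}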

View File

@ -20,7 +20,7 @@
#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#else
#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
#include <asm-generic/pgtable-nopud.h>
#endif
@ -54,9 +54,18 @@
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
# ifdef __PAGETABLE_PUD_FOLDED
# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
# endif
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@ -79,8 +88,13 @@
* of virtual address space.
*/
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER 1
#define PUD_ORDER aieeee_attempt_to_allocate_pud
# ifdef CONFIG_MIPS_VA_BITS_48
# define PGD_ORDER 0
# define PUD_ORDER 0
# else
# define PGD_ORDER 1
# define PUD_ORDER aieeee_attempt_to_allocate_pud
# endif
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
@ -118,6 +132,9 @@
#endif
#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
@ -134,7 +151,7 @@
#define VMALLOC_START (MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END \
(MAP_BASE + \
min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
(1UL << cpu_vmbits)) - (1UL << 32))
#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
@ -150,12 +167,72 @@
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];
#ifndef __PAGETABLE_PUD_FOLDED
/*
* For 4-level pagetables we defines these ourselves, for 3-level the
* definitions are below, for 2-level the
* definitions are supplied by <asm-generic/pgtable-nopmd.h>.
*/
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) })
extern pud_t invalid_pud_table[PTRS_PER_PUD];
/*
* Empty pgd entries point to the invalid_pud_table.
*/
static inline int pgd_none(pgd_t pgd)
{
return pgd_val(pgd) == (unsigned long)invalid_pud_table;
}
static inline int pgd_bad(pgd_t pgd)
{
if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
return 1;
return 0;
}
static inline int pgd_present(pgd_t pgd)
{
return pgd_val(pgd) != (unsigned long)invalid_pud_table;
}
static inline void pgd_clear(pgd_t *pgdp)
{
pgd_val(*pgdp) = (unsigned long)invalid_pud_table;
}
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
return pgd_val(pgd);
}
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}
static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
{
*pgd = pgdval;
}
#endif
#ifndef __PAGETABLE_PMD_FOLDED
/*
@ -281,6 +358,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
* Initialize a new pgd / pmd table with invalid pointers.
*/
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);
/*

View File

@ -21,77 +21,46 @@
#define UASM_EXPORT_SYMBOL(sym)
#endif
#define _UASM_ISA_CLASSIC 0
#define _UASM_ISA_MICROMIPS 1
#ifndef UASM_ISA
#ifdef CONFIG_CPU_MICROMIPS
#define UASM_ISA _UASM_ISA_MICROMIPS
#else
#define UASM_ISA _UASM_ISA_CLASSIC
#endif
#endif
#if (UASM_ISA == _UASM_ISA_CLASSIC)
#ifdef CONFIG_CPU_MICROMIPS
#define ISAOPC(op) CL_uasm_i##op
#define ISAFUNC(x) CL_##x
#else
#define ISAOPC(op) uasm_i##op
#define ISAFUNC(x) x
#endif
#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
#ifdef CONFIG_CPU_MICROMIPS
#define ISAOPC(op) uasm_i##op
#define ISAFUNC(x) x
#else
#define ISAOPC(op) MM_uasm_i##op
#define ISAFUNC(x) MM_##x
#endif
#else
#error Unsupported micro-assembler ISA!!!
#endif
#define Ip_u1u2u3(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u2u1(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u1u2(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u1u2s3(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2s3u1(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
void uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
#define Ip_s3s1s2(op) \
void ISAOPC(op)(u32 **buf, int a, int b, int c)
void uasm_i##op(u32 **buf, int a, int b, int c)
#define Ip_u2u1s3(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2u1msbu3(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
unsigned int d)
#define Ip_u1u2(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
void uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u2u1(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
void uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u1s2(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
void uasm_i##op(u32 **buf, unsigned int a, signed int b)
#define Ip_u1(op) void ISAOPC(op)(u32 **buf, unsigned int a)
#define Ip_u1(op) void uasm_i##op(u32 **buf, unsigned int a)
#define Ip_0(op) void ISAOPC(op)(u32 **buf)
#define Ip_0(op) void uasm_i##op(u32 **buf)
Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu);
@ -138,6 +107,7 @@ Ip_u2s3u1(_lb);
Ip_u2s3u1(_ld);
Ip_u3u1u2(_ldx);
Ip_u2s3u1(_lh);
Ip_u2s3u1(_lhu);
Ip_u2s3u1(_ll);
Ip_u2s3u1(_lld);
Ip_u1s2(_lui);
@ -190,20 +160,20 @@ struct uasm_label {
int lab;
};
void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
void uasm_build_label(struct uasm_label **lab, u32 *addr,
int lid);
#ifdef CONFIG_64BIT
int ISAFUNC(uasm_in_compat_space_p)(long addr);
int uasm_in_compat_space_p(long addr);
#endif
int ISAFUNC(uasm_rel_hi)(long val);
int ISAFUNC(uasm_rel_lo)(long val);
void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
int uasm_rel_hi(long val);
int uasm_rel_lo(long val);
void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
#define UASM_L_LA(lb) \
static inline void ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
static inline void uasm_l##lb(struct uasm_label **lab, u32 *addr) \
{ \
ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
uasm_build_label(lab, addr, label##lb); \
}
/* convenience macros for instructions */
@ -255,27 +225,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
ISAOPC(_drotr)(p, a1, a2, a3);
uasm_i_drotr(p, a1, a2, a3);
else
ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
uasm_i_drotr32(p, a1, a2, a3 - 32);
}
static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
ISAOPC(_dsll)(p, a1, a2, a3);
uasm_i_dsll(p, a1, a2, a3);
else
ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
uasm_i_dsll32(p, a1, a2, a3 - 32);
}
static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
ISAOPC(_dsrl)(p, a1, a2, a3);
uasm_i_dsrl(p, a1, a2, a3);
else
ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
uasm_i_dsrl32(p, a1, a2, a3 - 32);
}
/* Handle relocations. */

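With the ISAOPC()/ISAFUNC() indirection removed, callers use the uasm_i_* names directly, and the header now declares an LHU emitter. A minimal hedged sketch of a hypothetical in-kernel caller follows (the buffer and register numbers are illustrative, not from this diff):

/* Emit "lhu $t1, 8($t0)" into a small buffer; operand order is
 * (rt, offset, base) per the Ip_u2s3u1() prototype above.
 */
#include <asm/uasm.h>

static u32 example_buf[4];

static void emit_lhu_example(void)
{
	u32 *p = example_buf;

	uasm_i_lhu(&p, 9 /* $t1 */, 8, 8 /* $t0 */);
}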

@ -34,6 +34,7 @@
/* Hardware capabilities */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
/*
* Get the FPU Implementation/Revision.
@ -1955,6 +1956,12 @@ void cpu_probe(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
/*
* Set a default elf platform, cpu probe may later
* overwrite it with a more precise value
*/
set_elf_platform(cpu, "mips");
c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;


@ -1096,10 +1096,20 @@ repeat:
}
break;
case beql_op:
case bnel_op:
case blezl_op:
case bgtzl_op:
/*
* For BLEZL and BGTZL, the rt field must be set to 0. If it is
* not, this may be an encoding of a MIPS R6 instruction, so
* return to CPU execution if this occurs.
*/
if (MIPSInst_RT(inst)) {
err = SIGILL;
break;
}
/* fall through */
case beql_op:
case bnel_op:
if (delay_slot(regs)) {
err = SIGILL;
break;
@ -2329,6 +2339,8 @@ static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
__this_cpu_write((mipsr2bremustats).bgezl, 0);
__this_cpu_write((mipsr2bremustats).bltzll, 0);
__this_cpu_write((mipsr2bremustats).bgezll, 0);
__this_cpu_write((mipsr2bremustats).bltzall, 0);
__this_cpu_write((mipsr2bremustats).bgezall, 0);
__this_cpu_write((mipsr2bremustats).bltzal, 0);
__this_cpu_write((mipsr2bremustats).bgezal, 0);
__this_cpu_write((mipsr2bremustats).beql, 0);


@ -114,8 +114,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
/*
* Copy architecture-specific thread state
*/
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long kthread_arg, struct task_struct *p)
int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
@ -176,7 +176,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
if (clone_flags & CLONE_SETTLS)
ti->tp_value = regs->regs[7];
ti->tp_value = tls;
return 0;
}


@ -25,12 +25,6 @@
/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
#undef fp
/*
* Offset to the current process status flags, the first 32 bytes of the
* stack are not used.
*/
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
#ifndef USE_ALTERNATE_RESUME_IMPL
/*
* task_struct *resume(task_struct *prev, task_struct *next,


@ -8,6 +8,7 @@
* option) any later version.
*/
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
@ -408,7 +409,6 @@ static int cps_cpu_disable(void)
return 0;
}
static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
CPU_DEATH_HALT,
@ -443,7 +443,7 @@ void play_dead(void)
}
/* This CPU has chosen its way out */
complete(&cpu_death_chosen);
(void)cpu_report_death();
if (cpu_death == CPU_DEATH_HALT) {
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
@ -492,8 +492,7 @@ static void cps_cpu_die(unsigned int cpu)
int err;
/* Wait for the cpu to choose its way out */
if (!wait_for_completion_timeout(&cpu_death_chosen,
msecs_to_jiffies(5000))) {
if (!cpu_wait_death(cpu, 5)) {
pr_err("CPU%u: didn't offline\n", cpu);
return;
}


@ -83,6 +83,8 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
if (tc != 0)
smvp_copy_vpe_config();
cpu_data[ncpu].vpe_id = tc;
return ncpu;
}
@ -114,49 +116,6 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
write_tc_c0_tchalt(TCHALT_H);
}
static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
int i;
unsigned long flags;
int vpflags;
#ifdef CONFIG_MIPS_GIC
if (gic_present) {
mips_smp_send_ipi_single(cpu, action);
return;
}
#endif
local_irq_save(flags);
vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
switch (action) {
case SMP_CALL_FUNCTION:
i = C_SW1;
break;
case SMP_RESCHEDULE_YOURSELF:
default:
i = C_SW0;
break;
}
/* 1:1 mapping of vpe and tc... */
settc(cpu);
write_vpe_c0_cause(read_vpe_c0_cause() | i);
evpe(vpflags);
local_irq_restore(flags);
}
static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
vsmp_send_ipi_single(i, action);
}
static void vsmp_init_secondary(void)
{
#ifdef CONFIG_MIPS_GIC
@ -281,8 +240,8 @@ static void __init vsmp_prepare_cpus(unsigned int max_cpus)
}
struct plat_smp_ops vsmp_smp_ops = {
.send_ipi_single = vsmp_send_ipi_single,
.send_ipi_mask = vsmp_send_ipi_mask,
.send_ipi_single = mips_smp_send_ipi_single,
.send_ipi_mask = mips_smp_send_ipi_mask,
.init_secondary = vsmp_init_secondary,
.smp_finish = vsmp_smp_finish,
.boot_secondary = vsmp_boot_secondary,


@ -261,16 +261,20 @@ int mips_smp_ipi_allocate(const struct cpumask *mask)
ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
/*
* There are systems which only use IPI domains some of the time,
* depending upon configuration we don't know until runtime. An
* example is Malta where we may compile in support for GIC & the
* MT ASE, but run on a system which has multiple VPEs in a single
* core and doesn't include a GIC. Until all IPI implementations
* have been converted to use IPI domains the best we can do here
* is to return & hope some other code sets up the IPIs.
* There are systems which use IPI IRQ domains, but only have one
* registered when some runtime condition is met. For example a Malta
* kernel may include support for GIC & CPU interrupt controller IPI
* IRQ domains, but if run on a system with no GIC & no MT ASE then
* neither will be supported or registered.
*
* We only have a problem if we're actually using multiple CPUs so fail
* loudly if that is the case. Otherwise simply return, skipping IPI
* setup, if we're running with only a single CPU.
*/
if (!ipidomain)
if (!ipidomain) {
BUG_ON(num_present_cpus() > 1);
return 0;
}
virq = irq_reserve_ipi(ipidomain, mask);
BUG_ON(!virq);


@ -274,47 +274,6 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
}
#ifdef CONFIG_MIPS_MT_SMP
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
setup_irq(irq, action);
irq_set_handler(irq, handle_percpu_irq);
}
static void ltq_sw0_irqdispatch(void)
{
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
}
static void ltq_sw1_irqdispatch(void)
{
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
scheduler_ipi();
return IRQ_HANDLED;
}
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
static struct irqaction irq_resched = {
.handler = ipi_resched_interrupt,
.flags = IRQF_PERCPU,
.name = "IPI_resched"
};
static struct irqaction irq_call = {
.handler = ipi_call_interrupt,
.flags = IRQF_PERCPU,
.name = "IPI_call"
};
#endif
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
@ -402,17 +361,6 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0);
#if defined(CONFIG_MIPS_MT_SMP)
if (cpu_has_vint) {
pr_info("Setting up IPI vectored interrupts\n");
set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
}
arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
&irq_resched);
arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
#endif
#ifndef CONFIG_MIPS_MT_SMP
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
IE_IRQ3 | IE_IRQ4 | IE_IRQ5);


@ -439,6 +439,8 @@ int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
unsigned int fcr31;
unsigned int bit = 0;
unsigned int bit0;
union fpureg *fpr;
switch (insn.i_format.opcode) {
case spec_op:
@ -706,14 +708,14 @@ int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
((insn.i_format.rs == bc1eqz_op) ||
(insn.i_format.rs == bc1nez_op))) {
bit = 0;
fpr = &current->thread.fpu.fpr[insn.i_format.rt];
bit0 = get_fpr32(fpr, 0) & 0x1;
switch (insn.i_format.rs) {
case bc1eqz_op:
if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
bit = 1;
bit = bit0 == 0;
break;
case bc1nez_op:
if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
bit = 1;
bit = bit0 != 0;
break;
}
if (bit)

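The change above makes the emulator take a BC1EQZ branch when bit 0 of FPR[rt] is clear and a BC1NEZ branch when it is set; previously both senses were inverted. A standalone restatement of the corrected condition (a sketch only, not the kernel helper itself):

/* bit0 is bit 0 of FPR[rt]; returns non-zero if the branch is taken. */
static int r6_cp1_branch_taken(int is_bc1eqz, unsigned int bit0)
{
	return is_bc1eqz ? (bit0 == 0) : (bit0 != 0);
}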

@ -267,19 +267,19 @@ do_sigbus:
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
else
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
#if 0
printk("do_page_fault() #3: sending SIGBUS to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
printk("do_page_fault() #3: sending SIGBUS to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
tsk->thread.cp0_badvaddr = address;


@ -537,6 +537,9 @@ unsigned long pgd_current[NR_CPUS];
* it in the linker script.
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);


@ -19,10 +19,12 @@ void pgd_init(unsigned long page)
unsigned long *p, *end;
unsigned long entry;
#ifdef __PAGETABLE_PMD_FOLDED
entry = (unsigned long)invalid_pte_table;
#else
#if !defined(__PAGETABLE_PUD_FOLDED)
entry = (unsigned long)invalid_pud_table;
#elif !defined(__PAGETABLE_PMD_FOLDED)
entry = (unsigned long)invalid_pmd_table;
#else
entry = (unsigned long)invalid_pte_table;
#endif
p = (unsigned long *) page;
@ -64,6 +66,28 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
EXPORT_SYMBOL_GPL(pmd_init);
#endif
#ifndef __PAGETABLE_PUD_FOLDED
void pud_init(unsigned long addr, unsigned long pagetable)
{
unsigned long *p, *end;
p = (unsigned long *)addr;
end = p + PTRS_PER_PUD;
do {
p[0] = pagetable;
p[1] = pagetable;
p[2] = pagetable;
p[3] = pagetable;
p[4] = pagetable;
p += 8;
p[-3] = pagetable;
p[-2] = pagetable;
p[-1] = pagetable;
} while (p != end);
}
#endif
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
@ -87,6 +111,9 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
#endif


@ -865,6 +865,13 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PUD_FOLDED
uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
#endif
#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
@ -1184,6 +1191,21 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
}
#ifndef __PAGETABLE_PUD_FOLDED
/* get pud offset in bytes */
uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
if (use_lwx_insns()) {
UASM_i_LWX(p, ptr, scratch, ptr);
} else {
uasm_i_daddu(p, ptr, ptr, scratch); /* add in pud offset */
UASM_i_LW(p, ptr, 0, ptr);
}
/* ptr contains a pointer to PMD entry */
/* tmp contains the address */
#endif
#ifndef __PAGETABLE_PMD_FOLDED
/* get pmd offset in bytes */
uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);


@ -103,6 +103,7 @@ static struct insn insn_table[] = {
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
{ insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lhu, M(lhu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
#ifndef CONFIG_CPU_MIPSR6
{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },


@ -61,7 +61,7 @@ enum opcode {
insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, insn_srl,
insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp,
insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor,
insn_xori, insn_yield, insn_lddir, insn_ldpte,
insn_xori, insn_yield, insn_lddir, insn_ldpte, insn_lhu,
};
struct insn {
@ -297,6 +297,7 @@ I_u1(_jr)
I_u2s3u1(_lb)
I_u2s3u1(_ld)
I_u2s3u1(_lh)
I_u2s3u1(_lhu)
I_u2s3u1(_ll)
I_u2s3u1(_lld)
I_u1s2(_lui)
@ -349,7 +350,7 @@ I_u2u1u3(_lddir)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
void uasm_i_pref(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
@ -361,26 +362,26 @@ void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
else
build_insn(buf, insn_pref, c, a, b);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
UASM_EXPORT_SYMBOL(uasm_i_pref);
#else
I_u2s3u1(_pref)
#endif
/* Handle labels. */
void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
void uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
UASM_EXPORT_SYMBOL(uasm_build_label);
int ISAFUNC(uasm_in_compat_space_p)(long addr)
int uasm_in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
return addr == (int)addr;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
static int uasm_rel_highest(long val)
{
@ -400,64 +401,64 @@ static int uasm_rel_higher(long val)
#endif
}
int ISAFUNC(uasm_rel_hi)(long val)
int uasm_rel_hi(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
UASM_EXPORT_SYMBOL(uasm_rel_hi);
int ISAFUNC(uasm_rel_lo)(long val)
int uasm_rel_lo(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
UASM_EXPORT_SYMBOL(uasm_rel_lo);
void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
if (!uasm_in_compat_space_p(addr)) {
uasm_i_lui(buf, rs, uasm_rel_highest(addr));
if (uasm_rel_higher(addr))
ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
if (ISAFUNC(uasm_rel_hi(addr))) {
ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
ISAFUNC(uasm_rel_hi)(addr));
ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
if (uasm_rel_hi(addr)) {
uasm_i_dsll(buf, rs, rs, 16);
uasm_i_daddiu(buf, rs, rs,
uasm_rel_hi(addr));
uasm_i_dsll(buf, rs, rs, 16);
} else
ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
uasm_i_dsll32(buf, rs, rs, 0);
} else
ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
void UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
if (ISAFUNC(uasm_rel_lo(addr))) {
if (!ISAFUNC(uasm_in_compat_space_p)(addr))
ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
ISAFUNC(uasm_rel_lo(addr)));
UASM_i_LA_mostly(buf, rs, addr);
if (uasm_rel_lo(addr)) {
if (!uasm_in_compat_space_p(addr))
uasm_i_daddiu(buf, rs, rs,
uasm_rel_lo(addr));
else
ISAFUNC(uasm_i_addiu)(buf, rs, rs,
ISAFUNC(uasm_rel_lo(addr)));
uasm_i_addiu(buf, rs, rs,
uasm_rel_lo(addr));
}
}
UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
UASM_EXPORT_SYMBOL(UASM_i_LA);
/* Handle relocations. */
void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16;
(*rel)->lab = lid;
(*rel)++;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
static inline void __resolve_relocs(struct uasm_reloc *rel,
struct uasm_label *lab);
void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
void uasm_resolve_relocs(struct uasm_reloc *rel,
struct uasm_label *lab)
{
struct uasm_label *l;
@ -467,39 +468,39 @@ void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
if (rel->lab == l->lab)
__resolve_relocs(rel, l);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end,
long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
rel->addr += off;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
UASM_EXPORT_SYMBOL(uasm_move_relocs);
void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end,
long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
lab->addr += off;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
UASM_EXPORT_SYMBOL(uasm_move_labels);
void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
u32 *first, u32 *end, u32 *target)
{
long off = (long)(target - first);
memcpy(target, first, (end - first) * sizeof(u32));
ISAFUNC(uasm_move_relocs(rel, first, end, off));
ISAFUNC(uasm_move_labels(lab, first, end, off));
uasm_move_relocs(rel, first, end, off);
uasm_move_labels(lab, first, end, off);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
UASM_EXPORT_SYMBOL(uasm_copy_handler);
int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
@ -510,92 +511,92 @@ int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
return 0;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
/* Convenience functions for labeled branches. */
void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bltz)(p, reg, 0);
uasm_i_bltz(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
UASM_EXPORT_SYMBOL(uasm_il_bltz);
void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_b)(p, 0);
uasm_i_b(p, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
UASM_EXPORT_SYMBOL(uasm_il_b);
void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1,
void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
unsigned int r2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_beq)(p, r1, r2, 0);
uasm_i_beq(p, r1, r2, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq));
UASM_EXPORT_SYMBOL(uasm_il_beq);
void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_beqz)(p, reg, 0);
uasm_i_beqz(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
UASM_EXPORT_SYMBOL(uasm_il_beqz);
void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_beqzl)(p, reg, 0);
uasm_i_beqzl(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
UASM_EXPORT_SYMBOL(uasm_il_beqzl);
void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
uasm_i_bne(p, reg1, reg2, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
UASM_EXPORT_SYMBOL(uasm_il_bne);
void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bnez)(p, reg, 0);
uasm_i_bnez(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
UASM_EXPORT_SYMBOL(uasm_il_bnez);
void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bgezl)(p, reg, 0);
uasm_i_bgezl(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
UASM_EXPORT_SYMBOL(uasm_il_bgezl);
void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bgez)(p, reg, 0);
uasm_i_bgez(p, reg, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
UASM_EXPORT_SYMBOL(uasm_il_bgez);
void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
uasm_i_bbit0(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
UASM_EXPORT_SYMBOL(uasm_il_bbit0);
void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
uasm_i_bbit1(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
UASM_EXPORT_SYMBOL(uasm_il_bbit1);


@ -145,56 +145,6 @@ static irqreturn_t corehi_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
#ifdef CONFIG_MIPS_MT_SMP
#define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */
#define C_RESCHED C_SW0
#define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for resched */
#define C_CALL C_SW1
static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
static void ipi_resched_dispatch(void)
{
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
}
static void ipi_call_dispatch(void)
{
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
if (aprp_hook)
aprp_hook();
#endif
scheduler_ipi();
return IRQ_HANDLED;
}
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
static struct irqaction irq_resched = {
.handler = ipi_resched_interrupt,
.flags = IRQF_PERCPU,
.name = "IPI_resched"
};
static struct irqaction irq_call = {
.handler = ipi_call_interrupt,
.flags = IRQF_PERCPU,
.name = "IPI_call"
};
#endif /* CONFIG_MIPS_MT_SMP */
static struct irqaction corehi_irqaction = {
.handler = corehi_handler,
.name = "CoreHi",
@ -222,12 +172,6 @@ static msc_irqmap_t msc_eicirqmap[] __initdata = {
static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap);
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
setup_irq(irq, action);
irq_set_handler(irq, handle_percpu_irq);
}
void __init arch_init_irq(void)
{
int corehi_irq;
@ -273,30 +217,11 @@ void __init arch_init_irq(void)
if (gic_present) {
corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
} else if (cpu_has_veic) {
set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch);
corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
} else {
#if defined(CONFIG_MIPS_MT_SMP)
/* set up ipi interrupts */
if (cpu_has_veic) {
set_vi_handler (MSC01E_INT_SW0, ipi_resched_dispatch);
set_vi_handler (MSC01E_INT_SW1, ipi_call_dispatch);
cpu_ipi_resched_irq = MSC01E_INT_SW0;
cpu_ipi_call_irq = MSC01E_INT_SW1;
} else {
cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE +
MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE +
MIPS_CPU_IPI_CALL_IRQ;
}
arch_init_ipiirq(cpu_ipi_resched_irq, &irq_resched);
arch_init_ipiirq(cpu_ipi_call_irq, &irq_call);
#endif
if (cpu_has_veic) {
set_vi_handler(MSC01E_INT_COREHI,
corehi_irqdispatch);
corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
} else {
corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
}
corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
}
setup_irq(corehi_irq, &corehi_irqaction);


@ -365,6 +365,12 @@ static inline void emit_half_load(unsigned int reg, unsigned int base,
emit_instr(ctx, lh, reg, offset, base);
}
static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
unsigned int offset, struct jit_ctx *ctx)
{
emit_instr(ctx, lhu, reg, offset, base);
}
static inline void emit_mul(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
@ -526,7 +532,8 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
u32 sflags, tmp_flags;
/* Adjust the stack pointer */
emit_stack_offset(-align_sp(offset), ctx);
if (offset)
emit_stack_offset(-align_sp(offset), ctx);
tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
/* sflags is essentially a bitmap */
@ -578,7 +585,8 @@ static void restore_bpf_jit_regs(struct jit_ctx *ctx,
emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
/* Restore the sp and discard the scratch memory */
emit_stack_offset(align_sp(offset), ctx);
if (offset)
emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
@ -625,8 +633,14 @@ static void build_prologue(struct jit_ctx *ctx)
if (ctx->flags & SEEN_X)
emit_jit_reg_move(r_X, r_zero, ctx);
/* Do not leak kernel data to userspace */
if (bpf_needs_clear_a(&ctx->skf->insns[0]))
/*
* Do not leak kernel data to userspace, we only need to clear
* r_A if it is ever used. In fact if it is never used, we
* will not save/restore it, so clearing it in this case would
* corrupt the state of the caller.
*/
if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
(ctx->flags & SEEN_A))
emit_jit_reg_move(r_A, r_zero, ctx);
}
@ -1112,6 +1126,8 @@ jmp_cmp:
break;
case BPF_ANC | SKF_AD_IFINDEX:
/* A = skb->dev->ifindex */
case BPF_ANC | SKF_AD_HATYPE:
/* A = skb->dev->type */
ctx->flags |= SEEN_SKB | SEEN_A;
off = offsetof(struct sk_buff, dev);
/* Load *dev pointer */
@ -1120,10 +1136,15 @@ jmp_cmp:
emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
b_imm(prog->len, ctx), ctx);
emit_reg_move(r_ret, r_zero, ctx);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
ifindex) != 4);
off = offsetof(struct net_device, ifindex);
emit_load(r_A, r_s0, off, ctx);
if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
off = offsetof(struct net_device, ifindex);
emit_load(r_A, r_s0, off, ctx);
} else { /* code == (BPF_ANC | SKF_AD_HATYPE) */
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
off = offsetof(struct net_device, type);
emit_half_load_unsigned(r_A, r_s0, off, ctx);
}
break;
case BPF_ANC | SKF_AD_MARK:
ctx->flags |= SEEN_SKB | SEEN_A;
@ -1143,7 +1164,7 @@ jmp_cmp:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
vlan_tci) != 2);
off = offsetof(struct sk_buff, vlan_tci);
emit_half_load(r_s0, r_skb, off, ctx);
emit_half_load_unsigned(r_s0, r_skb, off, ctx);
if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
} else {
@ -1170,7 +1191,7 @@ jmp_cmp:
BUILD_BUG_ON(offsetof(struct sk_buff,
queue_mapping) > 0xff);
off = offsetof(struct sk_buff, queue_mapping);
emit_half_load(r_A, r_skb, off, ctx);
emit_half_load_unsigned(r_A, r_skb, off, ctx);
break;
default:
pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,

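The switch from emit_half_load() to emit_half_load_unsigned() for vlan_tci, queue_mapping and dev->type matters because these are unsigned 16-bit fields: a sign-extending lh would smear bit 15 into the upper half of the register. A small user-space illustration (values purely illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vlan_tci = 0x9123;                     /* bit 15 set */
	uint32_t via_lh  = (uint32_t)(int16_t)vlan_tci; /* sign-extended */
	uint32_t via_lhu = vlan_tci;                    /* zero-extended */

	printf("lh  -> 0x%08x\n", (unsigned)via_lh);    /* 0xffff9123 */
	printf("lhu -> 0x%08x\n", (unsigned)via_lhu);   /* 0x00009123 */
	return 0;
}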

@ -90,18 +90,14 @@ FEXPORT(sk_load_half_positive)
is_offset_in_header(2, half)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
.set reorder
lh $r_A, 0(t1)
.set noreorder
lhu $r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_A
seh $r_A, t0
wsbh $r_A, $r_A
# else
sll t0, $r_A, 24
andi t1, $r_A, 0xff00
sra t0, t0, 16
srl t1, t1, 8
sll t0, $r_A, 8
srl t1, $r_A, 8
andi t0, t0, 0xff00
or $r_A, t0, t1
# endif
#endif
@ -115,7 +111,7 @@ FEXPORT(sk_load_byte_positive)
is_offset_in_header(1, byte)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
lb $r_A, 0(t1)
lbu $r_A, 0(t1)
jr $r_ra
move $r_ret, zero
END(sk_load_byte)
@ -139,6 +135,11 @@ FEXPORT(sk_load_byte_positive)
* (void *to) is returned in r_s0
*
*/
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define DS_OFFSET(SIZE) (4 * SZREG)
#else
#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
#endif
#define bpf_slow_path_common(SIZE) \
/* Quick check. Are we within reasonable boundaries? */ \
LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
@ -150,7 +151,7 @@ FEXPORT(sk_load_byte_positive)
PTR_LA t0, skb_copy_bits; \
PTR_S $r_ra, (5 * SZREG)($r_sp); \
/* Assign low slot to a2 */ \
move a2, $r_sp; \
PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
jalr t0; \
/* Reset our destination slot (DS but it's ok) */ \
INT_S zero, (4 * SZREG)($r_sp); \

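The little-endian fallback path above now loads the halfword with lhu and swaps its two bytes without sign extension. An equivalent C expression of the new sll/srl/andi/or sequence (a sketch; it assumes the input was zero-extended from 16 bits, which the lhu guarantees):

#include <stdint.h>

static uint32_t swap16_zero_extended(uint32_t a)
{
	uint32_t t0 = (a << 8) & 0xff00;  /* sll t0, a, 8 ; andi t0, t0, 0xff00 */
	uint32_t t1 = a >> 8;             /* srl t1, a, 8 */

	return t0 | t1;                   /* or a, t0, t1 */
}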

@ -679,7 +679,7 @@ static void __cvmx_increment_ba(union cvmx_sli_mem_access_subidx *pmas)
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
pmas->cn68xx.ba++;
else
pmas->cn63xx.ba++;
pmas->s.ba++;
}
/**
@ -1351,7 +1351,7 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
mem_access_subid.cn68xx.ba = 0;
else
mem_access_subid.cn63xx.ba = 0;
mem_access_subid.s.ba = 0;
/*
* Setup mem access 12-15 for port 0, 16-19 for port 1,


@ -36,6 +36,7 @@ unsigned int soc_pass;
unsigned int soc_type;
EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
EXPORT_SYMBOL_GPL(periph_rev);
unsigned int zbbus_mhz;
EXPORT_SYMBOL(zbbus_mhz);


@ -34,6 +34,7 @@ unsigned int soc_pass;
unsigned int soc_type;
EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
EXPORT_SYMBOL_GPL(periph_rev);
unsigned int zbbus_mhz;
EXPORT_SYMBOL(zbbus_mhz);


@ -51,19 +51,12 @@ static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
unsigned int cpu = policy->cpu;
cpumask_t cpus_allowed;
unsigned int freq;
cpus_allowed = current->cpus_allowed;
set_cpus_allowed_ptr(current, cpumask_of(cpu));
freq =
((cpu_clock_freq / 1000) *
loongson2_clockmod_table[index].driver_data) / 8;
set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
clk_set_rate(policy->clk, freq * 1000);


@ -134,7 +134,9 @@ config IMGPDC_IRQ
config IRQ_MIPS_CPU
bool
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
config CLPS711X_IRQCHIP
bool


@ -17,15 +17,14 @@
/*
* Almost all MIPS CPUs define 8 interrupt sources. They are typically
* level triggered (i.e., cannot be cleared from CPU; must be cleared from
* device). The first two are software interrupts which we don't really
* use or support. The last one is usually the CPU timer interrupt if
* counter register is present or, for CPUs with an external FPU, by
* convention it's the FPU exception interrupt.
* device).
*
* Don't even think about using this on SMP. You have been warned.
* The first two are software interrupts (i.e. not exposed as pins) which
* may be used for IPIs in multi-threaded single-core systems.
*
* This file exports one global function:
* void mips_cpu_irq_init(void);
* The last one is usually the CPU timer interrupt if the counter register
* is present, or for old CPUs with an external FPU by convention it's the
* FPU exception interrupt.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
@ -39,15 +38,18 @@
#include <asm/mipsmtregs.h>
#include <asm/setup.h>
static struct irq_domain *irq_domain;
static struct irq_domain *ipi_domain;
static inline void unmask_mips_irq(struct irq_data *d)
{
set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
set_c0_status(IE_SW0 << d->hwirq);
irq_enable_hazard();
}
static inline void mask_mips_irq(struct irq_data *d)
{
clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
clear_c0_status(IE_SW0 << d->hwirq);
irq_disable_hazard();
}
@ -70,7 +72,7 @@ static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
unsigned int vpflags = dvpe();
clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
clear_c0_cause(C_SW0 << d->hwirq);
evpe(vpflags);
unmask_mips_irq(d);
return 0;
@ -83,11 +85,34 @@ static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
unsigned int vpflags = dvpe();
clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
clear_c0_cause(C_SW0 << d->hwirq);
evpe(vpflags);
mask_mips_irq(d);
}
#ifdef CONFIG_GENERIC_IRQ_IPI
static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
{
irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
int vpflags;
local_irq_save(flags);
/* We can only send IPIs to VPEs within the local core */
WARN_ON(cpu_data[cpu].core != current_cpu_data.core);
vpflags = dvpe();
settc(cpu_vpe_id(&cpu_data[cpu]));
write_vpe_c0_cause(read_vpe_c0_cause() | (C_SW0 << hwirq));
evpe(vpflags);
local_irq_restore(flags);
}
#endif /* CONFIG_GENERIC_IRQ_IPI */
static struct irq_chip mips_mt_cpu_irq_controller = {
.name = "MIPS",
.irq_startup = mips_mt_cpu_irq_startup,
@ -98,11 +123,15 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
.irq_eoi = unmask_mips_irq,
.irq_disable = mask_mips_irq,
.irq_enable = unmask_mips_irq,
#ifdef CONFIG_GENERIC_IRQ_IPI
.ipi_send_single = mips_mt_send_ipi,
#endif
};
asmlinkage void __weak plat_irq_dispatch(void)
{
unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
unsigned int virq;
int irq;
if (!pending) {
@ -113,7 +142,11 @@ asmlinkage void __weak plat_irq_dispatch(void)
pending >>= CAUSEB_IP;
while (pending) {
irq = fls(pending) - 1;
do_IRQ(MIPS_CPU_IRQ_BASE + irq);
if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
virq = irq_linear_revmap(ipi_domain, irq);
else
virq = irq_linear_revmap(irq_domain, irq);
do_IRQ(virq);
pending &= ~BIT(irq);
}
}
@ -143,18 +176,97 @@ static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
#ifdef CONFIG_GENERIC_IRQ_IPI
struct cpu_ipi_domain_state {
DECLARE_BITMAP(allocated, 2);
};
static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct cpu_ipi_domain_state *state = domain->host_data;
unsigned int i, hwirq;
int ret;
for (i = 0; i < nr_irqs; i++) {
hwirq = find_first_zero_bit(state->allocated, 2);
if (hwirq == 2)
return -EBUSY;
bitmap_set(state->allocated, hwirq, 1);
ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq,
&mips_mt_cpu_irq_controller,
NULL);
if (ret)
return ret;
ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
if (ret)
return ret;
}
return 0;
}
static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
bool is_ipi;
switch (bus_token) {
case DOMAIN_BUS_IPI:
is_ipi = d->bus_token == bus_token;
return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
default:
return 0;
}
}
static const struct irq_domain_ops mips_cpu_ipi_chip_ops = {
.alloc = mips_cpu_ipi_alloc,
.match = mips_cpu_ipi_match,
};
static void mips_cpu_register_ipi_domain(struct device_node *of_node)
{
struct cpu_ipi_domain_state *ipi_domain_state;
ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
ipi_domain = irq_domain_add_hierarchy(irq_domain,
IRQ_DOMAIN_FLAG_IPI_SINGLE,
2, of_node,
&mips_cpu_ipi_chip_ops,
ipi_domain_state);
if (!ipi_domain)
panic("Failed to add MIPS CPU IPI domain");
ipi_domain->bus_token = DOMAIN_BUS_IPI;
}
#else /* !CONFIG_GENERIC_IRQ_IPI */
static inline void mips_cpu_register_ipi_domain(struct device_node *of_node) {}
#endif /* !CONFIG_GENERIC_IRQ_IPI */
static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
struct irq_domain *domain;
/* Mask interrupts. */
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP);
domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
&mips_cpu_intc_irq_domain_ops, NULL);
if (!domain)
irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
&mips_cpu_intc_irq_domain_ops,
NULL);
if (!irq_domain)
panic("Failed to add irqdomain for MIPS CPU");
/*
* Only proceed to register the software interrupt IPI implementation
* for CPUs which implement the MIPS MT (multi-threading) ASE.
*/
if (cpu_has_mipsmt)
mips_cpu_register_ipi_domain(of_node);
}
void __init mips_cpu_irq_init(void)


@ -2641,3 +2641,4 @@ static struct platform_driver sbmac_driver = {
};
module_platform_driver(sbmac_driver);
MODULE_LICENSE("GPL");