
sh: Encode L1/L2 cache shape in auxvt.

This adds L1I/L1D/L2 cache shape support to the respective entries in
the ELF auxiliary vector, based on the Alpha implementation. Userspace
libc uses this for calculating a tightly packed SHMLBA, amongst other
things.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt 2007-12-10 15:50:28 +09:00
parent c8c0a1aba9
commit cd01204b82
5 changed files with 60 additions and 5 deletions
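
For context on the userspace side mentioned in the commit message, here is a
minimal, hypothetical sketch of how a libc or application could consume the
new entry and derive a tightly packed SHMLBA. It assumes a libc that provides
getauxval() (a much later glibc addition; a 2007-era libc would walk the
auxiliary vector by hand) and defines the SH-specific AT_* value locally:

#include <stdio.h>
#include <unistd.h>
#include <sys/auxv.h>	/* getauxval(), glibc >= 2.16 */

/* Arch-specific value, from include/asm-sh/auxvec.h in this patch */
#define AT_L1D_CACHESHAPE	35

int main(void)
{
	unsigned long shape = getauxval(AT_L1D_CACHESHAPE);
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);

	if (shape == 0 || (long)shape == -1) {
		puts("no L1D cache shape in auxv");
		return 0;
	}

	unsigned int  assoc = shape & 0xf;	/* bits 0-3 */
	unsigned long size  = shape & ~0xffUL;	/* bits 8-31 store size >> 8 */

	/*
	 * A virtually indexed cache aliases at way-size granularity, so a
	 * tightly packed SHMLBA only needs one way (size / assoc), clamped
	 * to the page size -- roughly what sh_cpu_init() does for
	 * shm_align_mask in this patch.
	 */
	unsigned long shmlba = assoc ? size / assoc : size;
	if (shmlba < page_size)
		shmlba = page_size;

	printf("L1D %lu bytes, %u-way -> SHMLBA %lu\n", size, assoc, shmlba);
	return 0;
}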

arch/sh/kernel/cpu/init.c

@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/log2.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
@@ -20,6 +21,7 @@
 #include <asm/system.h>
 #include <asm/cacheflush.h>
 #include <asm/cache.h>
+#include <asm/elf.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 #ifdef CONFIG_SUPERH32
@@ -151,6 +153,27 @@ static void __uses_jump_to_uncached cache_init(void)
 #define cache_init()		do { } while (0)
 #endif
 
+#define CSHAPE(totalsize, linesize, assoc) \
+	((totalsize & ~0xff) | (linesize << 4) | assoc)
+
+#define CACHE_DESC_SHAPE(desc)	\
+	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
+
+static void detect_cache_shape(void)
+{
+	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
+
+	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
+		l1i_cache_shape = l1d_cache_shape;
+	else
+		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
+
+	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
+		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
+	else
+		l2_cache_shape = -1; /* No S-cache */
+}
+
 #ifdef CONFIG_SH_DSP
 static void __init release_dsp(void)
 {
@@ -237,11 +260,15 @@ asmlinkage void __cpuinit sh_cpu_init(void)
 	/* Init the cache */
 	cache_init();
 
-	if (raw_smp_processor_id() == 0)
+	if (raw_smp_processor_id() == 0) {
 		shm_align_mask = max_t(unsigned long,
 				       current_cpu_data.dcache.way_size - 1,
 				       PAGE_SIZE - 1);
 
+		/* Boot CPU sets the cache shape */
+		detect_cache_shape();
+	}
+
 	/* Disable the FPU */
 	if (fpu_disabled) {
 		printk("FPU Disabled\n");
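
To make the CSHAPE() encoding above concrete, here is a small, self-contained
host-side sketch (not part of the patch) that replicates the macro and decodes
the result for a hypothetical 16 KiB, 4-way cache with 32-byte lines
(log2(32) = 5, matching the ilog2() argument in CACHE_DESC_SHAPE):

#include <stdio.h>

#define CSHAPE(totalsize, linesize, assoc) \
	((totalsize & ~0xff) | (linesize << 4) | assoc)

int main(void)
{
	/* 16 KiB total, log2(32-byte line) = 5, 4-way */
	unsigned long shape = CSHAPE(16384, 5, 4);

	printf("shape  = %#lx\n", shape);			/* 0x4054 */
	printf("assoc  = %lu\n", shape & 0xf);			/* 4 */
	printf("linesz = %lu\n", 1UL << ((shape >> 4) & 0xf));	/* 32 */
	printf("size   = %lu\n", shape & ~0xffUL);		/* 16384 */
	return 0;
}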

arch/sh/kernel/setup.c

@@ -26,6 +26,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/page.h>
+#include <asm/elf.h>
 #include <asm/sections.h>
 #include <asm/irq.h>
 #include <asm/setup.h>
@@ -78,6 +79,8 @@ EXPORT_SYMBOL(memory_start);
 unsigned long memory_end = 0;
 EXPORT_SYMBOL(memory_end);
 
+int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
+
 static int __init early_parse_mem(char *p)
 {
 	unsigned long size;

include/asm-sh/auxvec.h

@@ -15,4 +15,16 @@
 #define AT_SYSINFO_EHDR	33
 #endif
 
+/*
+ * More complete cache descriptions than AT_[DIU]CACHEBSIZE.  If the
+ * value is -1, then the cache doesn't exist.  Otherwise:
+ *
+ *   bit 0-3:	  Cache set-associativity; 0 means fully associative.
+ *   bit 4-7:	  Log2 of cacheline size.
+ *   bit 8-31:	  Size of the entire cache >> 8.
+ */
+#define AT_L1I_CACHESHAPE	34
+#define AT_L1D_CACHESHAPE	35
+#define AT_L2_CACHESHAPE	36
+
 #endif /* __ASM_SH_AUXVEC_H */
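
Decoding this layout in userspace is mechanical. The following illustrative
decoder (not part of the patch) unpacks a shape value into its three fields
per the comment above:

#include <stdbool.h>

struct cache_shape {
	unsigned int  assoc;		/* bits 0-3; 0 = fully associative */
	unsigned int  line_size;	/* decoded from log2 in bits 4-7 */
	unsigned long size;		/* bits 8-31 hold size >> 8 */
	bool          present;
};

static struct cache_shape decode_cacheshape(long v)
{
	struct cache_shape cs = { .present = (v != -1) };

	if (cs.present) {
		cs.assoc     = v & 0xf;
		cs.line_size = 1u << ((v >> 4) & 0xf);
		/* masking the low byte puts the size field back in place */
		cs.size      = v & ~0xffUL;
	}
	return cs;
}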

include/asm-sh/elf.h

@@ -161,12 +161,25 @@ extern void __kernel_vsyscall;
 #define VDSO_BASE		((unsigned long)current->mm->context.vdso)
 #define VDSO_SYM(x)		(VDSO_BASE + (unsigned long)(x))
 
+#define VSYSCALL_AUX_ENT					\
+	if (vdso_enabled)					\
+		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
+#else
+#define VSYSCALL_AUX_ENT
+#endif /* CONFIG_VSYSCALL */
+
+extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
+
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO						\
 do {								\
-	if (vdso_enabled)					\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);	\
+	/* Optional vsyscall entry */				\
+	VSYSCALL_AUX_ENT					\
+								\
+	/* Cache desc */					\
+	NEW_AUX_ENT(AT_L1I_CACHESHAPE, l1i_cache_shape);	\
+	NEW_AUX_ENT(AT_L1D_CACHESHAPE, l1d_cache_shape);	\
+	NEW_AUX_ENT(AT_L2_CACHESHAPE, l2_cache_shape);		\
 } while (0)
-#endif /* CONFIG_VSYSCALL */
 
 #endif /* __ASM_SH_ELF_H */
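
Since ARCH_DLINFO simply appends (type, value) pairs to the process auxiliary
vector, the new entries can also be recovered without libc help by walking the
vector directly. An illustrative sketch (not from the patch; it assumes a
32-bit SH target, hence Elf32_auxv_t):

#include <elf.h>	/* Elf32_auxv_t, AT_NULL */
#include <stdio.h>

/* Arch-specific values, from include/asm-sh/auxvec.h in this patch */
#define AT_L1I_CACHESHAPE	34
#define AT_L1D_CACHESHAPE	35
#define AT_L2_CACHESHAPE	36

int main(int argc, char *argv[], char *envp[])
{
	char **p = envp;

	(void)argc;
	(void)argv;

	/* The auxiliary vector sits just past the environment block */
	while (*p)
		p++;
	p++;

	for (Elf32_auxv_t *aux = (Elf32_auxv_t *)p;
	     aux->a_type != AT_NULL; aux++) {
		switch (aux->a_type) {
		case AT_L1I_CACHESHAPE:
		case AT_L1D_CACHESHAPE:
		case AT_L2_CACHESHAPE:
			printf("auxv %u: cache shape %#lx\n",
			       (unsigned int)aux->a_type,
			       (unsigned long)aux->a_un.a_val);
			break;
		}
	}
	return 0;
}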

include/asm-sh/system.h

@@ -12,7 +12,7 @@
 #include <asm/types.h>
 #include <asm/ptrace.h>
 
-#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */
 
 #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
 #define __icbi()			\