1
0
Fork 0

[SPARC64]: Proper multi-core scheduling support.

The scheduling domain hierarchy is:

   all cpus -->
      cpus that share an instruction cache -->
          cpus that share an integer execution unit

Signed-off-by: David S. Miller <davem@davemloft.net>
hifive-unleashed-5.1
David S. Miller 2007-06-04 17:01:39 -07:00 committed by David S. Miller
parent d887ab3a9b
commit f78eae2e6f
7 changed files with 89 additions and 5 deletions

View File

@ -396,6 +396,15 @@ config SCHED_SMT
when dealing with UltraSPARC cpus at a cost of slightly increased
overhead in some places. If unsure say N here.
config SCHED_MC
bool "Multi-core scheduler support"
depends on SMP
default y
help
Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
source "kernel/Kconfig.preempt"
config CMDLINE_BOOL

View File

@ -473,6 +473,53 @@ static void __init set_core_ids(void)
}
}
/*
 * Walk the "back" arcs of an integer execution-unit mdesc node and
 * record @proc_id in the cpuinfo of every cpu node reached, so that
 * cpus sharing an integer execution unit end up with the same proc_id.
 *
 * @mp:      machine-description node for one execution unit
 * @proc_id: id to stamp into cpu_data(cpu).proc_id
 *
 * NOTE(review): the original dereferenced the "id" property without
 * checking whether md_get_property() found it; a malformed MD node
 * would oops.  Skip such nodes instead.
 */
static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
{
	int i;

	for (i = 0; i < mp->num_arcs; i++) {
		struct mdesc_node *t = mp->arcs[i].arc;
		const u64 *id;

		/* Only "back" arcs point from an exec unit to its cpus. */
		if (strcmp(mp->arcs[i].name, "back"))
			continue;

		if (strcmp(t->name, "cpu"))
			continue;

		id = md_get_property(t, "id", NULL);
		if (!id)	/* malformed node: no "id" property */
			continue;

		if (*id < NR_CPUS)
			cpu_data(*id).proc_id = proc_id;
	}
}
/*
 * Enumerate all machine-description nodes named @exec_unit_name,
 * keep only the integer execution units ("int"/"integer" in their
 * "type" property list), and assign each a sequential proc id via
 * mark_proc_ids().
 *
 * NOTE(review): the original passed the "type" property to
 * find_in_proplist() without checking for a missing property; guard
 * against a NULL return from md_get_property().
 */
static void __init __set_proc_ids(const char *exec_unit_name)
{
	struct mdesc_node *mp;
	int idx = 0;

	md_for_each_node_by_name(mp, exec_unit_name) {
		const char *type;
		int len;

		type = md_get_property(mp, "type", &len);
		if (!type)	/* node lacks a "type" property */
			continue;

		/* Only integer execution units define proc groupings. */
		if (!find_in_proplist(type, "int", len) &&
		    !find_in_proplist(type, "integer", len))
			continue;

		mark_proc_ids(mp, idx);
		idx++;
	}
}
/*
 * Assign processor ids, probing both spellings of the execution-unit
 * node name that appear in the wild ("exec_unit" and "exec-unit").
 */
static void __init set_proc_ids(void)
{
	static const char *const unit_names[] = {
		"exec_unit",
		"exec-unit",
		NULL,
	};
	const char *const *np;

	for (np = unit_names; *np; np++)
		__set_proc_ids(*np);
}
static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
{
u64 val;
@ -574,9 +621,11 @@ static void __init mdesc_fill_in_cpu_data(void)
#endif
c->core_id = 0;
c->proc_id = -1;
}
set_core_ids();
set_proc_ids();
smp_fill_in_sib_core_maps();
}

View File

@ -1800,6 +1800,7 @@ static void __init of_fill_in_cpu_data(void)
cpu_data(cpuid).core_id = 0;
}
cpu_data(cpuid).proc_id = -1;
#ifdef CONFIG_SMP
cpu_set(cpuid, cpu_present_map);

View File

@ -51,6 +51,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
@ -1217,13 +1219,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
unsigned int j;
if (cpu_data(i).core_id == 0) {
cpu_set(i, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[i]);
continue;
}
for_each_possible_cpu(j) {
if (cpu_data(i).core_id ==
cpu_data(j).core_id)
cpu_set(j, cpu_core_map[i]);
}
}
for_each_possible_cpu(i) {
unsigned int j;
if (cpu_data(i).proc_id == -1) {
cpu_set(i, cpu_sibling_map[i]);
continue;
}
for_each_possible_cpu(j) {
if (cpu_data(i).proc_id ==
cpu_data(j).proc_id)
cpu_set(j, cpu_sibling_map[i]);
}
}

View File

@ -31,7 +31,7 @@ typedef struct {
unsigned int ecache_size;
unsigned int ecache_line_size;
int core_id;
unsigned int __pad3;
int proc_id;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);

View File

@ -33,6 +33,7 @@ extern cpumask_t phys_cpu_present_map;
#define cpu_possible_map phys_cpu_present_map
extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
/*
* General functions that each host system must provide.

View File

@ -1,12 +1,19 @@
#ifndef _ASM_SPARC64_TOPOLOGY_H
#define _ASM_SPARC64_TOPOLOGY_H
#ifdef CONFIG_SMP
#include <asm/spitfire.h>
#define smt_capable() (tlb_type == hypervisor)
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#define mc_capable() (tlb_type == hypervisor)
#define smt_capable() (tlb_type == hypervisor)
#endif /* CONFIG_SMP */
#include <asm-generic/topology.h>
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#define cpu_coregroup_map(cpu) (cpu_core_map[cpu])
#endif /* _ASM_SPARC64_TOPOLOGY_H */