[PATCH] x86: add an accessor function for getting the per-CPU gdt

Add an accessor function for getting the per-CPU GDT.  The caller must already
have the CPU (e.g. via get_cpu()), since the macro simply resolves the per-CPU
cpu_gdt_table for that processor.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Zachary Amsden, 2005-10-30 14:59:34 -08:00; committed by Linus Torvalds
parent 72e12b76fe, commit 251e6912df
4 changed files with 32 additions and 27 deletions
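For reference, a minimal usage sketch of the new accessor (illustrative only,
not part of the patch): the 0x40 descriptor slot and the save/restore pattern
mirror the apm.c hunks below, while the function name and new_desc parameter
are made up for the example.  The one contract is that the caller must already
have the CPU, e.g. via get_cpu()/put_cpu(), while it uses the returned table.

	#include <linux/smp.h>
	#include <asm/processor.h>
	#include <asm/desc.h>

	static void patch_bios_descriptor(struct desc_struct new_desc)
	{
		int cpu = get_cpu();				/* pin this CPU */
		struct desc_struct *gdt = get_cpu_gdt_table(cpu);
		struct desc_struct saved = gdt[0x40 / 8];	/* save entry 0x40 */

		gdt[0x40 / 8] = new_desc;			/* temporarily swap it */
		/* ... call into the BIOS here ... */
		gdt[0x40 / 8] = saved;				/* restore the original */
		put_cpu();					/* re-enable preemption */
	}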

--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c

@@ -597,12 +597,14 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
 	cpumask_t cpus;
 	int cpu;
 	struct desc_struct save_desc_40;
+	struct desc_struct *gdt;
 
 	cpus = apm_save_cpus();
 
 	cpu = get_cpu();
-	save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8];
-	per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc;
+	gdt = get_cpu_gdt_table(cpu);
+	save_desc_40 = gdt[0x40 / 8];
+	gdt[0x40 / 8] = bad_bios_desc;
 
 	local_save_flags(flags);
 	APM_DO_CLI;
@@ -610,7 +612,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
 	apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
 	APM_DO_RESTORE_SEGS;
 	local_irq_restore(flags);
-	per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = save_desc_40;
+	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
@@ -639,13 +641,14 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 	cpumask_t cpus;
 	int cpu;
 	struct desc_struct save_desc_40;
+	struct desc_struct *gdt;
 
 	cpus = apm_save_cpus();
 
 	cpu = get_cpu();
-	save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8];
-	per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc;
+	gdt = get_cpu_gdt_table(cpu);
+	save_desc_40 = gdt[0x40 / 8];
+	gdt[0x40 / 8] = bad_bios_desc;
 
 	local_save_flags(flags);
 	APM_DO_CLI;
@@ -653,7 +656,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 	error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
 	APM_DO_RESTORE_SEGS;
 	local_irq_restore(flags);
-	__get_cpu_var(cpu_gdt_table)[0x40 / 8] = save_desc_40;
+	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
 	return error;
@@ -2295,35 +2298,36 @@ static int __init apm_init(void)
 	apm_bios_entry.segment = APM_CS;
 
 	for (i = 0; i < NR_CPUS; i++) {
-		set_base(per_cpu(cpu_gdt_table, i)[APM_CS >> 3],
+		struct desc_struct *gdt = get_cpu_gdt_table(i);
+		set_base(gdt[APM_CS >> 3],
			__va((unsigned long)apm_info.bios.cseg << 4));
-		set_base(per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3],
+		set_base(gdt[APM_CS_16 >> 3],
			__va((unsigned long)apm_info.bios.cseg_16 << 4));
-		set_base(per_cpu(cpu_gdt_table, i)[APM_DS >> 3],
+		set_base(gdt[APM_DS >> 3],
			__va((unsigned long)apm_info.bios.dseg << 4));
 #ifndef APM_RELAX_SEGMENTS
 		if (apm_info.bios.version == 0x100) {
 #endif
 			/* For ASUS motherboard, Award BIOS rev 110 (and others?) */
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 - 1);
+			_set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1);
 			/* For some unknown machine. */
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 64 * 1024 - 1);
+			_set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
 			/* For the DEC Hinote Ultra CT475 (and others?) */
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 64 * 1024 - 1);
+			_set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
 #ifndef APM_RELAX_SEGMENTS
 		} else {
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3],
+			_set_limit((char *)&gdt[APM_CS >> 3],
				(apm_info.bios.cseg_len - 1) & 0xffff);
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3],
+			_set_limit((char *)&gdt[APM_CS_16 >> 3],
				(apm_info.bios.cseg_16_len - 1) & 0xffff);
-			_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3],
+			_set_limit((char *)&gdt[APM_DS >> 3],
				(apm_info.bios.dseg_len - 1) & 0xffff);
 			/* workaround for broken BIOSes */
 			if (apm_info.bios.cseg_len <= apm_info.bios.offset)
-				_set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 -1);
+				_set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1);
 			if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */
 				/* for the BIOS that assumes granularity = 1 */
-				per_cpu(cpu_gdt_table, i)[APM_DS >> 3].b |= 0x800000;
+				gdt[APM_DS >> 3].b |= 0x800000;
 				printk(KERN_NOTICE "apm: we set the granularity of dseg.\n");
 			}
 		}

--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c

@@ -573,6 +573,7 @@ void __devinit cpu_init(void)
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &current->thread;
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
 	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -594,18 +595,16 @@ void __devinit cpu_init(void)
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
 	 */
-	memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table,
-	       GDT_SIZE);
+	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
 
 	/* Set up GDT entry for 16bit stack */
-	*(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |=
+	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
		(CPU_16BIT_STACK_SIZE - 1);
 
 	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
-	cpu_gdt_descr[cpu].address =
-		(unsigned long)&per_cpu(cpu_gdt_table, cpu);
+	cpu_gdt_descr[cpu].address = (unsigned long)gdt;
 	load_gdt(&cpu_gdt_descr[cpu]);
 	load_idt(&idt_descr);

--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c

@@ -108,7 +108,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 		desc = (void *)desc + (seg & ~7);
 	} else {
 		/* Must disable preemption while reading the GDT. */
-		desc = (u32 *)&per_cpu(cpu_gdt_table, get_cpu());
+		desc = (u32 *)get_cpu_gdt_table(get_cpu());
 		desc = (void *)desc + (seg & ~7);
 	}

--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h

@@ -17,6 +17,8 @@
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
 
+#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu))
+
 DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 
 struct Xgt_desc_struct {
@@ -60,7 +62,7 @@ __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
 static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
 {
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
 }
@@ -68,7 +70,7 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
 static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
 {
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
 }
 
 #define LDT_entry_a(info) \
@@ -109,7 +111,7 @@ static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 {
-#define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
 	C(0); C(1); C(2);
 #undef C
 }
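
A closing illustration (not from the patch): get_cpu_gdt_table() expands to the
per_cpu() lvalue itself, so the converted call sites above can keep indexing the
table and taking addresses of its entries exactly as before.  The helper below
is hypothetical; the two pointers it computes are identical by definition of the
macro, assuming a valid, pinned cpu index.

	#include <linux/kernel.h>
	#include <linux/percpu.h>
	#include <asm/segment.h>
	#include <asm/desc.h>

	static struct desc_struct *tls_slot(int cpu, int idx)
	{
		/* old spelling and new spelling of the same per-CPU GDT slot */
		struct desc_struct *old_style = &per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + idx];
		struct desc_struct *new_style = &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + idx];

		BUG_ON(old_style != new_style);		/* same storage, different accessor */
		return new_style;
	}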