
x86: make 32bit support per_cpu vector

so we can merge io_apic_32.c and io_apic_64.c

v2: Use cpu_online_map as the target cpus for bigsmp, just like 64-bit does.

Also remove some unused TARGET_CPUS macro definitions.

v3: need to check whether desc is NULL in smp_irq_move_cleanup_interrupt()

Migration also needs to reset the vector, so copy __target_IO_APIC_irq()
from the 64-bit code.

(the duplication will go away once the two files are unified.)

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Yinghai Lu 2008-08-19 20:50:28 -07:00 committed by Ingo Molnar
parent 199751d715
commit 497c9a195d
19 changed files with 581 additions and 316 deletions
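
To illustrate the idea the patch builds on: with per-cpu vectors, each CPU carries its own vector-to-IRQ table (the DEFINE_PER_CPU(vector_irq_t, vector_irq) added below), so the same vector number can be bound to different IRQs on different CPUs, and do_IRQ must translate the incoming vector through the current CPU's table. Here is a minimal user-space sketch of that bookkeeping; the CPU count and the IRQ numbers 42/77 are purely illustrative, while 0x31 is FIRST_DEVICE_VECTOR as defined in the irq_vectors.h hunk:

#include <stdio.h>

#define NR_CPUS    4    /* illustrative; not the kernel's value */
#define NR_VECTORS 256

/* Stand-in for DEFINE_PER_CPU(vector_irq_t, vector_irq): every cpu
 * owns a private vector -> irq table, so a vector number is only
 * meaningful together with the cpu it fires on. */
static int vector_irq[NR_CPUS][NR_VECTORS];

/* What the reworked 32-bit do_IRQ does: map the raw vector taken
 * from the interrupt frame to an irq via the current cpu's table. */
static int vector_to_irq(int cpu, unsigned int vector)
{
	return vector_irq[cpu][vector];
}

int main(void)
{
	int cpu, vector;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for (vector = 0; vector < NR_VECTORS; vector++)
			vector_irq[cpu][vector] = -1;	/* free */

	/* Net effect of __assign_irq_vector(): vector 0x31
	 * (FIRST_DEVICE_VECTOR) can back irq 42 on cpu 0 while cpu 1
	 * hands the very same vector to irq 77. */
	vector_irq[0][0x31] = 42;
	vector_irq[1][0x31] = 77;

	printf("cpu0, vector 0x31 -> irq %d\n", vector_to_irq(0, 0x31));
	printf("cpu1, vector 0x31 -> irq %d\n", vector_to_irq(1, 0x31));
	return 0;
}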


@ -629,7 +629,7 @@ ENTRY(interrupt)
ENTRY(irq_entries_start)
RING0_INT_FRAME
vector=0
.rept NR_IRQS
.rept NR_VECTORS
ALIGN
.if vector
CFI_ADJUST_CFA_OFFSET -4


@ -48,6 +48,7 @@
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <mach_ipi.h>
#include <mach_apic.h>
#include <mach_apicdef.h>
@ -60,7 +61,7 @@ atomic_t irq_mis_count;
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock);
DEFINE_SPINLOCK(vector_lock);
static DEFINE_SPINLOCK(vector_lock);
int timer_through_8259 __initdata;
@ -100,28 +101,32 @@ struct irq_cfg {
unsigned int irq;
struct irq_cfg *next;
struct irq_pin_list *irq_2_pin;
cpumask_t domain;
cpumask_t old_domain;
unsigned move_cleanup_count;
u8 vector;
u8 move_in_progress : 1;
};
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfg_legacy[] __initdata = {
[0] = { .irq = 0, .vector = IRQ0_VECTOR, },
[1] = { .irq = 1, .vector = IRQ1_VECTOR, },
[2] = { .irq = 2, .vector = IRQ2_VECTOR, },
[3] = { .irq = 3, .vector = IRQ3_VECTOR, },
[4] = { .irq = 4, .vector = IRQ4_VECTOR, },
[5] = { .irq = 5, .vector = IRQ5_VECTOR, },
[6] = { .irq = 6, .vector = IRQ6_VECTOR, },
[7] = { .irq = 7, .vector = IRQ7_VECTOR, },
[8] = { .irq = 8, .vector = IRQ8_VECTOR, },
[9] = { .irq = 9, .vector = IRQ9_VECTOR, },
[10] = { .irq = 10, .vector = IRQ10_VECTOR, },
[11] = { .irq = 11, .vector = IRQ11_VECTOR, },
[12] = { .irq = 12, .vector = IRQ12_VECTOR, },
[13] = { .irq = 13, .vector = IRQ13_VECTOR, },
[14] = { .irq = 14, .vector = IRQ14_VECTOR, },
[15] = { .irq = 15, .vector = IRQ15_VECTOR, },
[0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
[1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
[2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
[3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
[4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
[5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
[6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
[7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
[8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
[9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};
static struct irq_cfg irq_cfg_init = { .irq = -1U, };
@ -263,6 +268,7 @@ static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
return cfg;
}
static int assign_irq_vector(int irq, cpumask_t mask);
/*
* Rough estimation of how many shared IRQs there are, can
* be changed anytime.
@ -432,6 +438,65 @@ static void ioapic_mask_entry(int apic, int pin)
spin_unlock_irqrestore(&ioapic_lock, flags);
}
#ifdef CONFIG_SMP
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
int apic, pin;
struct irq_cfg *cfg;
struct irq_pin_list *entry;
cfg = irq_cfg(irq);
entry = cfg->irq_2_pin;
for (;;) {
unsigned int reg;
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
io_apic_write(apic, 0x11 + pin*2, dest);
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
io_apic_modify(apic, 0x10 + pin*2, reg);
if (!entry->next)
break;
entry = entry->next;
}
}
static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int dest;
cpumask_t tmp;
cfg = irq_cfg(irq);
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
return;
if (assign_irq_vector(irq, mask))
return;
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
/*
* Only the high 8 bits are valid.
*/
dest = SET_APIC_LOGICAL_ID(dest);
spin_lock_irqsave(&ioapic_lock, flags);
__target_IO_APIC_irq(irq, dest, cfg->vector);
irq_to_desc(irq)->affinity = mask;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif /* CONFIG_SMP */
/*
* The common case is 1:1 IRQ<->pin mappings. Sometimes there are
* shared ISA-space IRQs, so we have to support them. We are super
@ -586,45 +651,6 @@ static void clear_IO_APIC(void)
clear_IO_APIC_pin(apic, pin);
}
#ifdef CONFIG_SMP
static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
{
struct irq_cfg *cfg;
unsigned long flags;
int pin;
struct irq_pin_list *entry;
unsigned int apicid_value;
cpumask_t tmp;
cfg = irq_cfg(irq);
entry = cfg->irq_2_pin;
cpus_and(tmp, cpumask, cpu_online_map);
if (cpus_empty(tmp))
tmp = TARGET_CPUS;
cpus_and(cpumask, tmp, CPU_MASK_ALL);
apicid_value = cpu_mask_to_apicid(cpumask);
/* Prepare to do the io_apic_write */
apicid_value = apicid_value << 24;
spin_lock_irqsave(&ioapic_lock, flags);
for (;;) {
if (!entry)
break;
pin = entry->pin;
io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
if (!entry->next)
break;
entry = entry->next;
}
irq_to_desc(irq)->affinity = cpumask;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif /* CONFIG_SMP */
#ifndef CONFIG_SMP
void send_IPI_self(int vector)
{
@ -789,32 +815,6 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
/*
* This function currently is only a helper for the i386 smp boot process, where
* we need to reprogram the ioredtbls to cater for the cpus which have come online,
* so the mask in all cases should simply be TARGET_CPUS
*/
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
int pin, ioapic, irq, irq_entry;
if (skip_ioapic_setup == 1)
return;
for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
irq_entry = find_irq_entry(ioapic, pin, mp_INT);
if (irq_entry == -1)
continue;
irq = pin_2_irq(irq_entry, ioapic, pin);
set_ioapic_affinity_irq(irq, TARGET_CPUS);
}
}
}
#endif
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
* EISA Edge/Level control register, ELCR
@ -1046,47 +1046,138 @@ static inline int IO_APIC_irq_trigger(int irq)
return 0;
}
static int __assign_irq_vector(int irq)
void lock_vector_lock(void)
{
static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
int vector, offset;
/* Used to ensure that the online set of cpus does not change
 * during assign_irq_vector.
 */
spin_lock(&vector_lock);
}
void unlock_vector_lock(void)
{
spin_unlock(&vector_lock);
}
static int __assign_irq_vector(int irq, cpumask_t mask)
{
static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
unsigned int old_vector;
int cpu;
struct irq_cfg *cfg;
cfg = irq_cfg(irq);
if (cfg->vector > 0)
return cfg->vector;
/* Only try and allocate irqs on cpus that are present */
cpus_and(mask, mask, cpu_online_map);
if ((cfg->move_in_progress) || cfg->move_cleanup_count)
return -EBUSY;
old_vector = cfg->vector;
if (old_vector) {
cpumask_t tmp;
cpus_and(tmp, cfg->domain, mask);
if (!cpus_empty(tmp))
return 0;
}
for_each_cpu_mask_nr(cpu, mask) {
cpumask_t domain, new_mask;
int new_cpu;
int vector, offset;
domain = vector_allocation_domain(cpu);
cpus_and(new_mask, domain, cpu_online_map);
vector = current_vector;
offset = current_offset;
next:
vector += 8;
if (vector >= first_system_vector) {
/* If we run out of vectors on large boxen, must share them. */
offset = (offset + 1) % 8;
vector = FIRST_DEVICE_VECTOR + offset;
}
if (vector == current_vector)
return -ENOSPC;
if (test_and_set_bit(vector, used_vectors))
if (unlikely(current_vector == vector))
continue;
if (vector == SYSCALL_VECTOR)
goto next;
for_each_cpu_mask_nr(new_cpu, new_mask)
if (per_cpu(vector_irq, new_cpu)[vector] != -1)
goto next;
/* Found one! */
current_vector = vector;
current_offset = offset;
if (old_vector) {
cfg->move_in_progress = 1;
cfg->old_domain = cfg->domain;
}
for_each_cpu_mask_nr(new_cpu, new_mask)
per_cpu(vector_irq, new_cpu)[vector] = irq;
cfg->vector = vector;
return vector;
cfg->domain = domain;
return 0;
}
return -ENOSPC;
}
static int assign_irq_vector(int irq)
static int assign_irq_vector(int irq, cpumask_t mask)
{
int err;
unsigned long flags;
int vector;
spin_lock_irqsave(&vector_lock, flags);
vector = __assign_irq_vector(irq);
err = __assign_irq_vector(irq, mask);
spin_unlock_irqrestore(&vector_lock, flags);
return vector;
return err;
}
static void __clear_irq_vector(int irq)
{
struct irq_cfg *cfg;
cpumask_t mask;
int cpu, vector;
cfg = irq_cfg(irq);
BUG_ON(!cfg->vector);
vector = cfg->vector;
cpus_and(mask, cfg->domain, cpu_online_map);
for_each_cpu_mask_nr(cpu, mask)
per_cpu(vector_irq, cpu)[vector] = -1;
cfg->vector = 0;
cpus_clear(cfg->domain);
}
void __setup_vector_irq(int cpu)
{
/* Initialize vector_irq on a new cpu */
/* This function must be called with vector_lock held */
int irq, vector;
struct irq_cfg *cfg;
/* Mark the inuse vectors */
for_each_irq_cfg(cfg) {
if (!cpu_isset(cpu, cfg->domain))
continue;
vector = cfg->vector;
irq = cfg->irq;
per_cpu(vector_irq, cpu)[vector] = irq;
}
/* Mark the free vectors */
for (vector = 0; vector < NR_VECTORS; ++vector) {
irq = per_cpu(vector_irq, cpu)[vector];
if (irq < 0)
continue;
cfg = irq_cfg(irq);
if (!cpu_isset(cpu, cfg->domain))
per_cpu(vector_irq, cpu)[vector] = -1;
}
}
static struct irq_chip ioapic_chip;
@ -1095,7 +1186,7 @@ static struct irq_chip ioapic_chip;
#define IOAPIC_EDGE 0
#define IOAPIC_LEVEL 1
static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
static void ioapic_register_intr(int irq, unsigned long trigger)
{
struct irq_desc *desc;
@ -1115,79 +1206,109 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
set_irq_chip_and_handler_name(irq, &ioapic_chip,
handle_edge_irq, "edge");
}
set_intr_gate(vector, interrupt[irq]);
}
static int setup_ioapic_entry(int apic, int irq,
struct IO_APIC_route_entry *entry,
unsigned int destination, int trigger,
int polarity, int vector)
{
/*
* add it to the IO-APIC irq-routing table:
*/
memset(entry,0,sizeof(*entry));
entry->delivery_mode = INT_DELIVERY_MODE;
entry->dest_mode = INT_DEST_MODE;
entry->dest.logical.logical_dest = destination;
entry->mask = 0; /* enable IRQ */
entry->trigger = trigger;
entry->polarity = polarity;
entry->vector = vector;
/* Mask level triggered irqs.
* Use IRQ_DELAYED_DISABLE for edge triggered irqs.
*/
if (trigger)
entry->mask = 1;
return 0;
}
static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
int trigger, int polarity)
{
struct irq_cfg *cfg;
struct IO_APIC_route_entry entry;
cpumask_t mask;
if (!IO_APIC_IRQ(irq))
return;
cfg = irq_cfg(irq);
mask = TARGET_CPUS;
if (assign_irq_vector(irq, mask))
return;
cpus_and(mask, cfg->domain, mask);
apic_printk(APIC_VERBOSE, KERN_DEBUG
"IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n",
apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
irq, trigger, polarity);
if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
cpu_mask_to_apicid(mask), trigger, polarity,
cfg->vector)) {
printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mp_ioapics[apic].mp_apicid, pin);
__clear_irq_vector(irq);
return;
}
ioapic_register_intr(irq, trigger);
if (irq < 16)
disable_8259A_irq(irq);
ioapic_write_entry(apic, pin, entry);
}
static void __init setup_IO_APIC_irqs(void)
{
struct IO_APIC_route_entry entry;
int apic, pin, idx, irq, first_notcon = 1, vector;
int apic, pin, idx, irq, first_notcon = 1;
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
for (apic = 0; apic < nr_ioapics; apic++) {
for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
/*
* add it to the IO-APIC irq-routing table:
*/
memset(&entry, 0, sizeof(entry));
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
entry.mask = 0; /* enable IRQ */
entry.dest.logical.logical_dest =
cpu_mask_to_apicid(TARGET_CPUS);
idx = find_irq_entry(apic,pin,mp_INT);
if (idx == -1) {
if (first_notcon) {
apic_printk(APIC_VERBOSE, KERN_DEBUG
" IO-APIC (apicid-pin) %d-%d",
mp_ioapics[apic].mp_apicid,
pin);
apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
first_notcon = 0;
} else
apic_printk(APIC_VERBOSE, ", %d-%d",
mp_ioapics[apic].mp_apicid, pin);
apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
continue;
}
if (!first_notcon) {
apic_printk(APIC_VERBOSE, " not connected.\n");
first_notcon = 1;
}
entry.trigger = irq_trigger(idx);
entry.polarity = irq_polarity(idx);
if (irq_trigger(idx)) {
entry.trigger = 1;
entry.mask = 1;
}
irq = pin_2_irq(idx, apic, pin);
/*
* skip adding the timer int on secondary nodes, which causes
* a small but painful rift in the time-space continuum
*/
if (multi_timer_check(apic, irq))
continue;
else
add_pin_to_irq(irq, apic, pin);
if (!apic && !IO_APIC_IRQ(irq))
continue;
if (IO_APIC_IRQ(irq)) {
vector = assign_irq_vector(irq);
entry.vector = vector;
ioapic_register_intr(irq, vector, IOAPIC_AUTO);
if (!apic && (irq < 16))
disable_8259A_irq(irq);
}
ioapic_write_entry(apic, pin, entry);
setup_IO_APIC_irq(apic, pin, irq,
irq_trigger(idx), irq_polarity(idx));
}
}
@ -1221,7 +1342,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
* The timer IRQ doesn't have to know that behind the
* scene we may have a 8259A-master in AEOI mode ...
*/
ioapic_register_intr(0, vector, IOAPIC_EDGE);
ioapic_register_intr(0, IOAPIC_EDGE);
/*
* Add it to the IO-APIC irq-routing table:
@ -1805,8 +1926,10 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
return was_pending;
}
static void irq_complete_move(unsigned int irq);
static void ack_ioapic_irq(unsigned int irq)
{
irq_complete_move(irq);
move_native_irq(irq);
ack_APIC_irq();
}
@ -1816,6 +1939,7 @@ static void ack_ioapic_quirk_irq(unsigned int irq)
unsigned long v;
int i;
irq_complete_move(irq);
move_native_irq(irq);
/*
* It appears there is an erratum which affects at least version 0x11
@ -1858,6 +1982,64 @@ static int ioapic_retrigger_irq(unsigned int irq)
return 1;
}
#ifdef CONFIG_SMP
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
unsigned vector, me;
ack_APIC_irq();
irq_enter();
me = smp_processor_id();
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
unsigned int irq;
struct irq_desc *desc;
struct irq_cfg *cfg;
irq = __get_cpu_var(vector_irq)[vector];
desc = irq_to_desc(irq);
if (!desc)
continue;
cfg = irq_cfg(irq);
spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
goto unlock;
if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
goto unlock;
__get_cpu_var(vector_irq)[vector] = -1;
cfg->move_cleanup_count--;
unlock:
spin_unlock(&desc->lock);
}
irq_exit();
}
static void irq_complete_move(unsigned int irq)
{
struct irq_cfg *cfg = irq_cfg(irq);
unsigned vector, me;
if (likely(!cfg->move_in_progress))
return;
vector = ~get_irq_regs()->orig_ax;
me = smp_processor_id();
if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
cpumask_t cleanup_mask;
cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
cfg->move_cleanup_count = cpus_weight(cleanup_mask);
send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
cfg->move_in_progress = 0;
}
}
#else
static inline void irq_complete_move(unsigned int irq) {}
#endif
static struct irq_chip ioapic_chip __read_mostly = {
.name = "IO-APIC",
.startup = startup_ioapic_irq,
@ -1940,7 +2122,7 @@ static struct irq_chip lapic_chip __read_mostly = {
.ack = ack_lapic_irq,
};
static void lapic_register_intr(int irq, int vector)
static void lapic_register_intr(int irq)
{
struct irq_desc *desc;
@ -1948,7 +2130,6 @@ static void lapic_register_intr(int irq, int vector)
desc->status &= ~IRQ_LEVEL;
set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
"edge");
set_intr_gate(vector, interrupt[irq]);
}
static void __init setup_nmi(void)
@ -2036,9 +2217,9 @@ static inline void __init unlock_ExtINT_logic(void)
*/
static inline void __init check_timer(void)
{
struct irq_cfg *cfg = irq_cfg(0);
int apic1, pin1, apic2, pin2;
int no_pin1 = 0;
int vector;
unsigned int ver;
unsigned long flags;
@ -2051,8 +2232,7 @@ static inline void __init check_timer(void)
* get/set the timer IRQ vector:
*/
disable_8259A_irq(0);
vector = assign_irq_vector(0);
set_intr_gate(vector, interrupt[0]);
assign_irq_vector(0, TARGET_CPUS);
/*
* As IRQ0 is to be enabled in the 8259A, the virtual
@ -2074,7 +2254,7 @@ static inline void __init check_timer(void)
apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
"apic1=%d pin1=%d apic2=%d pin2=%d\n",
vector, apic1, pin1, apic2, pin2);
cfg->vector, apic1, pin1, apic2, pin2);
/*
* Some BIOS writers are clueless and report the ExtINTA
@ -2098,7 +2278,7 @@ static inline void __init check_timer(void)
*/
if (no_pin1) {
add_pin_to_irq(0, apic1, pin1);
setup_timer_IRQ0_pin(apic1, pin1, vector);
setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
}
unmask_IO_APIC_irq(0);
if (timer_irq_works()) {
@ -2123,7 +2303,7 @@ static inline void __init check_timer(void)
* legacy devices should be connected to IO APIC #0
*/
replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
setup_timer_IRQ0_pin(apic2, pin2, vector);
setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
unmask_IO_APIC_irq(0);
enable_8259A_irq(0);
if (timer_irq_works()) {
@ -2154,8 +2334,8 @@ static inline void __init check_timer(void)
apic_printk(APIC_QUIET, KERN_INFO
"...trying to set up timer as Virtual Wire IRQ...\n");
lapic_register_intr(0, vector);
apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
lapic_register_intr(0);
apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
enable_8259A_irq(0);
if (timer_irq_works()) {
@ -2163,7 +2343,7 @@ static inline void __init check_timer(void)
goto out;
}
disable_8259A_irq(0);
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
apic_printk(APIC_QUIET, KERN_INFO
@ -2207,12 +2387,6 @@ out:
void __init setup_IO_APIC(void)
{
int i;
/* Reserve all the system vectors. */
for (i = first_system_vector; i < NR_VECTORS; i++)
set_bit(i, used_vectors);
enable_IO_APIC();
io_apic_irqs = ~PIC_IRQS;
@ -2334,12 +2508,14 @@ device_initcall(ioapic_init_sysfs);
unsigned int create_irq_nr(unsigned int irq_want)
{
/* Allocate an unused irq */
unsigned int irq, new, vector = 0;
unsigned int irq, new;
unsigned long flags;
struct irq_cfg *cfg_new;
#ifndef CONFIG_HAVE_SPARSE_IRQ
/* can only use bus/dev/fn.. when per_cpu vector is used */
irq_want = nr_irqs - 1;
#endif
irq = 0;
spin_lock_irqsave(&vector_lock, flags);
@ -2351,15 +2527,13 @@ unsigned int create_irq_nr(unsigned int irq_want)
continue;
if (!cfg_new)
cfg_new = irq_cfg_alloc(new);
vector = __assign_irq_vector(new);
if (likely(vector > 0))
if (__assign_irq_vector(new, TARGET_CPUS) == 0)
irq = new;
break;
}
spin_unlock_irqrestore(&vector_lock, flags);
if (irq > 0) {
set_intr_gate(vector, interrupt[irq]);
dynamic_irq_init(irq);
}
return irq;
@ -2377,8 +2551,7 @@ void destroy_irq(unsigned int irq)
dynamic_irq_cleanup(irq);
spin_lock_irqsave(&vector_lock, flags);
clear_bit(irq_cfg(irq)->vector, used_vectors);
irq_cfg(irq)->vector = 0;
__clear_irq_vector(irq);
spin_unlock_irqrestore(&vector_lock, flags);
}
@ -2388,12 +2561,19 @@ void destroy_irq(unsigned int irq)
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
int vector;
struct irq_cfg *cfg;
int err;
unsigned dest;
cpumask_t tmp;
vector = assign_irq_vector(irq);
if (vector >= 0) {
dest = cpu_mask_to_apicid(TARGET_CPUS);
tmp = TARGET_CPUS;
err = assign_irq_vector(irq, tmp);
if (err)
return err;
cfg = irq_cfg(irq);
cpus_and(tmp, cfg->domain, tmp);
dest = cpu_mask_to_apicid(tmp);
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo =
@ -2412,33 +2592,34 @@ MSI_ADDR_DEST_MODE_PHYSICAL:
((INT_DELIVERY_MODE != dest_LowestPrio) ?
MSI_DATA_DELIVERY_FIXED:
MSI_DATA_DELIVERY_LOWPRI) |
MSI_DATA_VECTOR(vector);
}
return vector;
MSI_DATA_VECTOR(cfg->vector);
return err;
}
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
struct irq_cfg *cfg;
struct msi_msg msg;
unsigned int dest;
cpumask_t tmp;
int vector;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
tmp = TARGET_CPUS;
vector = assign_irq_vector(irq);
if (vector < 0)
return;
dest = cpu_mask_to_apicid(mask);
if (assign_irq_vector(irq, mask))
return;
cfg = irq_cfg(irq);
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
read_msi_msg(irq, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(vector);
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
@ -2517,15 +2698,15 @@ void arch_teardown_msi_irq(unsigned int irq)
#ifdef CONFIG_SMP
static void target_ht_irq(unsigned int irq, unsigned int dest)
static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
struct ht_irq_msg msg;
fetch_ht_irq_msg(irq, &msg);
msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
write_ht_irq_msg(irq, &msg);
@ -2533,18 +2714,22 @@ static void target_ht_irq(unsigned int irq, unsigned int dest)
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
struct irq_cfg *cfg;
unsigned int dest;
cpumask_t tmp;
cpus_and(tmp, mask, cpu_online_map);
if (cpus_empty(tmp))
tmp = TARGET_CPUS;
return;
cpus_and(mask, tmp, CPU_MASK_ALL);
if (assign_irq_vector(irq, mask))
return;
dest = cpu_mask_to_apicid(mask);
cfg = irq_cfg(irq);
cpus_and(tmp, cfg->domain, mask);
dest = cpu_mask_to_apicid(tmp);
target_ht_irq(irq, dest);
target_ht_irq(irq, dest, cfg->vector);
irq_to_desc(irq)->affinity = mask;
}
#endif
@ -2562,16 +2747,18 @@ static struct irq_chip ht_irq_chip = {
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
int vector;
vector = assign_irq_vector(irq);
if (vector >= 0) {
struct ht_irq_msg msg;
unsigned dest;
struct irq_cfg *cfg;
int err;
cpumask_t tmp;
cpus_clear(tmp);
cpu_set(vector >> 8, tmp);
tmp = TARGET_CPUS;
err = assign_irq_vector(irq, tmp);
if (!err) {
struct ht_irq_msg msg;
unsigned dest;
cfg = irq_cfg(irq);
cpus_and(tmp, cfg->domain, tmp);
dest = cpu_mask_to_apicid(tmp);
msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
@ -2579,7 +2766,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
msg.address_lo =
HT_IRQ_LOW_BASE |
HT_IRQ_LOW_DEST_ID(dest) |
HT_IRQ_LOW_VECTOR(vector) |
HT_IRQ_LOW_VECTOR(cfg->vector) |
((INT_DEST_MODE == 0) ?
HT_IRQ_LOW_DM_PHYSICAL :
HT_IRQ_LOW_DM_LOGICAL) |
@ -2594,7 +2781,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
set_irq_chip_and_handler_name(irq, &ht_irq_chip,
handle_edge_irq, "edge");
}
return vector;
return err;
}
#endif /* CONFIG_HT_IRQ */
@ -2705,50 +2892,21 @@ int __init io_apic_get_redir_entries(int ioapic)
}
int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low)
int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
struct IO_APIC_route_entry entry;
if (!IO_APIC_IRQ(irq)) {
printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
ioapic);
return -EINVAL;
}
/*
* Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
* Note that we mask (disable) IRQs now -- these get enabled when the
* corresponding device driver registers for this IRQ.
*/
memset(&entry, 0, sizeof(entry));
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
entry.trigger = edge_level;
entry.polarity = active_high_low;
entry.mask = 1;
/*
* IRQs < 16 are already in the irq_2_pin[] map
*/
if (irq >= 16)
add_pin_to_irq(irq, ioapic, pin);
entry.vector = assign_irq_vector(irq);
apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
"(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
mp_ioapics[ioapic].mp_apicid, pin, entry.vector, irq,
edge_level, active_high_low);
ioapic_register_intr(irq, entry.vector, edge_level);
if (!ioapic && (irq < 16))
disable_8259A_irq(irq);
ioapic_write_entry(ioapic, pin, entry);
setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
return 0;
}
@ -2774,6 +2932,47 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
#endif /* CONFIG_ACPI */
/*
* This function currently is only a helper for the i386 smp boot process, where
* we need to reprogram the ioredtbls to cater for the cpus which have come online,
* so the mask in all cases should simply be TARGET_CPUS
*/
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
int pin, ioapic, irq, irq_entry;
struct irq_cfg *cfg;
struct irq_desc *desc;
if (skip_ioapic_setup == 1)
return;
for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
irq_entry = find_irq_entry(ioapic, pin, mp_INT);
if (irq_entry == -1)
continue;
irq = pin_2_irq(irq_entry, ioapic, pin);
/* setup_IO_APIC_irqs() could fail to get a vector for some device
* when you have too many devices, because at that time only the boot
* cpu is online.
*/
cfg = irq_cfg(irq);
if (!cfg->vector)
setup_IO_APIC_irq(ioapic, pin, irq,
irq_trigger(irq_entry),
irq_polarity(irq_entry));
else {
desc = irq_to_desc(irq);
set_ioapic_affinity_irq(irq, TARGET_CPUS);
}
}
}
}
#endif
static int __init parse_disable_timer_pin_1(char *arg)
{
disable_timer_pin_1 = 1;


@ -223,21 +223,25 @@ unsigned int do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs;
/* high bit used in ret_from_ code */
int overflow, irq = ~regs->orig_ax;
int overflow;
unsigned vector = ~regs->orig_ax;
struct irq_desc *desc;
unsigned irq;
desc = irq_to_desc(irq);
if (unlikely(!desc)) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, irq);
BUG();
}
old_regs = set_irq_regs(regs);
irq_enter();
irq = __get_cpu_var(vector_irq)[vector];
overflow = check_stack_overflow();
desc = irq_to_desc(irq);
if (unlikely(!desc)) {
printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x\n",
__func__, irq, vector);
BUG();
}
if (!execute_on_irq_stack(overflow, desc, irq)) {
if (unlikely(overflow))
print_stack_overflow();


@ -90,6 +90,27 @@ static struct irqaction irq2 = {
.name = "cascade",
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[0 ... IRQ0_VECTOR - 1] = -1,
[IRQ0_VECTOR] = 0,
[IRQ1_VECTOR] = 1,
[IRQ2_VECTOR] = 2,
[IRQ3_VECTOR] = 3,
[IRQ4_VECTOR] = 4,
[IRQ5_VECTOR] = 5,
[IRQ6_VECTOR] = 6,
[IRQ7_VECTOR] = 7,
[IRQ8_VECTOR] = 8,
[IRQ9_VECTOR] = 9,
[IRQ10_VECTOR] = 10,
[IRQ11_VECTOR] = 11,
[IRQ12_VECTOR] = 12,
[IRQ13_VECTOR] = 13,
[IRQ14_VECTOR] = 14,
[IRQ15_VECTOR] = 15,
[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
};
/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
@ -105,22 +126,14 @@ void __init native_init_IRQ(void)
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
*/
for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
int vector = FIRST_EXTERNAL_VECTOR + i;
if (i >= nr_irqs)
break;
for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
/* SYSCALL_VECTOR was reserved in trap_init. */
if (!test_bit(vector, used_vectors))
set_intr_gate(vector, interrupt[i]);
if (i != SYSCALL_VECTOR)
set_intr_gate(i, interrupt[i]);
}
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
/*
* IRQ0 must be given a fixed assignment and initialized,
* because it's used before the IO-APIC is set up.
*/
set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
/*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup.
@ -135,6 +148,9 @@ void __init native_init_IRQ(void)
/* IPI for single call function */
set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt);
/* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
@ -168,3 +184,4 @@ void __init native_init_IRQ(void)
irq_ctx_init(smp_processor_id());
}


@ -582,7 +582,7 @@ static void __init lguest_init_IRQ(void)
for (i = 0; i < LGUEST_IRQS; i++) {
int vector = FIRST_EXTERNAL_VECTOR + i;
if (vector != SYSCALL_VECTOR) {
set_intr_gate(vector, interrupt[i]);
set_intr_gate(vector, interrupt[vector]);
set_irq_chip_and_handler_name(i, &lguest_irq_controller,
handle_level_irq,
"level");


@ -41,6 +41,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
{ }
};
static cpumask_t vector_allocation_domain(int cpu)
{
return cpumask_of_cpu(cpu);
}
static int probe_bigsmp(void)
{


@ -75,4 +75,18 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
}
#endif
static cpumask_t vector_allocation_domain(int cpu)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
* priority interrupt delivery mode.
*
* In particular there was a hyperthreading cpu observed to
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
return domain;
}
struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);


@ -38,4 +38,18 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0;
}
static cpumask_t vector_allocation_domain(int cpu)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
* priority interrupt delivery mode.
*
* In particular there was a hyperthreading cpu observed to
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
return domain;
}
struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);


@ -23,4 +23,18 @@ static int probe_summit(void)
return 0;
}
static cpumask_t vector_allocation_domain(int cpu)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
* priority interrupt delivery mode.
*
* In particular there was a hyperthreading cpu observed to
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
return domain;
}
struct genapic apic_summit = APIC_INIT("summit", probe_summit);


@ -9,22 +9,17 @@ static inline int apic_id_registered(void)
return (1);
}
/* Round robin the irqs among the online cpus */
static inline cpumask_t target_cpus(void)
{
static unsigned long cpu = NR_CPUS;
do {
if (cpu >= NR_CPUS)
cpu = first_cpu(cpu_online_map);
else
cpu = next_cpu(cpu, cpu_online_map);
} while (cpu >= NR_CPUS);
return cpumask_of_cpu(cpu);
#ifdef CONFIG_SMP
return cpu_online_map;
#else
return cpumask_of_cpu(0);
#endif
}
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0
#define TARGET_CPUS (target_cpus())
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
#define INT_DEST_MODE (0) /* phys delivery to target proc */


@ -17,7 +17,6 @@ static inline cpumask_t target_cpus(void)
return cpumask_of_cpu(smp_processor_id());
#endif
}
#define TARGET_CPUS (target_cpus())
#if defined CONFIG_ES7000_CLUSTERED_APIC
#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
@ -81,7 +80,7 @@ static inline void setup_apic_routing(void)
int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
(apic_version[apic] == 0x14) ?
"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]);
"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
}
static inline int multi_timer_check(int apic, int irq)


@ -57,6 +57,7 @@ struct genapic {
unsigned (*get_apic_id)(unsigned long x);
unsigned long apic_id_mask;
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
cpumask_t (*vector_allocation_domain)(int cpu);
#ifdef CONFIG_SMP
/* ipi */
@ -104,6 +105,7 @@ struct genapic {
APICFUNC(get_apic_id) \
.apic_id_mask = APIC_ID_MASK, \
APICFUNC(cpu_mask_to_apicid) \
APICFUNC(vector_allocation_domain) \
APICFUNC(acpi_madt_oem_check) \
IPIFUNC(send_IPI_mask) \
IPIFUNC(send_IPI_allbutself) \


@ -116,12 +116,12 @@ extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_32
extern void (*const interrupt[NR_IRQS])(void);
#else
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
#endif
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64)
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
#ifdef CONFIG_X86_IO_APIC
extern void lock_vector_lock(void);
extern void unlock_vector_lock(void);
extern void __setup_vector_irq(int cpu);


@ -19,19 +19,14 @@
/*
* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
* cleanup after irq migration on 64 bit.
* cleanup after irq migration.
*/
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
/*
* Vectors 0x20-0x2f are used for ISA interrupts on 32 bit.
* Vectors 0x30-0x3f are used for ISA interrupts on 64 bit.
* Vectors 0x30-0x3f are used for ISA interrupts.
*/
#ifdef CONFIG_X86_32
#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR)
#else
#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
#endif
#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
@ -96,11 +91,7 @@
* start at 0x31(0x41) to spread out vectors evenly between priority
* levels. (0x80 is the syscall vector)
*/
#ifdef CONFIG_X86_32
# define FIRST_DEVICE_VECTOR 0x31
#else
#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
#endif
#define NR_VECTORS 256


@ -14,6 +14,7 @@ BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
#endif
/*


@ -85,6 +85,20 @@ static inline int apicid_to_node(int logical_apicid)
return 0;
#endif
}
static inline cpumask_t vector_allocation_domain(int cpu)
{
/* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest
* priority interrupt delivery mode.
*
* In particular there was a hyperthreading cpu observed to
* deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt destination.
*/
cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
return domain;
}
#endif
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
@ -138,6 +152,5 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
static inline void enable_apic_mode(void)
{
}
#endif /* CONFIG_X86_LOCAL_APIC */
#endif /* ASM_X86__MACH_DEFAULT__MACH_APIC_H */


@ -24,6 +24,7 @@
#define check_phys_apicid_present (genapic->check_phys_apicid_present)
#define check_apicid_used (genapic->check_apicid_used)
#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
#define vector_allocation_domain (genapic->vector_allocation_domain)
#define enable_apic_mode (genapic->enable_apic_mode)
#define phys_pkg_id (genapic->phys_pkg_id)


@ -12,8 +12,6 @@ static inline cpumask_t target_cpus(void)
return CPU_MASK_ALL;
}
#define TARGET_CPUS (target_cpus())
#define NO_BALANCE_IRQ (1)
#define esr_disable (1)


@ -22,7 +22,6 @@ static inline cpumask_t target_cpus(void)
*/
return cpumask_of_cpu(0);
}
#define TARGET_CPUS (target_cpus())
#define INT_DELIVERY_MODE (dest_LowestPrio)
#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */