
KVM: x86: use physical LAPIC array for logical x2APIC

Logical x2APIC IDs map injectively to physical x2APIC IDs, so we can
reuse the physical array for them.  This allows us to save space by
sizing the logical maps according to the needs of xAPIC.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Radim Krčmář 2016-07-12 22:09:19 +02:00 committed by Paolo Bonzini
parent 64aa47bfc4
commit e45115b62f
2 changed files with 39 additions and 36 deletions
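
For reference, a CPU's logical x2APIC ID is derived from its physical ID as ((id / 16) << 16) | (1 << (id % 16)), so every (cluster, bit) pair names exactly one physical ID; that injective mapping is what lets the logical x2APIC lookup reuse the physical array. The standalone sketch below is not part of the patch (the helper names are made up, and __builtin_ctz is a GCC/Clang builtin); it only illustrates that round trip:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers, not kernel code: derive the logical x2APIC ID
 * from a physical ID (cluster in bits 31:16, one-hot CPU bit in 15:0)
 * and recover the physical ID from it. */
static uint32_t x2apic_logical_id(uint32_t phys_id)
{
        return ((phys_id / 16) << 16) | (1u << (phys_id % 16));
}

static uint32_t x2apic_physical_id(uint32_t logical_id)
{
        uint32_t cluster = logical_id >> 16;
        uint32_t bit = logical_id & 0xffff;

        /* index of the single set bit in the low 16 bits */
        return cluster * 16 + (uint32_t)__builtin_ctz(bit);
}

int main(void)
{
        for (uint32_t id = 0; id < 256; id++)
                assert(x2apic_physical_id(x2apic_logical_id(id)) == id);

        printf("logical x2APIC IDs map 1:1 onto physical IDs 0..255\n");
        return 0;
}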

arch/x86/include/asm/kvm_host.h
@@ -683,8 +683,10 @@ struct kvm_apic_map {
         struct rcu_head rcu;
         u8 mode;
         struct kvm_lapic *phys_map[256];
-        /* first index is cluster id second is cpu id in a cluster */
-        struct kvm_lapic *logical_map[16][16];
+        union {
+                struct kvm_lapic *xapic_flat_map[8];
+                struct kvm_lapic *xapic_cluster_map[16][4];
+        };
 };
 
 /* Hyper-V emulation context */
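
The union above is where the space saving comes from: the old logical_map held 16 x 16 pointers, while the new layout only needs what xAPIC requires (8 entries flat, or 16 clusters of 4), since x2APIC now indexes phys_map directly. A standalone sketch, not the kernel struct, assuming 64-bit pointers (2048 vs 512 bytes):

#include <stdio.h>

/* Plain pointers stand in for struct kvm_lapic *. */
struct old_layout {
        void *logical_map[16][16];              /* 256 pointers */
};

struct new_layout {
        union {
                void *xapic_flat_map[8];        /* flat xAPIC: 8 logical IDs */
                void *xapic_cluster_map[16][4]; /* cluster xAPIC: 16 clusters x 4 CPUs */
        };                                      /* 64 pointers; x2APIC reuses phys_map */
};

int main(void)
{
        printf("old: %zu bytes, new: %zu bytes\n",
               sizeof(struct old_layout), sizeof(struct new_layout));
        return 0;
}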

arch/x86/kvm/lapic.c
@@ -115,26 +115,36 @@ static inline int apic_enabled(struct kvm_lapic *apic)
         (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
          APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 
-/* The logical map is definitely wrong if we have multiple
- * modes at the same time. (Physical map is always right.)
- */
-static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
-{
-        return !(map->mode & (map->mode - 1));
-}
-
-static inline void
-apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
-{
-        unsigned lid_bits;
-
-        BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4);
-        BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8);
-        BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16);
-        lid_bits = map->mode;
-
-        *cid = dest_id >> lid_bits;
-        *lid = dest_id & ((1 << lid_bits) - 1);
-}
+static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
+                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
+        switch (map->mode) {
+        case KVM_APIC_MODE_X2APIC: {
+                u32 offset = (dest_id >> 16) * 16;
+                u32 max_apic_id = ARRAY_SIZE(map->phys_map) - 1;
+
+                if (offset <= max_apic_id) {
+                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);
+
+                        *cluster = &map->phys_map[offset];
+                        *mask = dest_id & (0xffff >> (16 - cluster_size));
+                } else {
+                        *mask = 0;
+                }
+
+                return true;
+                }
+        case KVM_APIC_MODE_XAPIC_FLAT:
+                *cluster = map->xapic_flat_map;
+                *mask = dest_id & 0xff;
+                return true;
+        case KVM_APIC_MODE_XAPIC_CLUSTER:
+                *cluster = map->xapic_cluster_map[dest_id >> 4];
+                *mask = dest_id & 0xf;
+                return true;
+        default:
+                /* Not optimized. */
+                return false;
+        }
+}
 
 static void recalculate_apic_map(struct kvm *kvm)
@@ -152,7 +162,8 @@ static void recalculate_apic_map(struct kvm *kvm)
         kvm_for_each_vcpu(i, vcpu, kvm) {
                 struct kvm_lapic *apic = vcpu->arch.apic;
-                u16 cid, lid;
+                struct kvm_lapic **cluster;
+                u16 mask;
                 u32 ldr, aid;
 
                 if (!kvm_apic_present(vcpu))
                         continue;
@@ -174,13 +185,11 @@ static void recalculate_apic_map(struct kvm *kvm)
                                 new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
                 }
 
-                if (!kvm_apic_logical_map_valid(new))
+                if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
                         continue;
 
-                apic_logical_id(new, ldr, &cid, &lid);
-
-                if (lid && cid < ARRAY_SIZE(new->logical_map))
-                        new->logical_map[cid][ffs(lid) - 1] = apic;
+                if (mask)
+                        cluster[ffs(mask) - 1] = apic;
         }
 out:
         old = rcu_dereference_protected(kvm->arch.apic_map,
@@ -685,7 +694,6 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
 {
         int i, lowest;
         bool x2apic_ipi;
-        u16 cid;
 
         if (irq->shorthand == APIC_DEST_SELF && src) {
                 *dst = src;
@@ -711,18 +719,11 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                 return true;
         }
 
-        if (!kvm_apic_logical_map_valid(map))
+        *bitmap = 0;
+        if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
+                        (u16 *)bitmap))
                 return false;
 
-        apic_logical_id(map, irq->dest_id, &cid, (u16 *)bitmap);
-
-        if (cid >= ARRAY_SIZE(map->logical_map)) {
-                *bitmap = 0;
-                return true;
-        }
-
-        *dst = map->logical_map[cid];
-
         if (!kvm_lowest_prio_delivery(irq))
                 return true;