alistair23-linux/arch/arm/kvm/arm.c

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/unified.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/cputype.h>
#include <asm/tlbflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
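
/* Let the assembler accept the virtualization extension instructions */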
#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif
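
/*
 * Static state used by the Hyp-mode setup: the page used as each CPU's
 * Hyp stack, the per-CPU save area for the host VFP state, and the
 * exception vectors that were installed before KVM took over.
 */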
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
static unsigned long hyp_default_vectors;
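
/*
 * Most of the architecture hooks required by the generic KVM code are
 * no-ops on ARM: Hyp mode is initialized once for all online CPUs at
 * module load time (see init_hyp_mode() below), so there is nothing to
 * do per CPU here.
 */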
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm: pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret = 0;

	if (type)
		return -EINVAL;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	ret = create_hyp_mappings(kvm, kvm + 1);
	if (ret)
		goto out_free_stage2_pgd;

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	return ret;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm: pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_free_stage2_pgd(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}
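
/*
 * Report which of the generic KVM capabilities are supported by this
 * architecture; anything not listed here is reported as absent.
 */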
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
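
/*
 * Allocate and initialize a new vcpu, and map its struct kvm_vcpu into
 * Hyp mode so the structure is reachable from the world-switch code.
 */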
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = create_hyp_mappings(vcpu, vcpu + 1);
	if (err)
		goto vcpu_uninit;

	return vcpu;
vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}
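
/*
 * Identify the host CPU and map it to a supported KVM target; only the
 * ARM Cortex-A15 is handled in this version of the code.
 */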
int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	if (implementor != ARM_CPU_IMP_ARM)
		return -EINVAL;

	switch (part_number) {
	case ARM_CPU_PART_CORTEX_A15:
		return KVM_ARM_TARGET_CORTEX_A15;
	default:
		return -EINVAL;
	}
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return -EINVAL;
}
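
/*
 * Handle the vcpu-scoped ioctls: target initialization
 * (KVM_ARM_VCPU_INIT), single register access (KVM_GET/SET_ONE_REG)
 * and enumeration of the available registers (KVM_GET_REG_LIST).
 */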
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_vcpu_set_target(vcpu, &init);
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	default:
		return -EINVAL;
	}
}
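
/*
 * Dirty page logging and the VM-scoped ioctls are not implemented in
 * this version of the code, so both entry points simply fail.
 */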
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EINVAL;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
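
/*
 * Per-CPU Hyp-mode initialization: point the exception vectors at the
 * KVM init code, then call it with the Hyp translation table base
 * (HTTBR), the top of this CPU's Hyp stack and the runtime Hyp vectors.
 */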
static void cpu_init_hyp_mode(void *vector)
{
	unsigned long long pgd_ptr;
	unsigned long pgd_low, pgd_high;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors((unsigned long)vector);

	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
	pgd_high = (pgd_ptr >> 32ULL);
	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)__kvm_hyp_vector;

	/*
	 * Call initialization code, and switch to the full blown
	 * HYP code. The init code doesn't need to preserve these registers as
	 * r1-r3 and r12 are caller-saved according to the AAPCS.
	 * Note that we slightly misuse the prototype by casting pgd_low to
	 * a void *.
	 */
	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
}

/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	phys_addr_t init_phys_addr;
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_free_stack_pages;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Execute the init code on each CPU.
	 *
	 * Note: The stack is not mapped yet, so don't do anything else than
	 * initializing the hypervisor mode on each CPU using a local stack
	 * space for temporary storage.
	 */
	init_phys_addr = virt_to_phys(__kvm_hyp_init);
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, cpu_init_hyp_mode,
					 (void *)(long)init_phys_addr, 1);
	}

	/*
	 * Unmap the identity mapping
	 */
	kvm_clear_hyp_idmap();

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_free_mappings;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);

		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_free_mappings;
		}
	}

	/*
	 * Map the host VFP structures
	 */
	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
	if (!kvm_host_vfp_state) {
		err = -ENOMEM;
		kvm_err("Cannot allocate host VFP state\n");
		goto out_free_mappings;
	}

	for_each_possible_cpu(cpu) {
		struct vfp_hard_struct *vfp;

		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
		err = create_hyp_mappings(vfp, vfp + 1);
		if (err) {
			kvm_err("Cannot map host VFP state: %d\n", err);
			goto out_free_vfp;
		}
	}

	kvm_info("Hyp mode initialized successfully\n");
	return 0;
out_free_vfp:
	free_percpu(kvm_host_vfp_state);
out_free_mappings:
	free_hyp_pmds();
out_free_stack_pages:
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;

	if (!is_hyp_mode_available()) {
		kvm_err("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_target_cpu() < 0) {
		kvm_err("Target CPU not supported!\n");
		return -ENODEV;
	}

	err = init_hyp_mode();
	if (err)
		goto out_err;

	return 0;
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
}
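
/*
 * Module entry point: register this architecture backend with the
 * generic KVM code via kvm_init().
 */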
static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);