2021-05-01 10:05:21 +00:00
|
|
|
/* SPDX-License-Identifier: MIT */
|
|
|
|
|
|
|
|
#include "hv.h"
|
|
|
|
#include "assert.h"
|
|
|
|
#include "cpu_regs.h"
|
2021-05-25 11:05:10 +00:00
|
|
|
#include "gxf.h"
|
2021-09-15 14:31:33 +00:00
|
|
|
#include "memory.h"
|
2021-05-27 15:03:11 +00:00
|
|
|
#include "pcie.h"
|
2021-05-27 12:16:17 +00:00
|
|
|
#include "smp.h"
|
2021-09-15 14:31:33 +00:00
|
|
|
#include "string.h"
|
2021-06-12 08:52:23 +00:00
|
|
|
#include "usb.h"
|
2021-05-08 12:54:07 +00:00
|
|
|
#include "utils.h"
|
2021-05-01 10:05:21 +00:00
|
|
|
|
2021-05-25 11:04:20 +00:00
|
|
|
// Guest tick rate in Hz: hv_tick() fires this many times per second.
#define HV_TICK_RATE 1000

// Big hypervisor lock: serializes HV state shared between CPUs
// (hv_should_exit, hv_started_cpus, proxy entry/exit).
DECLARE_SPINLOCK(bhl);

// Assembly trampolines for entering/leaving the guest (defined outside this file).
void hv_enter_guest(u64 x0, u64 x1, u64 x2, u64 x3, void *entry);
void hv_exit_guest(void) __attribute__((noreturn));

// Start of the hypervisor exception vector table (assembly/linker symbol).
extern char _hv_vectors_start[0];

// Timer counts per guest tick, computed in hv_init() as CNTFRQ_EL0 / HV_TICK_RATE.
u64 hv_tick_interval;

// CPU number the proxy wants to switch control to, or -1 when no switch is pending.
int hv_want_cpu;

// Set by whichever CPU exits the guest first; polled in hv_tick() by the others.
static bool hv_should_exit;
// Per-CPU flag: this CPU has been sent into the guest.
bool hv_started_cpus[MAX_CPUS];
// Count of CPUs currently executing guest code (manipulated with __atomic ops).
u32 hv_cpus_in_guest;
// Per-CPU saved stack pointers — not referenced in this file; presumably used by
// the hv_enter_guest/hv_exit_guest assembly side. TODO(review): confirm.
u64 hv_saved_sp[MAX_CPUS];
|
|
|
|
|
2021-09-24 01:57:27 +00:00
|
|
|
/*
 * Snapshot of the boot CPU's EL2/EL12 system-register configuration,
 * captured in hv_start() and replayed verbatim on each secondary CPU by
 * hv_init_secondary() so all cores enter the guest with identical state.
 */
struct hv_secondary_info_t {
    uint64_t hcr;                         // HCR_EL2
    uint64_t hacr;                        // HACR_EL2
    uint64_t vtcr, vttbr;                 // stage-2 translation control / table base
    uint64_t mdcr;                        // MDCR_EL2
    uint64_t mdscr;                       // MDSCR_EL1
    uint64_t amx_ctl;                     // SYS_IMP_APL_AMX_CTL_EL2
    uint64_t apvmkeylo, apvmkeyhi, apsts; // Apple PAuth VM keys / status
    uint64_t actlr_el2;                   // ACTLR_EL2
    uint64_t actlr_el1;                   // SYS_IMP_APL_ACTLR_EL12
    uint64_t cnthctl;                     // CNTHCTL_EL2 (timer trap config)
    uint64_t sprr_config;                 // SYS_IMP_APL_SPRR_CONFIG_EL1
    uint64_t gxf_config;                  // SYS_IMP_APL_GXF_CONFIG_EL1
};

// Filled in by hv_start() on the boot CPU; read by secondaries via smp_call4().
static struct hv_secondary_info_t hv_secondary_info;
|
|
|
|
|
2021-05-01 10:05:21 +00:00
|
|
|
/*
 * One-time hypervisor initialization, run on the boot CPU.
 *
 * Releases iodevs the guest will own, starts the secondary CPUs, arms the
 * HV watchdog, configures EL2 (timer access, stage-2 tables, HCR), and
 * broadcast-invalidates stale EL1 TLB entries. The ordering of the msr/TLBI
 * sequence at the end is significant; do not reorder.
 */
void hv_init(void)
{
    pcie_shutdown();
    // reenable hpm interrupts for the guest for unused iodevs
    usb_hpm_restore_irqs(0);
    smp_start_secondaries();
    hv_wdt_init();

    // Enable physical timer for EL1
    msr(CNTHCTL_EL2, CNTHCTL_EL1PTEN | CNTHCTL_EL1PCTEN);

    // Initialize the hypervisor stage-2 page table machinery
    hv_pt_init();

    // Configure hypervisor defaults
    hv_write_hcr(HCR_API | // Allow PAuth instructions
                 HCR_APK | // Allow PAuth key registers
                 HCR_TEA | // Trap external aborts
                 HCR_E2H | // VHE mode (forced)
                 HCR_RW |  // AArch64 guest
                 HCR_AMO | // Trap SError exceptions
                 HCR_VM);  // Enable stage 2 translation

    // No guest vectors initially
    msr(VBAR_EL12, 0);

    // Compute tick interval
    hv_tick_interval = mrs(CNTFRQ_EL0) / HV_TICK_RATE;

    // Flush all EL1 TLB entries across the inner-shareable domain so the
    // guest starts with a clean translation state.
    sysop("dsb ishst");
    sysop("tlbi alle1is");
    sysop("dsb ish");
    sysop("isb");
}
|
2021-05-04 10:36:23 +00:00
|
|
|
|
2021-05-27 12:11:49 +00:00
|
|
|
/*
 * Mirror the HV exception vectors into the GL1 vector base register.
 * VBAR_GL1 is only writable from GL2, so this is invoked via gl2_call()
 * when GXF is enabled.
 */
static void hv_set_gxf_vbar(void)
{
    msr(SYS_IMP_APL_VBAR_GL1, _hv_vectors_start);
}
|
|
|
|
|
2021-05-04 10:36:23 +00:00
|
|
|
void hv_start(void *entry, u64 regs[4])
|
|
|
|
{
|
2021-09-15 14:31:33 +00:00
|
|
|
hv_should_exit = false;
|
|
|
|
memset(hv_started_cpus, 0, sizeof(hv_started_cpus));
|
2021-09-21 04:17:00 +00:00
|
|
|
hv_started_cpus[0] = 1;
|
2021-09-15 14:31:33 +00:00
|
|
|
|
2021-05-04 10:36:23 +00:00
|
|
|
msr(VBAR_EL1, _hv_vectors_start);
|
|
|
|
|
2021-05-27 12:11:49 +00:00
|
|
|
if (gxf_enabled())
|
|
|
|
gl2_call(hv_set_gxf_vbar, 0, 0, 0, 0);
|
|
|
|
|
2021-09-24 01:57:27 +00:00
|
|
|
hv_secondary_info.hcr = mrs(HCR_EL2);
|
|
|
|
hv_secondary_info.hacr = mrs(HACR_EL2);
|
|
|
|
hv_secondary_info.vtcr = mrs(VTCR_EL2);
|
|
|
|
hv_secondary_info.vttbr = mrs(VTTBR_EL2);
|
|
|
|
hv_secondary_info.mdcr = mrs(MDCR_EL2);
|
|
|
|
hv_secondary_info.mdscr = mrs(MDSCR_EL1);
|
|
|
|
hv_secondary_info.amx_ctl = mrs(SYS_IMP_APL_AMX_CTL_EL2);
|
|
|
|
hv_secondary_info.apvmkeylo = mrs(SYS_IMP_APL_APVMKEYLO_EL2);
|
|
|
|
hv_secondary_info.apvmkeyhi = mrs(SYS_IMP_APL_APVMKEYHI_EL2);
|
|
|
|
hv_secondary_info.apsts = mrs(SYS_IMP_APL_APSTS_EL12);
|
|
|
|
hv_secondary_info.actlr_el2 = mrs(ACTLR_EL2);
|
|
|
|
hv_secondary_info.actlr_el1 = mrs(SYS_IMP_APL_ACTLR_EL12);
|
|
|
|
hv_secondary_info.cnthctl = mrs(CNTHCTL_EL2);
|
|
|
|
hv_secondary_info.sprr_config = mrs(SYS_IMP_APL_SPRR_CONFIG_EL1);
|
|
|
|
hv_secondary_info.gxf_config = mrs(SYS_IMP_APL_GXF_CONFIG_EL1);
|
|
|
|
|
2021-05-25 11:04:20 +00:00
|
|
|
hv_arm_tick();
|
2021-09-21 04:18:06 +00:00
|
|
|
hv_want_cpu = -1;
|
2021-09-21 04:17:00 +00:00
|
|
|
hv_cpus_in_guest = 1;
|
|
|
|
|
2021-05-04 10:36:23 +00:00
|
|
|
hv_enter_guest(regs[0], regs[1], regs[2], regs[3], entry);
|
2021-09-15 14:31:33 +00:00
|
|
|
|
2021-09-21 04:17:00 +00:00
|
|
|
__atomic_sub_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);
|
2021-09-15 14:31:33 +00:00
|
|
|
spin_lock(&bhl);
|
|
|
|
|
2021-05-27 12:16:17 +00:00
|
|
|
hv_wdt_stop();
|
2021-05-04 15:24:52 +00:00
|
|
|
|
2021-09-15 14:31:33 +00:00
|
|
|
hv_should_exit = true;
|
|
|
|
printf("HV: Exiting hypervisor (main CPU)\n");
|
|
|
|
|
|
|
|
for (int i = 0; i < MAX_CPUS; i++) {
|
|
|
|
if (hv_started_cpus[i]) {
|
|
|
|
printf("HV: Waiting for CPU %d to exit\n", i);
|
|
|
|
spin_unlock(&bhl);
|
|
|
|
smp_wait(i);
|
|
|
|
spin_lock(&bhl);
|
|
|
|
hv_started_cpus[i] = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
printf("HV: All CPUs exited\n");
|
|
|
|
spin_unlock(&bhl);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Per-CPU hypervisor bring-up, executed on a secondary CPU via smp_call4()
 * from hv_start_secondary(). Replays the boot CPU's register snapshot so
 * this core enters the guest with identical EL2/GL state.
 */
static void hv_init_secondary(struct hv_secondary_info_t *info)
{
    gxf_init();

    // Point this CPU at the shared HV exception vectors
    msr(VBAR_EL1, _hv_vectors_start);

    // Replay the snapshot captured by the boot CPU in hv_start()
    msr(HCR_EL2, info->hcr);
    msr(HACR_EL2, info->hacr);
    msr(VTCR_EL2, info->vtcr);
    msr(VTTBR_EL2, info->vttbr);
    msr(MDCR_EL2, info->mdcr);
    msr(MDSCR_EL1, info->mdscr);
    msr(SYS_IMP_APL_AMX_CTL_EL2, info->amx_ctl);
    msr(SYS_IMP_APL_APVMKEYLO_EL2, info->apvmkeylo);
    msr(SYS_IMP_APL_APVMKEYHI_EL2, info->apvmkeyhi);
    msr(SYS_IMP_APL_APSTS_EL12, info->apsts);
    msr(ACTLR_EL2, info->actlr_el2);
    msr(SYS_IMP_APL_ACTLR_EL12, info->actlr_el1);
    msr(CNTHCTL_EL2, info->cnthctl);
    msr(SYS_IMP_APL_SPRR_CONFIG_EL1, info->sprr_config);
    msr(SYS_IMP_APL_GXF_CONFIG_EL1, info->gxf_config);

    // VBAR_GL1 is only writable from GL2 when GXF is active
    if (gxf_enabled())
        gl2_call(hv_set_gxf_vbar, 0, 0, 0, 0);

    hv_arm_tick();
}
|
|
|
|
|
|
|
|
/*
 * Guest entry wrapper run on a secondary CPU via smp_call4(). Enters the
 * guest and, once the guest exits on this CPU, flags a global HV shutdown
 * and removes itself from the in-guest count.
 */
static void hv_enter_secondary(void *entry, u64 regs[4])
{
    hv_enter_guest(regs[0], regs[1], regs[2], regs[3], entry);

    spin_lock(&bhl);

    // Any CPU leaving the guest ends the whole HV session; hv_tick() on the
    // other CPUs will notice this flag and exit too.
    hv_should_exit = true;
    printf("HV: Exiting from CPU %d\n", smp_id());

    __atomic_sub_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);

    spin_unlock(&bhl);
}
|
|
|
|
|
|
|
|
/*
 * Bring a secondary CPU into the guest at `entry` with initial registers
 * x0-x3 = regs[0..3]. Synchronously initializes the target's EL2 state,
 * then asynchronously sends it into the guest (no wait on the final call).
 */
void hv_start_secondary(int cpu, void *entry, u64 regs[4])
{
    printf("HV: Initializing secondary %d\n", cpu);
    iodev_console_flush();

    mmu_init_secondary(cpu);
    iodev_console_flush();

    // Configure EL2/GL state on the target CPU and wait for completion
    smp_call4(cpu, hv_init_secondary, (u64)&hv_secondary_info, 0, 0, 0);
    smp_wait(cpu);
    iodev_console_flush();

    printf("HV: Entering guest secondary %d at %p\n", cpu, entry);
    // Mark the CPU as in-guest before it actually enters, so rendezvous
    // and shutdown logic account for it.
    hv_started_cpus[cpu] = true;
    __atomic_add_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);

    iodev_console_flush();
    // No smp_wait() here: the target stays in the guest until shutdown
    smp_call4(cpu, hv_enter_secondary, (u64)entry, (u64)regs, 0, 0);
}
|
2021-05-25 11:04:20 +00:00
|
|
|
|
2021-09-21 04:17:00 +00:00
|
|
|
void hv_rendezvous(void)
|
|
|
|
{
|
|
|
|
if (!__atomic_load_n(&hv_cpus_in_guest, __ATOMIC_ACQUIRE))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* IPI all CPUs. This might result in spurious IPIs to the guest... */
|
|
|
|
for (int i = 0; i < MAX_CPUS; i++) {
|
|
|
|
if (i != smp_id() && hv_started_cpus[i]) {
|
|
|
|
smp_send_ipi(i);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
while (__atomic_load_n(&hv_cpus_in_guest, __ATOMIC_ACQUIRE))
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
2021-09-21 04:18:06 +00:00
|
|
|
void hv_switch_cpu(int cpu)
|
|
|
|
{
|
2021-09-21 04:33:36 +00:00
|
|
|
if (cpu > MAX_CPUS || cpu < 0 || !hv_started_cpus[cpu]) {
|
|
|
|
printf("HV: CPU #%d is inactive or invalid\n", cpu);
|
|
|
|
return;
|
|
|
|
}
|
2021-09-21 04:18:06 +00:00
|
|
|
hv_rendezvous();
|
|
|
|
printf("HV: switching to CPU #%d\n", cpu);
|
|
|
|
hv_want_cpu = cpu;
|
|
|
|
hv_rearm();
|
|
|
|
}
|
|
|
|
|
2021-05-25 11:05:10 +00:00
|
|
|
/*
 * Write HCR_EL2. When GXF is enabled, the register must be written from
 * GL2, so we bounce through gl2_call() unless we are already in GL1/GL2.
 */
void hv_write_hcr(u64 val)
{
    if (!gxf_enabled() || in_gl12())
        msr(HCR_EL2, val);
    else
        gl2_call(hv_write_hcr, val, 0, 0, 0);
}
|
|
|
|
|
2021-05-27 12:11:49 +00:00
|
|
|
u64 hv_get_spsr(void)
|
|
|
|
{
|
|
|
|
if (in_gl12())
|
|
|
|
return mrs(SYS_IMP_APL_SPSR_GL1);
|
|
|
|
else
|
|
|
|
return mrs(SPSR_EL2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write the guest's saved PSTATE to whichever bank (GL1 or EL2) owns it. */
void hv_set_spsr(u64 val)
{
    if (!in_gl12()) {
        msr(SPSR_EL2, val);
        return;
    }

    msr(SYS_IMP_APL_SPSR_GL1, val);
}
|
|
|
|
|
|
|
|
u64 hv_get_esr(void)
|
|
|
|
{
|
|
|
|
if (in_gl12())
|
|
|
|
return mrs(SYS_IMP_APL_ESR_GL1);
|
|
|
|
else
|
|
|
|
return mrs(ESR_EL2);
|
|
|
|
}
|
|
|
|
|
|
|
|
u64 hv_get_far(void)
|
|
|
|
{
|
|
|
|
if (in_gl12())
|
|
|
|
return mrs(SYS_IMP_APL_FAR_GL1);
|
|
|
|
else
|
|
|
|
return mrs(FAR_EL2);
|
|
|
|
}
|
|
|
|
|
2021-05-29 18:29:52 +00:00
|
|
|
u64 hv_get_afsr1(void)
|
|
|
|
{
|
|
|
|
if (in_gl12())
|
|
|
|
return mrs(SYS_IMP_APL_AFSR1_GL1);
|
|
|
|
else
|
|
|
|
return mrs(AFSR1_EL2);
|
|
|
|
}
|
|
|
|
|
2021-05-27 12:11:49 +00:00
|
|
|
u64 hv_get_elr(void)
|
|
|
|
{
|
|
|
|
if (in_gl12())
|
|
|
|
return mrs(SYS_IMP_APL_ELR_GL1);
|
|
|
|
else
|
|
|
|
return mrs(ELR_EL2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write the exception return address to whichever bank (GL1 or EL2) owns it. */
void hv_set_elr(u64 val)
{
    if (!in_gl12()) {
        msr(ELR_EL2, val);
        return;
    }

    msr(SYS_IMP_APL_ELR_GL1, val);
}
|
|
|
|
|
2021-05-25 11:04:20 +00:00
|
|
|
/* Arm the EL2 physical timer to fire one hv_tick_interval from now. */
void hv_arm_tick(void)
{
    msr(CNTP_TVAL_EL0, hv_tick_interval);
    msr(CNTP_CTL_EL0, CNTx_CTL_ENABLE);
}
|
|
|
|
|
2021-09-21 04:18:06 +00:00
|
|
|
/* Force the timer to fire immediately (TVAL = 0 expires at once). */
void hv_rearm(void)
{
    msr(CNTP_TVAL_EL0, 0);
    msr(CNTP_CTL_EL0, CNTx_CTL_ENABLE);
}
|
|
|
|
|
2021-09-21 12:09:23 +00:00
|
|
|
/*
 * Handle a pending CPU-switch request (see hv_switch_cpu()). Called from
 * exception context with the big HV lock held.
 */
void hv_check_rendezvous(struct exc_info *ctx)
{
    if (hv_want_cpu == smp_id()) {
        // We are the requested CPU: claim the request and enter the proxy
        hv_want_cpu = -1;
        hv_exc_proxy(ctx, START_HV, HV_USER_INTERRUPT, NULL);
    } else if (hv_want_cpu != -1) {
        // Unlock the HV so the target CPU can get into the proxy
        spin_unlock(&bhl);
        // Busy-wait until the target has claimed the request
        while (hv_want_cpu != -1)
            sysop("dmb sy");
        spin_lock(&bhl);
        // Make sure we tick at least once more before running the guest
        hv_rearm();
    }
}
|
|
|
|
|
2021-09-21 12:09:23 +00:00
|
|
|
/*
 * Periodic hypervisor tick, run in exception context with the big HV lock
 * held. Handles shutdown, pets the watchdog, and polls the proxy iodevs.
 */
void hv_tick(struct exc_info *ctx)
{
    // Another CPU has flagged shutdown: leave the guest for good.
    // hv_exit_guest() does not return, so drop the lock first.
    if (hv_should_exit) {
        spin_unlock(&bhl);
        hv_exit_guest();
    }
    hv_wdt_pet();
    iodev_handle_events(uartproxy_iodev);
    // Host sent data on the proxy channel: drop into the interactive proxy
    if (iodev_can_read(uartproxy_iodev)) {
        hv_exc_proxy(ctx, START_HV, HV_USER_INTERRUPT, NULL);
    }
    hv_vuart_poll();
}
|