hv_exc: New time accounting around Python callbacks

Time spent blocked on the host (i.e. in Python callbacks reached through
the UART proxy) is now accounted as stolen time and hidden from the guest
via CNTVOFF_EL2. To make sure every vCPU exits with the updated timer
offset, this does an explicit hypervisor rendezvous. It's not great because
it introduces spurious guest IPIs, but xnu doesn't seem to care...
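
In outline (a condensed sketch of what the diff below implements, not
additional code): every core counts itself in and out of guest mode through
a shared counter, and the rendezvous IPIs the other started cores, then
spins until that counter drains to zero.

    /* Bookkeeping, from hv.c/hv_exc.c below: */
    __atomic_add_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE); /* re-entering guest */
    __atomic_sub_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE); /* trapped into HV */

    /* hv_rendezvous(): force everyone out of the guest, then wait. */
    for (int i = 0; i < MAX_CPUS; i++)
        if (i != smp_id() && hv_started_cpus[i])
            smp_send_ipi(i);            /* the IPI traps the core into the HV */
    while (__atomic_load_n(&hv_cpus_in_guest, __ATOMIC_ACQUIRE))
        ;                               /* wait until no core runs guest code */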

Signed-off-by: Hector Martin <marcan@marcan.st>
Hector Martin 2021-09-21 13:17:00 +09:00
parent 5feae51f25
commit 977cbdf4f8
5 changed files with 48 additions and 8 deletions

@@ -24,6 +24,7 @@ u64 hv_tick_interval;
 
 static bool hv_should_exit;
 bool hv_started_cpus[MAX_CPUS];
+u32 hv_cpus_in_guest;
 u64 hv_saved_sp[MAX_CPUS];
 
 void hv_init(void)
@@ -69,6 +70,7 @@ void hv_start(void *entry, u64 regs[4])
 {
     hv_should_exit = false;
     memset(hv_started_cpus, 0, sizeof(hv_started_cpus));
+    hv_started_cpus[0] = 1;
 
     msr(VBAR_EL1, _hv_vectors_start);
@@ -76,8 +78,11 @@ void hv_start(void *entry, u64 regs[4])
         gl2_call(hv_set_gxf_vbar, 0, 0, 0, 0);
 
     hv_arm_tick();
+    hv_cpus_in_guest = 1;
 
     hv_enter_guest(regs[0], regs[1], regs[2], regs[3], entry);
 
+    __atomic_sub_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);
+
     spin_lock(&bhl);
     hv_wdt_stop();
@@ -149,6 +154,8 @@ static void hv_enter_secondary(void *entry, u64 regs[4])
     hv_should_exit = true;
     printf("HV: Exiting from CPU %d\n", smp_id());
 
+    __atomic_sub_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);
+
     spin_unlock(&bhl);
 }
@@ -182,11 +189,27 @@ void hv_start_secondary(int cpu, void *entry, u64 regs[4])
     printf("HV: Entering guest secondary %d at %p\n", cpu, entry);
 
     hv_started_cpus[cpu] = true;
+    __atomic_add_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);
+
     iodev_console_flush();
     smp_call4(cpu, hv_enter_secondary, (u64)entry, (u64)regs, 0, 0);
 }
 
+void hv_rendezvous(void)
+{
+    if (!__atomic_load_n(&hv_cpus_in_guest, __ATOMIC_ACQUIRE))
+        return;
+    /* IPI all CPUs. This might result in spurious IPIs to the guest... */
+    for (int i = 0; i < MAX_CPUS; i++) {
+        if (i != smp_id() && hv_started_cpus[i]) {
+            smp_send_ipi(i);
+        }
+    }
+    while (__atomic_load_n(&hv_cpus_in_guest, __ATOMIC_ACQUIRE))
+        ;
+}
+
 void hv_write_hcr(u64 val)
 {
     if (gxf_enabled() && !in_gl12())

@@ -90,6 +90,7 @@ void hv_set_elr(u64 val);
 
 void hv_init(void);
 void hv_start(void *entry, u64 regs[4]);
 void hv_start_secondary(int cpu, void *entry, u64 regs[4]);
+void hv_rendezvous(void);
 void hv_arm_tick(void);
 void hv_tick(u64 *regs);

@@ -9,7 +9,7 @@
 #include "uart.h"
 #include "uartproxy.h"
 
-//#define TIME_ACCOUNTING
+#define TIME_ACCOUNTING
 
 extern spinlock_t bhl;
@@ -33,12 +33,21 @@ void hv_exit_guest(void) __attribute__((noreturn));
 static u64 stolen_time = 0;
 static u64 exc_entry_time;
 
+extern u32 hv_cpus_in_guest;
+
 void hv_exc_proxy(u64 *regs, uartproxy_boot_reason_t reason, uartproxy_exc_code_t type, void *extra)
 {
     int from_el = FIELD_GET(SPSR_M, hv_get_spsr()) >> 2;
 
     hv_wdt_breadcrumb('P');
 
+#ifdef TIME_ACCOUNTING
+    /* Wait until all CPUs have entered the HV (max 1ms), to ensure they exit with an
+     * updated timer offset. */
+    hv_rendezvous();
+    u64 entry_time = mrs(CNTPCT_EL0);
+#endif
+
     struct uartproxy_exc_info exc_info = {
         .cpu_id = smp_id(),
         .spsr = hv_get_spsr(),
@@ -73,6 +82,10 @@ void hv_exc_proxy(u64 *regs, uartproxy_boot_reason_t reason, uartproxy_exc_code_t type, void *extra)
             msr(SP_EL0, exc_info.sp[0]);
             msr(SP_EL1, exc_info.sp[1]);
             hv_wdt_breadcrumb('p');
+#ifdef TIME_ACCOUNTING
+            u64 lost = mrs(CNTPCT_EL0) - entry_time;
+            stolen_time += lost;
+#endif
             return;
         case EXC_EXIT_GUEST:
             spin_unlock(&bhl);
@@ -217,6 +230,7 @@ static bool hv_handle_msr(u64 *regs, u64 iss)
 static void hv_exc_entry(u64 *regs)
 {
     UNUSED(regs);
+    __atomic_sub_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);
     spin_lock(&bhl);
     hv_wdt_breadcrumb('X');
     exc_entry_time = mrs(CNTPCT_EL0);
@@ -233,13 +247,9 @@ static void hv_exc_exit(u64 *regs)
     hv_update_fiq();
     /* reenable PMU counters */
     reg_set(SYS_IMP_APL_PMCR0, PERCPU(exc_entry_pmcr0_cnt));
-#ifdef TIME_ACCOUNTING
-    u64 lost = mrs(CNTPCT_EL0) - exc_entry_time;
-    if (lost > 8)
-        stolen_time += lost - 8;
-#endif
     msr(CNTVOFF_EL2, stolen_time);
     spin_unlock(&bhl);
+    __atomic_add_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);
 }
 
 void hv_exc_sync(u64 *regs)
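
The reason writing the accumulated stolen_time into CNTVOFF_EL2 hides the
lost time from the guest is the architectural definition of the virtual
counter. A worked sketch with made-up numbers (the constants here are
illustrative, not from this commit):

    /* ARMv8: CNTVCT_EL0 = CNTPCT_EL0 - CNTVOFF_EL2 */
    u64 cntpct = 1000000;          /* physical counter at guest re-entry */
    u64 stolen_time = 2400;        /* ticks accumulated inside the HV */
    msr(CNTVOFF_EL2, stolen_time); /* as hv_exc_exit() does above */
    /* The guest now reads CNTVCT_EL0 == 997600; the 2400 stolen ticks
     * are invisible from its point of view. */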

@@ -166,6 +166,11 @@ void smp_start_secondaries(void)
     spin_table[0].mpidr = mrs(MPIDR_EL1) & 0xFFFFFF;
 }
 
+void smp_send_ipi(int cpu)
+{
+    msr(SYS_IMP_APL_IPI_RR_GLOBAL_EL1, spin_table[cpu].mpidr);
+}
+
 void smp_call4(int cpu, void *func, u64 arg0, u64 arg1, u64 arg2, u64 arg3)
 {
     struct spin_table *target = &spin_table[cpu];
@@ -182,7 +187,7 @@ void smp_call4(int cpu, void *func, u64 arg0, u64 arg1, u64 arg2, u64 arg3)
 
     target->target = (u64)func;
     sysop("dmb sy");
-    msr(SYS_IMP_APL_IPI_RR_GLOBAL_EL1, spin_table[cpu].mpidr);
+    smp_send_ipi(cpu);
 
     while (target->flag == flag)
         sysop("dmb sy");
@@ -205,7 +210,7 @@ void smp_set_wfe_mode(bool new_mode)
 
     for (int cpu = 1; cpu < MAX_CPUS; cpu++)
         if (smp_is_alive(cpu))
-            msr(SYS_IMP_APL_IPI_RR_GLOBAL_EL1, spin_table[cpu].mpidr);
+            smp_send_ipi(cpu);
 
     sysop("sev");
 }
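
The new helper is a thin wrapper around Apple's fast-IPI request register:
writing a target core's MPIDR to SYS_IMP_APL_IPI_RR_GLOBAL_EL1 delivers an
IPI to that core. A hypothetical broadcast variant (smp_ipi_all_others is
not part of this commit) shows how it composes with smp_is_alive():

    /* Hypothetical, for illustration only: IPI every live core but us. */
    static void smp_ipi_all_others(void)
    {
        for (int cpu = 0; cpu < MAX_CPUS; cpu++)
            if (cpu != smp_id() && smp_is_alive(cpu))
                smp_send_ipi(cpu);
    }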

@@ -28,6 +28,7 @@ bool smp_is_alive(int cpu);
 int smp_get_mpidr(int cpu);
 u64 smp_get_release_addr(int cpu);
 void smp_set_wfe_mode(bool new_mode);
+void smp_send_ipi(int cpu);
 
 static inline int smp_id(void)
 {
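
Putting the pieces together, the accounting window around a Python callback
(condensed from hv_exc_proxy() above; the comments are explanatory, not
code from this commit):

    hv_rendezvous();                  /* park every vCPU in the hypervisor */
    u64 entry_time = mrs(CNTPCT_EL0);
    /* ... uartproxy round trip: the Python callback runs on the host ... */
    stolen_time += mrs(CNTPCT_EL0) - entry_time;
    /* each vCPU then applies the offset via msr(CNTVOFF_EL2, stolen_time)
     * in hv_exc_exit() before re-entering the guest */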