hv_exc: Avoid delivering spurious HV-triggered IPIs to the guest

Signed-off-by: Hector Martin <marcan@marcan.st>
Author: Hector Martin <marcan@marcan.st>, 2021-09-21 13:33:36 +09:00
parent 098d394a5c
commit a16731e8b3
5 changed files with 50 additions and 9 deletions

View file

@ -215,6 +215,10 @@ void hv_rendezvous(void)
void hv_switch_cpu(int cpu)
{
if (cpu > MAX_CPUS || cpu < 0 || !hv_started_cpus[cpu]) {
printf("HV: CPU #%d is inactive or invalid\n", cpu);
return;
}
hv_rendezvous();
printf("HV: switching to CPU #%d\n", cpu);
hv_want_cpu = cpu;
@ -297,6 +301,22 @@ void hv_rearm(void)
msr(CNTP_CTL_EL0, CNTx_CTL_ENABLE);
}
// Service a pending CPU-switch request (hv_want_cpu, set by hv_switch_cpu())
// from a hypervisor exception path. Either drops this CPU into the user
// proxy (if it is the requested one) or parks until the target CPU has
// picked up the request.
// NOTE(review): assumes the caller holds the big hypervisor lock `bhl` —
// the unlock/relock below only makes sense in that case; confirm at call sites.
void hv_check_rendezvous(u64 *regs)
{
    if (hv_want_cpu == smp_id()) {
        // We are the requested CPU: consume the request and enter the
        // host proxy as a user-triggered interrupt.
        hv_want_cpu = -1;
        hv_exc_proxy(regs, START_HV, HV_USER_INTERRUPT, NULL);
    } else if (hv_want_cpu != -1) {
        // Unlock the HV so the target CPU can get into the proxy
        spin_unlock(&bhl);
        // Busy-wait until the target CPU clears the request; the barrier
        // forces re-reads of hv_want_cpu from memory each iteration.
        while (hv_want_cpu != -1)
            sysop("dmb sy");
        spin_lock(&bhl);
        // Make sure we tick at least once more before running the guest
        hv_rearm();
    }
}
void hv_tick(u64 *regs)
{
if (hv_should_exit) {
@ -305,8 +325,7 @@ void hv_tick(u64 *regs)
}
hv_wdt_pet();
iodev_handle_events(uartproxy_iodev);
if (hv_want_cpu == smp_id() || iodev_can_read(uartproxy_iodev)) {
hv_want_cpu = -1;
if (iodev_can_read(uartproxy_iodev)) {
hv_exc_proxy(regs, START_HV, HV_USER_INTERRUPT, NULL);
}
hv_vuart_poll();

View file

@ -94,6 +94,7 @@ void hv_rendezvous(void);
void hv_switch_cpu(int cpu);
void hv_arm_tick(void);
void hv_rearm(void);
void hv_check_rendezvous(u64 *regs);
void hv_tick(u64 *regs);
#endif

View file

@ -22,6 +22,7 @@ extern spinlock_t bhl;
#define D_PERCPU(t, x) t x[MAX_CPUS]
#define PERCPU(x) x[mrs(TPIDR_EL2)]
D_PERCPU(static bool, ipi_queued);
D_PERCPU(static bool, ipi_pending);
D_PERCPU(static bool, pmc_pending);
D_PERCPU(static u64, pmc_irq_mode);
@ -184,9 +185,25 @@ static bool hv_handle_msr(u64 *regs, u64 iss)
* don't do any wfis that assume otherwise in m1n1. */
SYSREG_PASS(SYS_IMP_APL_CYC_OVRD)
/* IPI handling */
SYSREG_PASS(SYS_IMP_APL_IPI_RR_LOCAL_EL1)
SYSREG_PASS(SYS_IMP_APL_IPI_RR_GLOBAL_EL1)
SYSREG_PASS(SYS_IMP_APL_IPI_CR_EL1)
case SYSREG_ISS(SYS_IMP_APL_IPI_RR_LOCAL_EL1): {
assert(!is_read);
u64 mpidr = (regs[rt] & 0xff) | (mrs(MPIDR_EL1) & 0xffff00);
msr(SYS_IMP_APL_IPI_RR_LOCAL_EL1, regs[rt]);
for (int i = 0; i < MAX_CPUS; i++)
if (mpidr == smp_get_mpidr(i))
ipi_queued[i] = true;
return true;
}
case SYSREG_ISS(SYS_IMP_APL_IPI_RR_GLOBAL_EL1):
assert(!is_read);
u64 mpidr = (regs[rt] & 0xff) | ((regs[rt] & 0xff0000) >> 8);
msr(SYS_IMP_APL_IPI_RR_LOCAL_EL1, regs[rt]);
for (int i = 0; i < MAX_CPUS; i++) {
if (mpidr == (smp_get_mpidr(i) & 0xffff))
ipi_queued[i] = true;
}
return true;
case SYSREG_ISS(SYS_IMP_APL_IPI_SR_EL1):
if (is_read)
regs[rt] = PERCPU(ipi_pending) ? IPI_SR_PENDING : 0;
@ -332,11 +349,14 @@ void hv_exc_fiq(u64 *regs)
}
if (mrs(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
hv_tick(regs);
PERCPU(ipi_pending) = true;
if (PERCPU(ipi_queued)) {
PERCPU(ipi_pending) = true;
PERCPU(ipi_queued) = false;
}
msr(SYS_IMP_APL_IPI_SR_EL1, IPI_SR_PENDING);
sysop("isb");
}
hv_check_rendezvous(regs);
// Handles guest timers
hv_exc_exit(regs);

View file

@ -168,7 +168,8 @@ void smp_start_secondaries(void)
// Send an IPI to the given CPU via the Apple fast-IPI global register.
// The register wants affinity level 0 in bits [7:0] and affinity level 1
// (cluster) in bits [23:16]; spin_table[].mpidr stores Aff1 in bits [15:8],
// hence the shift. (The rendered diff left both the old and new msr lines
// in place; only the reformatted write is correct.)
void smp_send_ipi(int cpu)
{
    u64 mpidr = spin_table[cpu].mpidr;
    msr(SYS_IMP_APL_IPI_RR_GLOBAL_EL1, (mpidr & 0xff) | ((mpidr & 0xff00) << 8));
}
void smp_call4(int cpu, void *func, u64 arg0, u64 arg1, u64 arg2, u64 arg3)
@ -220,7 +221,7 @@ bool smp_is_alive(int cpu)
return spin_table[cpu].flag;
}
int smp_get_mpidr(int cpu)
uint64_t smp_get_mpidr(int cpu)
{
return spin_table[cpu].mpidr;
}

View file

@ -25,7 +25,7 @@ void smp_call4(int cpu, void *func, u64 arg0, u64 arg1, u64 arg2, u64 arg3);
u64 smp_wait(int cpu);
bool smp_is_alive(int cpu);
int smp_get_mpidr(int cpu);
uint64_t smp_get_mpidr(int cpu);
u64 smp_get_release_addr(int cpu);
void smp_set_wfe_mode(bool new_mode);
void smp_send_ipi(int cpu);