/* SPDX-License-Identifier: MIT */

#include "hv.h"
#include "assert.h"
#include "cpu_regs.h"
#include "exception.h"
#include "smp.h"
#include "string.h"
#include "uart.h"
#include "uartproxy.h"

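/* Time spent in the hypervisor/proxy is accounted as "stolen" and later hidden from
 * the guest's virtual timebase via CNTVOFF_EL2 (see hv_exc_exit()). */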
#define TIME_ACCOUNTING
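
/* Big HV lock: serializes guest exception handling across CPUs (taken in
 * hv_exc_entry(), released in hv_exc_exit() or when exiting the guest). */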
extern spinlock_t bhl;

#define _SYSREG_ISS(_1, _2, op0, op1, CRn, CRm, op2)                           \
    (((op0) << ESR_ISS_MSR_OP0_SHIFT) | ((op1) << ESR_ISS_MSR_OP1_SHIFT) |     \
     ((CRn) << ESR_ISS_MSR_CRn_SHIFT) | ((CRm) << ESR_ISS_MSR_CRm_SHIFT) |     \
     ((op2) << ESR_ISS_MSR_OP2_SHIFT))
#define SYSREG_ISS(...) _SYSREG_ISS(__VA_ARGS__)

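/* Trivial per-CPU variables: one slot per CPU, indexed by TPIDR_EL2 (the CPU index). */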
#define D_PERCPU(t, x) t x[MAX_CPUS]
#define PERCPU(x)      x[mrs(TPIDR_EL2)]

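/* Virtual IPI bookkeeping: a trapped IPI_RR write marks the target CPU as ipi_queued;
 * when the corresponding hardware IPI FIQ arrives on that CPU (hv_exc_fiq), the flag is
 * promoted to ipi_pending, which hv_update_fiq() turns into a guest-visible vFIQ. */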
D_PERCPU(static bool, ipi_queued);
D_PERCPU(static bool, ipi_pending);
D_PERCPU(static bool, pmc_pending);
D_PERCPU(static u64, pmc_irq_mode);

D_PERCPU(static u64, exc_entry_pmcr0_cnt);

void hv_exit_guest(void) __attribute__((noreturn));

static u64 stolen_time = 0;
static u64 exc_entry_time;

extern u32 hv_cpus_in_guest;

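/*
 * Proxy an exception to the host: snapshot physical addresses for ELR/FAR/SP, hand the
 * context to the uartproxy debug channel, and act on its verdict (handled, exit guest,
 * or reboot on anything unexpected).
 */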
void hv_exc_proxy(struct exc_info *ctx, uartproxy_boot_reason_t reason, uartproxy_exc_code_t type,
                  void *extra)
{
    int from_el = FIELD_GET(SPSR_M, ctx->spsr) >> 2;

    hv_wdt_breadcrumb('P');

#ifdef TIME_ACCOUNTING
    /* Wait until all CPUs have entered the HV (max 1ms), to ensure they exit with an
     * updated timer offset. */
    hv_rendezvous();
    u64 entry_time = mrs(CNTPCT_EL0);
#endif

    ctx->elr_phys = hv_translate(ctx->elr, false, false);
    ctx->far_phys = hv_translate(ctx->far, false, false);
    ctx->sp_phys = hv_translate(from_el == 0 ? ctx->sp[0] : ctx->sp[1], false, false);
    ctx->extra = extra;

    struct uartproxy_msg_start start = {
        .reason = reason,
        .code = type,
        .info = ctx,
    };

    hv_wdt_suspend();
    int ret = uartproxy_run(&start);
    hv_wdt_resume();

    switch (ret) {
        case EXC_RET_HANDLED:
            hv_wdt_breadcrumb('p');
#ifdef TIME_ACCOUNTING
            u64 lost = mrs(CNTPCT_EL0) - entry_time;
            stolen_time += lost;
#endif
            return;
        case EXC_EXIT_GUEST:
            spin_unlock(&bhl);
            hv_exit_guest();
        default:
            printf("Guest exception not handled, rebooting.\n");
            print_regs(ctx->regs, 0);
            flush_and_reboot();
    }
}

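/*
 * Recompute the guest's virtual FIQ line: a firing EL1 timer, a pending virtual IPI or a
 * pending PMC IRQ asserts HCR_EL2.VF; the per-timer FIQ enables (VM_TMR_FIQ_ENA) are
 * cleared while the corresponding guest timer is asserted and restored otherwise.
 */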
static void hv_update_fiq(void)
{
    u64 hcr = mrs(HCR_EL2);
    bool fiq_pending = false;

    if (mrs(CNTP_CTL_EL02) == (CNTx_CTL_ISTATUS | CNTx_CTL_ENABLE)) {
        fiq_pending = true;
        reg_clr(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENA_ENA_P);
    } else {
        reg_set(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENA_ENA_P);
    }

    if (mrs(CNTV_CTL_EL02) == (CNTx_CTL_ISTATUS | CNTx_CTL_ENABLE)) {
        fiq_pending = true;
        reg_clr(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENA_ENA_V);
    } else {
        reg_set(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENA_ENA_V);
    }

    fiq_pending |= PERCPU(ipi_pending) || PERCPU(pmc_pending);

    sysop("isb");

    if ((hcr & HCR_VF) && !fiq_pending) {
        hv_write_hcr(hcr & ~HCR_VF);
    } else if (!(hcr & HCR_VF) && fiq_pending) {
        hv_write_hcr(hcr | HCR_VF);
    }
}

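/* Case-arm generators for the sysreg switch below: SYSREG_MAP redirects a trapped guest
 * register to a different physical register, SYSREG_PASS accesses it directly. */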
#define SYSREG_MAP(sr, to)                  \
    case SYSREG_ISS(sr):                    \
        if (is_read)                        \
            regs[rt] = _mrs(sr_tkn(to));    \
        else                                \
            _msr(sr_tkn(to), regs[rt]);     \
        return true;

#define SYSREG_PASS(sr)                     \
    case SYSREG_ISS(sr):                    \
        if (is_read)                        \
            regs[rt] = _mrs(sr_tkn(sr));    \
        else                                \
            _msr(sr_tkn(sr), regs[rt]);     \
        return true;

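/*
 * Emulate a trapped MRS/MSR access. The target register is decoded from the ESR ISS and
 * either passed through, remapped, or shadowed; returns false for unknown registers, in
 * which case the exception is proxied to the host instead.
 */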
static bool hv_handle_msr(struct exc_info *ctx, u64 iss)
{
    u64 reg = iss & (ESR_ISS_MSR_OP0 | ESR_ISS_MSR_OP2 | ESR_ISS_MSR_OP1 | ESR_ISS_MSR_CRn |
                     ESR_ISS_MSR_CRm);
    u64 rt = FIELD_GET(ESR_ISS_MSR_Rt, iss);
    bool is_read = iss & ESR_ISS_MSR_DIR;

    u64 *regs = ctx->regs;

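    /* Rt == 31 encodes xzr: keep that slot zeroed so reads see 0 and writes land in a
     * scratch slot that is never restored to the guest. */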
    regs[31] = 0;

    switch (reg) {
        /* Some kind of timer */
        SYSREG_PASS(sys_reg(3, 7, 15, 1, 1));
        /* Noisy traps */
        SYSREG_MAP(SYS_ACTLR_EL1, SYS_IMP_APL_ACTLR_EL12)
        SYSREG_PASS(SYS_IMP_APL_HID4)
        SYSREG_PASS(SYS_IMP_APL_EHID4)
        /* pass through PMU handling */
        SYSREG_PASS(SYS_IMP_APL_PMCR1)
        SYSREG_PASS(SYS_IMP_APL_PMCR2)
        SYSREG_PASS(SYS_IMP_APL_PMCR3)
        SYSREG_PASS(SYS_IMP_APL_PMCR4)
        SYSREG_PASS(SYS_IMP_APL_PMESR0)
        SYSREG_PASS(SYS_IMP_APL_PMESR1)
        SYSREG_PASS(SYS_IMP_APL_PMSR)
#ifndef DEBUG_PMU_IRQ
        SYSREG_PASS(SYS_IMP_APL_PMC0)
#endif
        SYSREG_PASS(SYS_IMP_APL_PMC1)
        SYSREG_PASS(SYS_IMP_APL_PMC2)
        SYSREG_PASS(SYS_IMP_APL_PMC3)
        SYSREG_PASS(SYS_IMP_APL_PMC4)
        SYSREG_PASS(SYS_IMP_APL_PMC5)
        SYSREG_PASS(SYS_IMP_APL_PMC6)
        SYSREG_PASS(SYS_IMP_APL_PMC7)
        SYSREG_PASS(SYS_IMP_APL_PMC8)
        SYSREG_PASS(SYS_IMP_APL_PMC9)
        /* Handle this one here because m1n1/Linux (will) use it for explicit cpuidle.
         * We can pass it through; going into deep sleep doesn't break the HV since we
         * don't do any wfis that assume otherwise in m1n1. */
        SYSREG_PASS(SYS_IMP_APL_CYC_OVRD)
        /* IPI handling */
        SYSREG_PASS(SYS_IMP_APL_IPI_CR_EL1)
        case SYSREG_ISS(SYS_IMP_APL_IPI_RR_LOCAL_EL1): {
            assert(!is_read);
            u64 mpidr = (regs[rt] & 0xff) | (mrs(MPIDR_EL1) & 0xffff00);
            msr(SYS_IMP_APL_IPI_RR_LOCAL_EL1, regs[rt]);
            for (int i = 0; i < MAX_CPUS; i++)
                if (mpidr == smp_get_mpidr(i))
                    ipi_queued[i] = true;
            return true;
        }
        case SYSREG_ISS(SYS_IMP_APL_IPI_RR_GLOBAL_EL1): {
            assert(!is_read);
            u64 mpidr = (regs[rt] & 0xff) | ((regs[rt] & 0xff0000) >> 8);
            msr(SYS_IMP_APL_IPI_RR_GLOBAL_EL1, regs[rt]);
            for (int i = 0; i < MAX_CPUS; i++) {
                if (mpidr == (smp_get_mpidr(i) & 0xffff))
                    ipi_queued[i] = true;
            }
            return true;
        }
        case SYSREG_ISS(SYS_IMP_APL_IPI_SR_EL1):
            if (is_read)
                regs[rt] = PERCPU(ipi_pending) ? IPI_SR_PENDING : 0;
            else if (regs[rt] & IPI_SR_PENDING)
                PERCPU(ipi_pending) = false;
            return true;
        /* shadow the interrupt mode and state flag */
        case SYSREG_ISS(SYS_IMP_APL_PMCR0):
            if (is_read) {
                u64 val = (mrs(SYS_IMP_APL_PMCR0) & ~PMCR0_IMODE_MASK) | PERCPU(pmc_irq_mode);
                regs[rt] =
                    val | (PERCPU(pmc_pending) ? PMCR0_IACT : 0) | PERCPU(exc_entry_pmcr0_cnt);
            } else {
                PERCPU(pmc_pending) = !!(regs[rt] & PMCR0_IACT);
                PERCPU(pmc_irq_mode) = regs[rt] & PMCR0_IMODE_MASK;
                PERCPU(exc_entry_pmcr0_cnt) = regs[rt] & PMCR0_CNT_MASK;
                msr(SYS_IMP_APL_PMCR0, regs[rt] & ~PERCPU(exc_entry_pmcr0_cnt));
            }
            return true;
#ifdef DEBUG_PMU_IRQ
        case SYSREG_ISS(SYS_IMP_APL_PMC0):
            if (is_read) {
                regs[rt] = mrs(SYS_IMP_APL_PMC0);
            } else {
                msr(SYS_IMP_APL_PMC0, regs[rt]);
                printf("msr(SYS_IMP_APL_PMC0, 0x%04lx_%08lx)\n", regs[rt] >> 32,
                       regs[rt] & 0xFFFFFFFF);
            }
            return true;
#endif
        /* M1RACLES reg, handle here due to silly 12.0 "mitigation" */
        case SYSREG_ISS(sys_reg(3, 5, 15, 10, 1)):
            if (is_read)
                regs[rt] = 0;
            return true;
    }

    return false;
}

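/*
 * Common entry path for all guest exceptions: snapshot the guest state into the exc_info
 * context, unmask SErrors for the HV, mark this CPU as out of the guest, take the big
 * lock, and stop the PMU counters while we are in the hypervisor.
 */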
static void hv_exc_entry(struct exc_info *ctx)
{
    ctx->spsr = hv_get_spsr();
    ctx->elr = hv_get_elr();
    ctx->esr = hv_get_esr();
    ctx->far = hv_get_far();
    ctx->afsr1 = hv_get_afsr1();
    ctx->sp[0] = mrs(SP_EL0);
    ctx->sp[1] = mrs(SP_EL1);
    ctx->sp[2] = (u64)ctx;
    ctx->cpu_id = smp_id();
    ctx->mpidr = mrs(MPIDR_EL1);

    sysop("isb");

    // Enable SErrors in the HV, but only if not already pending
    if (!(mrs(ISR_EL1) & 0x100))
        sysop("msr daifclr, 4");

    __atomic_sub_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);
    spin_lock(&bhl);
    hv_wdt_breadcrumb('X');
    exc_entry_time = mrs(CNTPCT_EL0);

    /* disable PMU counters in the hypervisor */
    u64 pmcr0 = mrs(SYS_IMP_APL_PMCR0);
    PERCPU(exc_entry_pmcr0_cnt) = pmcr0 & PMCR0_CNT_MASK;
    msr(SYS_IMP_APL_PMCR0, pmcr0 & ~PMCR0_CNT_MASK);
}

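/*
 * Common exit path: refresh the virtual FIQ state, restart the PMU counters, apply the
 * accumulated stolen time to CNTVOFF_EL2, drop the big lock, and restore the guest's
 * SPSR/ELR/SP before returning to it.
 */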
static void hv_exc_exit(struct exc_info *ctx)
{
    hv_wdt_breadcrumb('x');
    hv_update_fiq();
    /* reenable PMU counters */
    reg_set(SYS_IMP_APL_PMCR0, PERCPU(exc_entry_pmcr0_cnt));
    msr(CNTVOFF_EL2, stolen_time);
    spin_unlock(&bhl);
    __atomic_add_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);

    hv_set_spsr(ctx->spsr);
    hv_set_elr(ctx->elr);
    msr(SP_EL0, ctx->sp[0]);
    msr(SP_EL1, ctx->sp[1]);
}

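/*
 * Synchronous guest exceptions: data aborts and sysreg traps are handled in place when
 * possible; anything unhandled is proxied to the host.
 */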
void hv_exc_sync(struct exc_info *ctx)
{
    hv_wdt_breadcrumb('S');
    hv_exc_entry(ctx);
    bool handled = false;
    u32 ec = FIELD_GET(ESR_EC, ctx->esr);

    switch (ec) {
        case ESR_EC_DABORT_LOWER:
            hv_wdt_breadcrumb('D');
            handled = hv_handle_dabort(ctx);
            break;
        case ESR_EC_MSR:
            hv_wdt_breadcrumb('M');
            handled = hv_handle_msr(ctx, FIELD_GET(ESR_ISS, ctx->esr));
            break;
        case ESR_EC_IMPDEF:
            hv_wdt_breadcrumb('A');
            switch (FIELD_GET(ESR_ISS, ctx->esr)) {
                case ESR_ISS_IMPDEF_MSR:
                    handled = hv_handle_msr(ctx, ctx->afsr1);
                    break;
            }
            break;
    }

    if (handled) {
        hv_wdt_breadcrumb('+');
        ctx->elr += 4;
    } else {
        hv_wdt_breadcrumb('-');
        // VM code can forward a nested SError exception here
        if (FIELD_GET(ESR_EC, ctx->esr) == ESR_EC_SERROR)
            hv_exc_proxy(ctx, START_EXCEPTION_LOWER, EXC_SERROR, NULL);
        else
            hv_exc_proxy(ctx, START_EXCEPTION_LOWER, EXC_SYNC, NULL);
    }

    hv_exc_exit(ctx);
    hv_wdt_breadcrumb('s');
}

void hv_exc_irq(struct exc_info *ctx)
{
    hv_wdt_breadcrumb('I');
    hv_exc_entry(ctx);
    hv_exc_proxy(ctx, START_EXCEPTION_LOWER, EXC_IRQ, NULL);
    hv_exc_exit(ctx);
    hv_wdt_breadcrumb('i');
}

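/*
 * FIQ handler: services the hypervisor's own tick and vtimer, latches PMC/uncore-PMC
 * interrupts, and converts hardware IPIs into pending virtual IPIs. Guest timer FIQs are
 * reflected to the guest via hv_update_fiq() on the exit path.
 */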
void hv_exc_fiq(struct exc_info *ctx)
{
    hv_wdt_breadcrumb('F');
    hv_exc_entry(ctx);
    if (mrs(CNTP_CTL_EL0) == (CNTx_CTL_ISTATUS | CNTx_CTL_ENABLE)) {
        msr(CNTP_CTL_EL0, CNTx_CTL_ISTATUS | CNTx_CTL_IMASK | CNTx_CTL_ENABLE);
        hv_tick(ctx);
        hv_arm_tick();
    }

    if (mrs(CNTV_CTL_EL0) == (CNTx_CTL_ISTATUS | CNTx_CTL_ENABLE)) {
        msr(CNTV_CTL_EL0, CNTx_CTL_ISTATUS | CNTx_CTL_IMASK | CNTx_CTL_ENABLE);
        hv_exc_proxy(ctx, START_HV, HV_VTIMER, NULL);
    }

    u64 reg = mrs(SYS_IMP_APL_PMCR0);
    if ((reg & (PMCR0_IMODE_MASK | PMCR0_IACT)) == (PMCR0_IMODE_FIQ | PMCR0_IACT)) {
#ifdef DEBUG_PMU_IRQ
        printf("[FIQ] PMC IRQ, masking and delivering to the guest\n");
#endif
        reg_clr(SYS_IMP_APL_PMCR0, PMCR0_IACT | PMCR0_IMODE_MASK);
        PERCPU(pmc_pending) = true;
    }

    reg = mrs(SYS_IMP_APL_UPMCR0);
    if ((reg & UPMCR0_IMODE_MASK) == UPMCR0_IMODE_FIQ && (mrs(SYS_IMP_APL_UPMSR) & UPMSR_IACT)) {
printf("[FIQ] UPMC IRQ, masking");
        reg_clr(SYS_IMP_APL_UPMCR0, UPMCR0_IMODE_MASK);
        hv_exc_proxy(ctx, START_EXCEPTION_LOWER, EXC_FIQ, NULL);
    }

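    /* A hardware IPI FIQ: if the guest had queued a virtual IPI for this CPU, promote it
     * to pending (hv_update_fiq() will then raise a vFIQ), and ack the hardware IPI. */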
    if (mrs(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
        if (PERCPU(ipi_queued)) {
            PERCPU(ipi_pending) = true;
            PERCPU(ipi_queued) = false;
        }
        msr(SYS_IMP_APL_IPI_SR_EL1, IPI_SR_PENDING);
        sysop("isb");
    }

    hv_check_rendezvous(ctx);

    // Handles guest timers
    hv_exc_exit(ctx);
    hv_wdt_breadcrumb('f');
}

void hv_exc_serr(struct exc_info *ctx)
{
    hv_wdt_breadcrumb('E');
    hv_exc_entry(ctx);
    hv_exc_proxy(ctx, START_EXCEPTION_LOWER, EXC_SERROR, NULL);
    hv_exc_exit(ctx);
    hv_wdt_breadcrumb('e');
}