hle: kernel: KScheduler: Fix deadlock with core waiting for a thread lock that has migrated.

- Previously, it was possible for a thread migration to occur from core A to core B.
- Next, core B waits on a guest lock that must be released by a thread queued for core A.
- Meanwhile, core A is still waiting on core B's current thread lock, resulting in a deadlock.
- Fix this by try-locking the thread lock and retrying if the lock cannot be taken (see the sketch after this list).
- Fixes softlocks in FF8 and Pokemon Legends Arceus.
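
For illustration only, here is a minimal, self-contained sketch of the try-lock-and-retry idea behind the fix. Every name in it (ContextGuard, Thread, ScheduleOnce, reschedule_requested) is an illustrative stand-in, not yuzu's actual API:

    // Sketch, assuming C++17. Instead of blocking on another core's thread
    // lock (which deadlocks when that core is itself waiting on work only
    // this core can run), try-lock and re-examine scheduler state.
    #include <atomic>
    #include <cstdio>

    struct ContextGuard {
        std::atomic_flag flag = ATOMIC_FLAG_INIT;
        bool TryLock() {
            return !flag.test_and_set(std::memory_order_acquire);
        }
        void Unlock() {
            flag.clear(std::memory_order_release);
        }
    };

    struct Thread {
        ContextGuard context_guard;
        int id = 0;
    };

    std::atomic<bool> reschedule_requested{false};

    void ScheduleOnce(Thread* next) {
        for (;;) {
            const bool locked = next->context_guard.TryLock();
            // Re-check state before committing to the switch; a blocking
            // Lock() here is what allowed the circular wait.
            if (reschedule_requested.load()) {
                if (locked) {
                    next->context_guard.Unlock();
                }
                return; // pick a new thread instead of spinning on this one
            }
            if (!locked) {
                continue; // contended: loop and re-check, never block
            }
            std::printf("switching to thread %d\n", next->id);
            next->context_guard.Unlock();
            return;
        }
    }

    int main() {
        Thread t;
        t.id = 1;
        ScheduleOnce(&t);
        return 0;
    }

The property that matters: a core never blocks indefinitely on a lock another core may hold while waiting for this core, so the circular wait described above cannot form.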
bunnei 2022-01-27 12:17:14 -08:00
parent 8a244dd3d3
commit 3a1a3dd0db
2 changed files with 24 additions and 23 deletions

src/core/hle/kernel/k_priority_queue.h

@@ -258,7 +258,7 @@ private:
 private:
     constexpr void ClearAffinityBit(u64& affinity, s32 core) {
-        affinity &= ~(u64(1) << core);
+        affinity &= ~(UINT64_C(1) << core);
     }
 
     constexpr s32 GetNextCore(u64& affinity) {
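
As a side note, the UINT64_C change is behavioral no-op cleanup: assuming u64 aliases std::uint64_t, both forms build the same 64-bit mask, and UINT64_C is simply the fixed-width literal macro from <cstdint>. A standalone check of what ClearAffinityBit computes:

    #include <cassert>
    #include <cstdint>

    int main() {
        std::uint64_t affinity = 0b1011;  // cores 0, 1, and 3 allowed
        affinity &= ~(UINT64_C(1) << 1);  // clear core 1, as ClearAffinityBit does
        assert(affinity == 0b1001);
        return 0;
    }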

src/core/hle/kernel/k_scheduler.cpp

@@ -710,10 +710,7 @@ void KScheduler::Unload(KThread* thread) {
 }
 
 void KScheduler::Reload(KThread* thread) {
-    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
 
-    if (thread) {
-        ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
-
-        Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-        cpu_core.LoadContext(thread->GetContext32());
+    Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+    cpu_core.LoadContext(thread->GetContext32());
@@ -722,11 +719,10 @@ void KScheduler::Reload(KThread* thread) {
-        cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
-        cpu_core.ClearExclusiveState();
-    }
+    cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
 }
 
 void KScheduler::SwitchContextStep2() {
     // Load context of new thread
-    Reload(current_thread.load());
+    Reload(GetCurrentThread());
 
     RescheduleCurrentCore();
 }
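
The Reload() simplification leans on an invariant the next hunk makes explicit: the scheduler substitutes the idle thread for a null next thread before publishing it, so Reload() can assume its argument is never null. A minimal sketch of that fallback, with illustrative stand-in names (KThreadStub, Schedule):

    #include <atomic>
    #include <cassert>

    struct KThreadStub {
        bool is_idle = false;
    };

    KThreadStub idle_thread{true};
    std::atomic<KThreadStub*> current_thread{nullptr};

    // Never store null: consumers such as Reload() then need no null check.
    void Schedule(KThreadStub* highest_priority) {
        KThreadStub* next = (highest_priority != nullptr) ? highest_priority : &idle_thread;
        current_thread.store(next);
    }

    int main() {
        Schedule(nullptr);
        assert(current_thread.load() == &idle_thread); // fell back to idle
        return 0;
    }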
@@ -735,13 +731,17 @@ void KScheduler::ScheduleImpl() {
     KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
-    state.needs_scheduling = false;
+    state.needs_scheduling.store(false);
 
     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
         next_thread = idle_thread;
     }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     // We never want to schedule a dummy thread, as these are only used by host threads for locking.
     if (next_thread->GetThreadType() == ThreadType::Dummy) {
         ASSERT_MSG(false, "Dummy threads should never be scheduled!");
@@ -755,14 +755,8 @@ void KScheduler::ScheduleImpl() {
         return;
     }
 
-    if (next_thread->GetCurrentCore() != core_id) {
-        next_thread->SetCurrentCore(core_id);
-    }
-
-    current_thread.store(next_thread);
-
+    // Update the CPU time tracking variables.
     KProcess* const previous_process = system.Kernel().CurrentProcess();
     UpdateLastContextSwitchTime(previous_thread, previous_process);
 
     // Save context for previous thread
@@ -770,6 +764,10 @@ void KScheduler::ScheduleImpl() {
     std::shared_ptr<Common::Fiber>* old_context;
     old_context = &previous_thread->GetHostContext();
 
+    // Set the new thread.
+    current_thread.store(next_thread);
+
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
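
One way to read the reordering in ScheduleImpl(): all per-switch bookkeeping (time accounting, capturing the previous context) now happens first, and current_thread is published as the final write before guard.Unlock(), so another core that takes the guard afterwards observes fully prepared state. A generic safe-publication sketch of that idea, with stand-in names throughout:

    #include <atomic>
    #include <cassert>
    #include <mutex>

    struct Context {
        int saved_state = 0;
    };

    std::mutex guard;                       // stand-in for the scheduler guard
    Context ctx;                            // stand-in for per-switch bookkeeping
    std::atomic<Context*> current{nullptr}; // stand-in for current_thread

    void SwitchTo(int state) {
        std::lock_guard lk{guard};
        ctx.saved_state = state; // finish bookkeeping first...
        current.store(&ctx);     // ...then publish, right before the unlock
    }

    int main() {
        SwitchTo(7);
        assert(current.load()->saved_state == 7);
        return 0;
    }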
@@ -797,8 +795,8 @@ void KScheduler::SwitchToCurrent() {
         do {
             auto next_thread = current_thread.load();
             if (next_thread != nullptr) {
-                next_thread->context_guard.Lock();
-                if (next_thread->GetRawState() != ThreadState::Runnable) {
+                const auto locked = next_thread->context_guard.TryLock();
+                if (state.needs_scheduling.load()) {
                     next_thread->context_guard.Unlock();
                     break;
                 }
@@ -806,6 +804,9 @@ void KScheduler::SwitchToCurrent() {
                     next_thread->context_guard.Unlock();
                     break;
                 }
+                if (!locked) {
+                    continue;
+                }
             }
             auto thread = next_thread ? next_thread : idle_thread;
             Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
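
One subtlety in the new control flow: inside a do-while, continue jumps to the loop condition rather than back to the top of the body, so the "if (!locked) continue;" path re-evaluates whatever condition closes this do-while (not shown in the hunk) before retrying the TryLock. A tiny demonstration of that C++ rule:

    #include <cstdio>

    int main() {
        int attempts = 0;
        bool locked = false;
        do {
            ++attempts;
            if (!locked) {
                locked = true; // pretend the retry succeeds
                continue;      // jumps to the while condition, not the top
            }
            std::printf("acquired after %d attempts\n", attempts);
            break;
        } while (attempts < 10); // continue re-evaluates this each time
        return 0;
    }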