Mirror of https://github.com/yuzu-mirror/yuzu (synced 2024-11-27 23:00:28 +00:00)
NVDRV: Further refactors and eliminate old code.
Commit 920429fde7, parent 2931101e6f
18 changed files with 12 additions and 242 deletions
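Summary of the change, as visible in the hunks below: the dedicated Core::Hardware::InterruptManager and the old Tegra::Host1x command-class stub are deleted, GPU drops its syncpoint forwarding helpers (WaitFence, IncrementSyncPoint, GetSyncpointValue, RegisterSyncptInterrupt), nvhost_ctrl loses the SignalNvEvent/RegisterForSignal plumbing, and callers now read and wait on syncpoints through Host1x's SyncpointManager (for example GetGuestSyncpointValue and WaitHost). The sketch below is a minimal, self-contained illustration of that direction only; every name in it (SyncpointManagerSketch, the callback-based RegisterHostAction, the 192-entry syncpoint table) is an illustrative stand-in and not the real yuzu implementation.

// Minimal, self-contained sketch (not yuzu code; all names are illustrative stand-ins)
// of the post-refactor direction: syncpoint counters and completion callbacks live in
// one Host1x-style manager that callers use directly, instead of routing a GPU
// "CPU interrupt" through a separate InterruptManager and back into NVDRV.
#include <array>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <list>
#include <mutex>

class SyncpointManagerSketch {
public:
    using Action = std::function<void(std::uint32_t reached_value)>;

    // Register a callback that fires once the syncpoint reaches expected_value.
    void RegisterHostAction(std::size_t syncpoint_id, std::uint32_t expected_value,
                            Action action) {
        std::scoped_lock lk{guard};
        pending[syncpoint_id].push_back({expected_value, std::move(action)});
    }

    // Increment the host-side counter and run every action whose threshold was reached.
    void IncrementHost(std::size_t syncpoint_id) {
        std::list<Registered> ready;
        std::uint32_t value{};
        {
            std::scoped_lock lk{guard};
            value = ++counters[syncpoint_id];
            auto& actions = pending[syncpoint_id];
            for (auto it = actions.begin(); it != actions.end();) {
                if (it->expected_value <= value) {
                    ready.splice(ready.end(), actions, it++);
                } else {
                    ++it;
                }
            }
        }
        // Callbacks run outside the lock, directly on the incrementing thread.
        for (auto& entry : ready) {
            entry.action(value);
        }
    }

    std::uint32_t GetHostSyncpointValue(std::size_t syncpoint_id) const {
        std::scoped_lock lk{guard};
        return counters[syncpoint_id];
    }

private:
    struct Registered {
        std::uint32_t expected_value;
        Action action;
    };
    static constexpr std::size_t NumSyncpoints = 192; // stand-in table size
    mutable std::mutex guard;
    std::array<std::uint32_t, NumSyncpoints> counters{};
    std::array<std::list<Registered>, NumSyncpoints> pending{};
};

int main() {
    SyncpointManagerSketch syncpoints;
    // Stand-in for the removed RegisterSyncptInterrupt -> InterruptManager::GPUInterruptSyncpt
    // round trip: the waiter's callback is attached to the syncpoint itself.
    syncpoints.RegisterHostAction(0, 1, [](std::uint32_t value) {
        std::cout << "syncpoint 0 reached " << value << ", signalling waiters directly\n";
    });
    syncpoints.IncrementHost(0); // callback fires here
    std::cout << "current value = " << syncpoints.GetHostSyncpointValue(0) << '\n';
}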
@@ -138,8 +138,6 @@ add_library(core STATIC
     frontend/emu_window.h
     frontend/framebuffer_layout.cpp
     frontend/framebuffer_layout.h
-    hardware_interrupt_manager.cpp
-    hardware_interrupt_manager.h
     hid/emulated_console.cpp
     hid/emulated_console.h
     hid/emulated_controller.cpp
@@ -27,7 +27,6 @@
 #include "core/file_sys/savedata_factory.h"
 #include "core/file_sys/vfs_concat.h"
 #include "core/file_sys/vfs_real.h"
-#include "core/hardware_interrupt_manager.h"
 #include "core/hid/hid_core.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/kernel/k_process.h"
@@ -226,7 +225,6 @@ struct System::Impl {
         service_manager = std::make_shared<Service::SM::ServiceManager>(kernel);
         services = std::make_unique<Service::Services>(service_manager, system);
-        interrupt_manager = std::make_unique<Hardware::InterruptManager>(system);
 
         // Initialize time manager, which must happen after kernel is created
         time_manager.Initialize();
@@ -454,7 +452,6 @@ struct System::Impl {
     std::unique_ptr<Loader::AppLoader> app_loader;
     std::unique_ptr<Tegra::GPU> gpu_core;
     std::unique_ptr<Tegra::Host1x::Host1x> host1x_core;
-    std::unique_ptr<Hardware::InterruptManager> interrupt_manager;
     std::unique_ptr<Core::DeviceMemory> device_memory;
     std::unique_ptr<AudioCore::AudioCore> audio_core;
     Core::Memory::Memory memory;
@@ -680,14 +677,6 @@ const Tegra::Host1x::Host1x& System::Host1x() const {
     return *impl->host1x_core;
 }
 
-Core::Hardware::InterruptManager& System::InterruptManager() {
-    return *impl->interrupt_manager;
-}
-
-const Core::Hardware::InterruptManager& System::InterruptManager() const {
-    return *impl->interrupt_manager;
-}
-
 VideoCore::RendererBase& System::Renderer() {
     return impl->gpu_core->Renderer();
 }
@@ -91,10 +91,6 @@ namespace Core::Timing {
 class CoreTiming;
 }
 
-namespace Core::Hardware {
-class InterruptManager;
-}
-
 namespace Core::HID {
 class HIDCore;
 }
@@ -305,12 +301,6 @@ public:
     /// Provides a constant reference to the core timing instance.
     [[nodiscard]] const Timing::CoreTiming& CoreTiming() const;
 
-    /// Provides a reference to the interrupt manager instance.
-    [[nodiscard]] Core::Hardware::InterruptManager& InterruptManager();
-
-    /// Provides a constant reference to the interrupt manager instance.
-    [[nodiscard]] const Core::Hardware::InterruptManager& InterruptManager() const;
-
     /// Provides a reference to the kernel instance.
     [[nodiscard]] Kernel::KernelCore& Kernel();
 
@@ -1,32 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "core/core.h"
-#include "core/core_timing.h"
-#include "core/hardware_interrupt_manager.h"
-#include "core/hle/service/nvdrv/nvdrv_interface.h"
-#include "core/hle/service/sm/sm.h"
-
-namespace Core::Hardware {
-
-InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) {
-    gpu_interrupt_event = Core::Timing::CreateEvent(
-        "GPUInterrupt",
-        [this](std::uintptr_t message, u64 time,
-               std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
-            auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
-            const u32 syncpt = static_cast<u32>(message >> 32);
-            const u32 value = static_cast<u32>(message);
-            nvdrv->SignalGPUInterruptSyncpt(syncpt, value);
-            return std::nullopt;
-        });
-}
-
-InterruptManager::~InterruptManager() = default;
-
-void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
-    const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value;
-    system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{10}, gpu_interrupt_event, msg);
-}
-
-} // namespace Core::Hardware
@@ -1,32 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <memory>
-
-#include "common/common_types.h"
-
-namespace Core {
-class System;
-}
-
-namespace Core::Timing {
-struct EventType;
-}
-
-namespace Core::Hardware {
-
-class InterruptManager {
-public:
-    explicit InterruptManager(Core::System& system);
-    ~InterruptManager();
-
-    void GPUInterruptSyncpt(u32 syncpoint_id, u32 value);
-
-private:
-    Core::System& system;
-    std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event;
-};
-
-} // namespace Core::Hardware
@@ -77,12 +77,9 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
     return NvResult::NotImplemented;
 }
 
-void nvhost_ctrl::OnOpen(DeviceFD fd) {
-    events_interface.RegisterForSignal(this);
-}
-void nvhost_ctrl::OnClose(DeviceFD fd) {
-    events_interface.UnregisterForSignal(this);
-}
+void nvhost_ctrl::OnOpen(DeviceFD fd) {}
+
+void nvhost_ctrl::OnClose(DeviceFD fd) {}
 
 NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
     IocGetConfigParams params{};
@@ -395,21 +392,4 @@ u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) {
     return 0;
 }
 
-void nvhost_ctrl::SignalNvEvent(u32 syncpoint_id, u32 value) {
-    u64 signal_mask = events_mask;
-    while (signal_mask != 0) {
-        const u64 event_id = std::countr_zero(signal_mask);
-        signal_mask &= ~(1ULL << event_id);
-        auto& event = events[event_id];
-        if (event.assigned_syncpt != syncpoint_id || event.assigned_value != value) {
-            continue;
-        }
-        if (event.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
-            EventState::Waiting) {
-            event.kevent->GetWritableEvent().Signal();
-        }
-        event.status.store(EventState::Signalled, std::memory_order_release);
-    }
-}
-
 } // namespace Service::Nvidia::Devices
@@ -56,8 +56,6 @@ public:
     };
     static_assert(sizeof(SyncpointEventValue) == sizeof(u32));
 
-    void SignalNvEvent(u32 syncpoint_id, u32 value);
-
 private:
     struct InternalEvent {
         // Mask representing registered events
@@ -14,6 +14,7 @@
 #include "video_core/control/channel_state.h"
 #include "video_core/engines/puller.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::Devices {
 namespace {
@@ -31,7 +32,8 @@ nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
       syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
       channel_state{system.GPU().AllocateChannel()} {
     channel_fence.id = syncpoint_manager.AllocateSyncpoint();
-    channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
+    channel_fence.value =
+        system_.Host1x().GetSyncpointManager().GetGuestSyncpointValue(channel_fence.id);
     sm_exception_breakpoint_int_report_event =
         events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
     sm_exception_breakpoint_pause_report_event =
@@ -189,7 +191,8 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
     }
 
     system.GPU().InitChannel(*channel_state);
-    channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
+    channel_fence.value =
+        system.Host1x().GetSyncpointManager().GetGuestSyncpointValue(channel_fence.id);
 
     params.fence_out = channel_fence;
 
@@ -33,23 +33,6 @@ EventInterface::EventInterface(Module& module_) : module{module_}, guard{}, on_s
 
 EventInterface::~EventInterface() = default;
 
-void EventInterface::RegisterForSignal(Devices::nvhost_ctrl* device) {
-    std::unique_lock<std::mutex> lk(guard);
-    on_signal.push_back(device);
-}
-
-void EventInterface::UnregisterForSignal(Devices::nvhost_ctrl* device) {
-    std::unique_lock<std::mutex> lk(guard);
-    on_signal.remove(device);
-}
-
-void EventInterface::Signal(u32 syncpoint_id, u32 value) {
-    std::unique_lock<std::mutex> lk(guard);
-    for (auto* device : on_signal) {
-        device->SignalNvEvent(syncpoint_id, value);
-    }
-}
-
 Kernel::KEvent* EventInterface::CreateEvent(std::string name) {
     Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name));
     return new_event;
@@ -221,10 +204,6 @@ NvResult Module::Close(DeviceFD fd) {
     return NvResult::Success;
 }
 
-void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) {
-    events_interface.Signal(syncpoint_id, value);
-}
-
 NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) {
     if (fd < 0) {
         LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
@@ -49,11 +49,6 @@ public:
     EventInterface(Module& module_);
     ~EventInterface();
 
-    void RegisterForSignal(Devices::nvhost_ctrl*);
-    void UnregisterForSignal(Devices::nvhost_ctrl*);
-
-    void Signal(u32 syncpoint_id, u32 value);
-
     Kernel::KEvent* CreateEvent(std::string name);
 
     void FreeEvent(Kernel::KEvent* event);
@@ -96,8 +91,6 @@ public:
     /// Closes a device file descriptor and returns operation success.
    NvResult Close(DeviceFD fd);
 
-    void SignalSyncpt(const u32 syncpoint_id, const u32 value);
-
     NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);
 
 private:
@@ -15,10 +15,6 @@
 
 namespace Service::Nvidia {
 
-void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
-    nvdrv->SignalSyncpt(syncpoint_id, value);
-}
-
 void NVDRV::Open(Kernel::HLERequestContext& ctx) {
     LOG_DEBUG(Service_NVDRV, "called");
     IPC::ResponseBuilder rb{ctx, 4};
@@ -18,8 +18,6 @@ public:
     explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name);
     ~NVDRV() override;
 
-    void SignalGPUInterruptSyncpt(u32 syncpoint_id, u32 value);
-
 private:
     void Open(Kernel::HLERequestContext& ctx);
     void Ioctl1(Kernel::HLERequestContext& ctx);
@@ -1,29 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/assert.h"
-#include "video_core/command_classes/host1x.h"
-#include "video_core/gpu.h"
-
-Tegra::Host1x::Host1x(GPU& gpu_) : gpu(gpu_) {}
-
-Tegra::Host1x::~Host1x() = default;
-
-void Tegra::Host1x::ProcessMethod(Method method, u32 argument) {
-    switch (method) {
-    case Method::LoadSyncptPayload32:
-        syncpoint_value = argument;
-        break;
-    case Method::WaitSyncpt:
-    case Method::WaitSyncpt32:
-        Execute(argument);
-        break;
-    default:
-        UNIMPLEMENTED_MSG("Host1x method 0x{:X}", static_cast<u32>(method));
-        break;
-    }
-}
-
-void Tegra::Host1x::Execute(u32 data) {
-    gpu.WaitFence(data, syncpoint_value);
-}
@@ -14,7 +14,6 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/frontend/emu_window.h"
-#include "core/hardware_interrupt_manager.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "core/perf_stats.h"
 #include "video_core/cdma_pusher.h"
@@ -36,8 +35,6 @@
 
 namespace Tegra {
 
-MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
-
 struct GPU::Impl {
     explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
         : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_},
@@ -197,30 +194,6 @@ struct GPU::Impl {
         return *shader_notify;
     }
 
-    /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value) {
-        if (syncpoint_id == UINT32_MAX) {
-            return;
-        }
-        MICROPROFILE_SCOPE(GPU_wait);
-        host1x.GetSyncpointManager().WaitHost(syncpoint_id, value);
-    }
-
-    void IncrementSyncPoint(u32 syncpoint_id) {
-        host1x.GetSyncpointManager().IncrementHost(syncpoint_id);
-    }
-
-    [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const {
-        return host1x.GetSyncpointManager().GetHostSyncpointValue(syncpoint_id);
-    }
-
-    void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
-        auto& syncpoint_manager = host1x.GetSyncpointManager();
-        syncpoint_manager.RegisterHostAction(syncpoint_id, value, [this, syncpoint_id, value]() {
-            TriggerCpuInterrupt(syncpoint_id, value);
-        });
-    }
-
     [[nodiscard]] u64 GetTicks() const {
         // This values were reversed engineered by fincs from NVN
         // The gpu clock is reported in units of 385/625 nanoseconds
@@ -322,11 +295,6 @@ struct GPU::Impl {
         gpu_thread.FlushAndInvalidateRegion(addr, size);
     }
 
-    void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const {
-        auto& interrupt_manager = system.InterruptManager();
-        interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value);
-    }
-
     void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
                             Service::Nvidia::NvFence* fences, size_t num_fences) {
         size_t current_request_counter{};
@@ -524,22 +492,6 @@ void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
     impl->RequestSwapBuffers(framebuffer, fences, num_fences);
 }
 
-void GPU::WaitFence(u32 syncpoint_id, u32 value) {
-    impl->WaitFence(syncpoint_id, value);
-}
-
-void GPU::IncrementSyncPoint(u32 syncpoint_id) {
-    impl->IncrementSyncPoint(syncpoint_id);
-}
-
-u32 GPU::GetSyncpointValue(u32 syncpoint_id) const {
-    return impl->GetSyncpointValue(syncpoint_id);
-}
-
-void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
-    impl->RegisterSyncptInterrupt(syncpoint_id, value);
-}
-
 u64 GPU::GetTicks() const {
     return impl->GetTicks();
 }
@@ -171,15 +171,6 @@ public:
     /// Returns a const reference to the shader notifier.
     [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
 
-    /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value);
-
-    void IncrementSyncPoint(u32 syncpoint_id);
-
-    [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const;
-
-    void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
-
     [[nodiscard]] u64 GetTicks() const;
 
     [[nodiscard]] bool IsAsync() const;
@@ -2,12 +2,15 @@
 // Licensed under GPLv3 or any later version
 // Refer to the license.txt file included.
 
+#include "common/microprofile.h"
 #include "video_core/host1x/syncpoint_manager.h"
 
 namespace Tegra {
 
 namespace Host1x {
 
+MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
+
 SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
     std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
     std::function<void(void)>& action) {
@@ -58,6 +61,7 @@ void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) {
 }
 
 void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) {
+    MICROPROFILE_SCOPE(GPU_wait);
     Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value);
 }
 
@@ -396,10 +396,6 @@ void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) {
 }
 
 void RasterizerOpenGL::SignalSyncPoint(u32 value) {
-    if (!gpu.IsAsync()) {
-        gpu.IncrementSyncPoint(value);
-        return;
-    }
     fence_manager.SignalSyncPoint(value);
 }
 
@@ -458,10 +458,6 @@ void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) {
 }
 
 void RasterizerVulkan::SignalSyncPoint(u32 value) {
-    if (!gpu.IsAsync()) {
-        gpu.IncrementSyncPoint(value);
-        return;
-    }
     fence_manager.SignalSyncPoint(value);
 }
 