Mirror of https://github.com/yuzu-emu/yuzu.git

Merge pull request #7670 from ameerj/vsync-block

gpu: Add shut down method to synchronize threads before destruction
commit 118d5fa3b0: Fernando S, 2022-01-04 14:16:24 +01:00, committed via GitHub
4 changed files with 30 additions and 10 deletions
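For context before the per-file hunks, here is a minimal, self-contained sketch of the pattern this commit adopts. The names (SyncPointTracker, Wait, Increment) are hypothetical stand-ins for GPU::Impl's WaitFence, IncrementSyncPoint, and NotifyShutdown, not yuzu code: a waiter blocks on a condition variable until a syncpoint reaches a target value, and an atomic shutdown flag, set under the same mutex and followed by notify_all, releases every waiter so its thread can be joined before the object is destroyed.

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>

class SyncPointTracker {
public:
    // Mirrors the role of WaitFence: block until the syncpoint reaches
    // `value`, or until shutdown is requested.
    void Wait(std::uint32_t value) {
        std::unique_lock lock{mutex};
        cv.wait(lock, [this, value] {
            return shutting_down.load(std::memory_order_relaxed) ||
                   syncpoint.load() >= value;
        });
    }

    // Mirrors IncrementSyncPoint: bump the counter and wake waiters.
    void Increment() {
        std::scoped_lock lock{mutex};
        syncpoint.fetch_add(1);
        cv.notify_all();
    }

    // Mirrors NotifyShutdown: set the flag under the lock, then wake every
    // waiter so no thread is left blocked during teardown.
    void NotifyShutdown() {
        std::scoped_lock lock{mutex};
        shutting_down.store(true, std::memory_order_relaxed);
        cv.notify_all();
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    std::atomic<std::uint32_t> syncpoint{};
    std::atomic_bool shutting_down{};
};

int main() {
    SyncPointTracker tracker;
    std::thread waiter{[&] { tracker.Wait(100); }}; // would otherwise block forever
    tracker.NotifyShutdown();                       // released by shutdown instead
    waiter.join();                                  // join now succeeds
}

This mirrors the ordering in the core.cpp hunk below, where gpu_core->NotifyShutdown() runs before the services (and their threads) are torn down.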

src/core/core.cpp

@@ -317,6 +317,8 @@ struct System::Impl {
         is_powered_on = false;
         exit_lock = false;
 
+        gpu_core->NotifyShutdown();
+
         services.reset();
         service_manager.reset();
         cheat_engine.reset();

src/core/hle/service/nvflinger/nvflinger.cpp

@@ -266,11 +266,10 @@ void NVFlinger::Compose() {
         auto& gpu = system.GPU();
         const auto& multi_fence = buffer->get().multi_fence;
-        const auto stop_token = vsync_thread.get_stop_token();
         guard->unlock();
         for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
             const auto& fence = multi_fence.fences[fence_id];
-            gpu.WaitFence(fence.id, fence.value, stop_token);
+            gpu.WaitFence(fence.id, fence.value);
         }
         guard->lock();
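One detail kept intact in the hunk above: guard (the NVFlinger lock) is released before blocking on the GPU fences and reacquired afterwards, presumably so that other threads taking the same lock (for example to queue buffers) are not stalled while the vsync thread waits. A standalone sketch of that lock-scope pattern, with hypothetical types (FenceAction, the Gpu template parameter) standing in for the real ones:

#include <mutex>
#include <vector>

struct FenceAction {
    unsigned id;
    unsigned value;
};

// Precondition: the caller already holds `guard`.
template <typename Gpu>
void WaitOutsideGuard(Gpu& gpu, std::mutex& guard, const std::vector<FenceAction>& fences) {
    guard.unlock();                           // do not hold the service lock while blocked
    for (const auto& fence : fences) {
        gpu.WaitFence(fence.id, fence.value); // may block until the GPU signals
    }
    guard.lock();                             // reacquire before touching shared state again
}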

src/video_core/gpu.cpp

@@ -206,7 +206,7 @@ struct GPU::Impl {
     }
 
     /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value, std::stop_token stop_token = {}) {
+    void WaitFence(u32 syncpoint_id, u32 value) {
         // Synced GPU, is always in sync
         if (!is_async) {
             return;
@@ -218,8 +218,13 @@
         }
         MICROPROFILE_SCOPE(GPU_wait);
         std::unique_lock lock{sync_mutex};
-        sync_cv.wait(lock, stop_token,
-                     [=, this] { return syncpoints.at(syncpoint_id).load() >= value; });
+        sync_cv.wait(lock, [=, this] {
+            if (shutting_down.load(std::memory_order_relaxed)) {
+                // We're shutting down, ensure no threads continue to wait for the next syncpoint
+                return true;
+            }
+            return syncpoints.at(syncpoint_id).load() >= value;
+        });
     }
 
     void IncrementSyncPoint(u32 syncpoint_id) {
@@ -307,6 +312,12 @@
         cpu_context->MakeCurrent();
     }
 
+    void NotifyShutdown() {
+        std::unique_lock lk{sync_mutex};
+        shutting_down.store(true, std::memory_order::relaxed);
+        sync_cv.notify_all();
+    }
+
     /// Obtain the CPU Context
     void ObtainContext() {
         cpu_context->MakeCurrent();
@@ -665,6 +676,8 @@
     std::unique_ptr<Engines::KeplerMemory> kepler_memory;
     /// Shader build notifier
     std::unique_ptr<VideoCore::ShaderNotify> shader_notify;
 
+    /// When true, we are about to shut down emulation session, so terminate outstanding tasks
+    std::atomic_bool shutting_down{};
 
     std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> syncpoints{};
@@ -673,7 +686,7 @@
     std::mutex sync_mutex;
     std::mutex device_mutex;
 
-    std::condition_variable_any sync_cv;
+    std::condition_variable sync_cv;
 
     struct FlushRequest {
         explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_)
@@ -812,8 +825,8 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
     return impl->ShaderNotify();
 }
 
-void GPU::WaitFence(u32 syncpoint_id, u32 value, std::stop_token stop_token) {
-    impl->WaitFence(syncpoint_id, value, stop_token);
+void GPU::WaitFence(u32 syncpoint_id, u32 value) {
+    impl->WaitFence(syncpoint_id, value);
 }
 
 void GPU::IncrementSyncPoint(u32 syncpoint_id) {
@@ -852,6 +865,10 @@ void GPU::Start() {
     impl->Start();
 }
 
+void GPU::NotifyShutdown() {
+    impl->NotifyShutdown();
+}
+
 void GPU::ObtainContext() {
     impl->ObtainContext();
 }
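The removal of the std::stop_token parameter and the switch from std::condition_variable_any back to std::condition_variable fall out of the same change: the interruptible wait that was tied to the vsync thread's stop token is replaced by a plain predicate that also observes the new shutting_down flag. A rough side-by-side sketch (standalone, not yuzu code; the counter and names are illustrative):

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <stop_token>

std::mutex m;
std::condition_variable_any cv_any; // required for the stop_token wait overload
std::condition_variable cv;         // sufficient once the flag is used instead
std::atomic<unsigned> counter{};
std::atomic_bool shutting_down{};

// Old approach: the wait is cancelled when the owning std::jthread requests stop.
void WaitOld(unsigned target, std::stop_token token) {
    std::unique_lock lock{m};
    cv_any.wait(lock, token, [&] { return counter.load() >= target; });
}

// New approach: the wait also returns once a shutdown notifier sets the flag
// under the mutex and calls cv.notify_all().
void WaitNew(unsigned target) {
    std::unique_lock lock{m};
    cv.wait(lock, [&] { return shutting_down.load() || counter.load() >= target; });
}

The flag-based form avoids threading a stop_token through NVFlinger and the public GPU interface, which is why the gpu.h declaration below loses its std::stop_token parameter and the <stop_token> include.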

src/video_core/gpu.h

@@ -5,7 +5,6 @@
 #pragma once
 
 #include <memory>
-#include <stop_token>
 
 #include "common/bit_field.h"
 #include "common/common_types.h"
@@ -210,7 +209,7 @@
     [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
 
     /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value, std::stop_token stop_token = {});
+    void WaitFence(u32 syncpoint_id, u32 value);
 
     void IncrementSyncPoint(u32 syncpoint_id);
@@ -233,6 +232,9 @@
     /// core timing events.
     void Start();
 
+    /// Performs any additional necessary steps to shutdown GPU emulation.
+    void NotifyShutdown();
+
     /// Obtain the CPU Context
     void ObtainContext();