Mirror of https://github.com/yuzu-emu/yuzu.git
Merge pull request #8531 from FernandoS27/core-timing-fix-reg

Core timing: use only one thread.

Commit c765d5be0b
2 changed files with 2 additions and 12 deletions
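The change collapses core timing onto a single dedicated timer thread. As a rough illustration of that pattern, the sketch below shows one worker thread draining a deadline-ordered queue; it is a self-contained example with invented names (SingleThreadTimer, ScheduleEvent), not yuzu's actual CoreTiming.

// Minimal single-worker timer sketch. All names are illustrative.
#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>

class SingleThreadTimer {
public:
    using Callback = std::function<void(std::chrono::nanoseconds late_by)>;

    SingleThreadTimer() : worker([this] { Run(); }) {}

    ~SingleThreadTimer() {
        {
            std::scoped_lock lk{mutex};
            shutting_down = true;
        }
        cv.notify_one();
        worker.join();
    }

    // May be called from any thread.
    void ScheduleEvent(std::chrono::nanoseconds delay, Callback cb) {
        {
            std::scoped_lock lk{mutex};
            events.push_back({Clock::now() + delay, std::move(cb)});
            std::push_heap(events.begin(), events.end(), Later);
        }
        cv.notify_one(); // the new event may be the soonest one
    }

private:
    using Clock = std::chrono::steady_clock;
    struct Event {
        Clock::time_point when;
        Callback callback;
    };
    // Min-heap comparator: the event with the earliest deadline sits at the front.
    static bool Later(const Event& a, const Event& b) { return a.when > b.when; }

    void Run() {
        std::unique_lock lk{mutex};
        while (!shutting_down) {
            if (events.empty()) {
                cv.wait(lk);
                continue;
            }
            const auto now = Clock::now();
            if (events.front().when > now) {
                // Sleep until the next deadline or until a new event arrives.
                cv.wait_until(lk, events.front().when);
                continue;
            }
            std::pop_heap(events.begin(), events.end(), Later);
            Event evt = std::move(events.back());
            events.pop_back();
            // Only this thread runs callbacks, so no per-event guard is needed;
            // drop the queue lock while user code executes.
            lk.unlock();
            evt.callback(std::chrono::duration_cast<std::chrono::nanoseconds>(now - evt.when));
            lk.lock();
        }
    }

    std::vector<Event> events; // heap ordered by deadline
    std::mutex mutex;
    std::condition_variable cv;
    bool shutting_down = false;
    std::thread worker; // declared last so it starts after the other members exist
};

Because only one thread ever pops the queue and runs callbacks, no per-event lock is required and the queue mutex can be released around each callback, which is exactly the simplification the hunks below make.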
src/core/core_timing.cpp

@@ -61,12 +61,7 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     const auto empty_timed_callback = [](std::uintptr_t, std::chrono::nanoseconds) {};
     ev_lost = CreateEvent("_lost_event", empty_timed_callback);
     if (is_multicore) {
-        const auto hardware_concurrency = std::thread::hardware_concurrency();
-        size_t id = 0;
-        worker_threads.emplace_back(ThreadEntry, std::ref(*this), id++);
-        if (hardware_concurrency > 8) {
-            worker_threads.emplace_back(ThreadEntry, std::ref(*this), id++);
-        }
+        worker_threads.emplace_back(ThreadEntry, std::ref(*this), 0);
     }
 }
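The surviving line keeps the existing ThreadEntry / std::ref(*this) idiom, only spawning a single worker with id 0. A short, standalone illustration of why std::ref is needed here (the Owner type and main() below are invented; std::thread decay-copies its arguments, so a reference wrapper is the usual way to hand the worker a reference to its owner):

#include <cstddef>
#include <functional>
#include <iostream>
#include <thread>
#include <vector>

struct Owner {
    // Static entry point taking the owning object by reference, as in the diff above.
    static void ThreadEntry(Owner& owner, std::size_t id) {
        owner.Work(id);
    }
    void Work(std::size_t id) {
        std::cout << "worker " << id << " running\n";
    }
    std::vector<std::thread> worker_threads;
};

int main() {
    Owner owner;
    // After this commit, only one worker is spawned, with id 0.
    owner.worker_threads.emplace_back(&Owner::ThreadEntry, std::ref(owner), std::size_t{0});
    for (auto& t : owner.worker_threads) {
        t.join();
    }
}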
@@ -228,14 +223,11 @@ std::optional<s64> CoreTiming::Advance() {
         event_queue.pop_back();

         if (const auto event_type{evt.type.lock()}) {
-            sequence_mutex.lock();
             event_mutex.unlock();

-            event_type->guard.lock();
-            sequence_mutex.unlock();
             const s64 delay = static_cast<s64>(GetGlobalTimeNs().count() - evt.time);
             event_type->callback(evt.user_data, std::chrono::nanoseconds{delay});
-            event_type->guard.unlock();

             event_mutex.lock();
             pending_events.fetch_sub(1, std::memory_order_relaxed);
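With a single dispatch thread, the per-EventType guard and the sequence_mutex ordering lock become redundant: callbacks of the same event type can no longer race with one another, so only the queue lock (event_mutex) remains, and it is still released while the callback runs, presumably so that callbacks can schedule new events without self-deadlocking. A minimal sketch of that remaining pattern, with illustrative names rather than the real Advance() loop:

#include <chrono>
#include <functional>
#include <mutex>
#include <utility>
#include <vector>

struct PendingEvent {
    std::function<void(std::chrono::nanoseconds)> callback;
    std::chrono::nanoseconds scheduled_at{};
};

// Runs every queued event; only one thread is expected to call this.
void DrainEvents(std::vector<PendingEvent>& queue, std::mutex& queue_mutex,
                 std::chrono::nanoseconds now) {
    std::unique_lock lk{queue_mutex};
    while (!queue.empty()) {
        PendingEvent evt = std::move(queue.back());
        queue.pop_back();
        lk.unlock(); // never hold the queue lock across user code
        evt.callback(now - evt.scheduled_at);
        lk.lock();
    }
}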
src/core/core_timing.h

@@ -32,7 +32,6 @@ struct EventType {
     TimedCallback callback;
     /// A pointer to the name of the event.
     const std::string name;
-    mutable std::mutex guard;
 };

 /**
@@ -157,7 +156,6 @@ private:
     std::condition_variable wait_pause_cv;
     std::condition_variable wait_signal_cv;
     mutable std::mutex event_mutex;
-    mutable std::mutex sequence_mutex;

     std::atomic<bool> paused_state{};
     bool is_paused{};
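The members left in the header (event_mutex, wait_pause_cv, wait_signal_cv, paused_state, is_paused) are what the single timer thread needs for a pause/resume handshake with the rest of the core. The sketch below shows one way such a handshake can be wired up with exactly those members; the logic is an assumption for illustration, not code taken from yuzu.

#include <atomic>
#include <condition_variable>
#include <mutex>

class PausableWorker {
public:
    // Controller side: request a pause or resume and wait until the worker acknowledges it.
    void SetPaused(bool paused) {
        std::unique_lock lk{event_mutex};
        paused_state = paused;
        wait_signal_cv.notify_all(); // wake the worker so it re-checks the requested state
        wait_pause_cv.wait(lk, [this] { return is_paused == paused_state; });
    }

    // Worker side: called between events; blocks for as long as a pause is requested.
    void CheckPause() {
        std::unique_lock lk{event_mutex};
        while (paused_state) {
            if (!is_paused) {
                is_paused = true;
                wait_pause_cv.notify_all(); // acknowledge the pause
            }
            wait_signal_cv.wait(lk);
        }
        if (is_paused) {
            is_paused = false;
            wait_pause_cv.notify_all(); // acknowledge the resume
        }
    }

private:
    std::condition_variable wait_pause_cv;  // worker -> controller: state acknowledged
    std::condition_variable wait_signal_cv; // controller -> worker: requested state changed
    mutable std::mutex event_mutex;
    std::atomic<bool> paused_state{};
    bool is_paused{};
};

A worker loop would simply call CheckPause() once per iteration, before advancing the event queue.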