Mirror of https://github.com/yuzu-emu/yuzu.git, synced 2024-07-04 23:31:19 +01:00

Kernel: Added WaitObject and changed "waitable" objects to inherit from it.

Parent: 0c7498545f
Commit: c22bac6398

8 changed files with 73 additions and 71 deletions

Event:

@@ -14,7 +14,7 @@
 
 namespace Kernel {
 
-class Event : public Object {
+class Event : public WaitObject {
 public:
     std::string GetTypeName() const override { return "Event"; }
     std::string GetName() const override { return name; }
@@ -27,16 +27,12 @@ public:
     bool locked; ///< Event signal wait
     bool permanent_locked; ///< Hack - to set event permanent state (for easy passthrough)
-    std::vector<Handle> waiting_threads; ///< Threads that are waiting for the event
     std::string name; ///< Name of event (optional)
 
     ResultVal<bool> WaitSynchronization() override {
         bool wait = locked;
         if (locked) {
-            Handle thread = GetCurrentThread()->GetHandle();
-            if (std::find(waiting_threads.begin(), waiting_threads.end(), thread) == waiting_threads.end()) {
-                waiting_threads.push_back(thread);
-            }
+            AddWaitingThread(GetCurrentThread());
             Kernel::WaitCurrentThread(WAITTYPE_EVENT, this);
         }
         if (reset_type != RESETTYPE_STICKY && !permanent_locked) {
@@ -86,20 +82,12 @@ ResultCode SignalEvent(const Handle handle) {
     if (evt == nullptr) return InvalidHandle(ErrorModule::Kernel);
 
-    // Resume threads waiting for event to signal
-    bool event_caught = false;
-    for (size_t i = 0; i < evt->waiting_threads.size(); ++i) {
-        Thread* thread = Kernel::g_handle_table.Get<Thread>(evt->waiting_threads[i]).get();
-        if (thread != nullptr)
-            thread->ResumeFromWait();
+    bool event_caught = evt->ResumeAllWaitingThreads();
 
     // If any thread is signalled awake by this event, assume the event was "caught" and reset
     // the event. This will result in the next thread waiting on the event to block. Otherwise,
     // the event will not be reset, and the next thread to call WaitSynchronization on it will
     // not block. Not sure if this is correct behavior, but it seems to work.
-        event_caught = true;
-    }
-    evt->waiting_threads.clear();
 
     if (!evt->permanent_locked) {
         evt->locked = event_caught;
     }
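
The Event hunks above replace Event's private waiting_threads bookkeeping with the WaitObject base class, and SignalEvent now derives event_caught from a single ResumeAllWaitingThreads() call. That method's definition is not visible in the hunks of this commit, so the standalone sketch below (not the actual Citra/yuzu sources) assumes it wakes every registered waiter and reports whether any thread was woken:

// Standalone model, not the real kernel code. ResumeAllWaitingThreads' body
// is an assumption: wake every waiter, report whether any thread was woken.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Thread {
    bool waiting = true;
    void ResumeFromWait() { waiting = false; }
};

class WaitObject {
public:
    void AddWaitingThread(Thread* thread) {
        // A thread is registered at most once, as in the commit.
        if (std::find(waiting_threads.begin(), waiting_threads.end(), thread) == waiting_threads.end())
            waiting_threads.push_back(thread);
    }
    bool ResumeAllWaitingThreads() {
        bool any_resumed = !waiting_threads.empty();
        for (Thread* thread : waiting_threads)
            thread->ResumeFromWait();
        waiting_threads.clear();
        return any_resumed;
    }
private:
    std::vector<Thread*> waiting_threads;
};

class Event : public WaitObject {
public:
    bool locked = true;             // event not yet signalled
    bool permanent_locked = false;  // the "permanent state" hack kept by the commit

    // Mirrors the new SignalEvent(): if any thread was woken, the event was
    // "caught" and stays locked, so the next waiter blocks again.
    void Signal() {
        bool event_caught = ResumeAllWaitingThreads();
        if (!permanent_locked)
            locked = event_caught;
    }
};

int main() {
    Event evt;
    Thread t;
    evt.AddWaitingThread(&t);  // t blocks on the event
    evt.Signal();              // t is resumed; the event is re-locked ("caught")
    std::printf("thread waiting: %d, event locked: %d\n", t.waiting, evt.locked);
}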

Kernel (WaitObject implementation):

@@ -18,6 +18,32 @@ SharedPtr<Thread> g_main_thread = nullptr;
 HandleTable g_handle_table;
 u64 g_program_id = 0;
 
+void WaitObject::AddWaitingThread(Thread* thread) {
+    if (std::find(waiting_threads.begin(), waiting_threads.end(), thread) == waiting_threads.end()) {
+        waiting_threads.push_back(thread);
+    }
+}
+
+Thread* WaitObject::ResumeNextThread() {
+    if (waiting_threads.empty()) return nullptr;
+
+    auto next_thread = waiting_threads.front();
+
+    next_thread->ResumeFromWait();
+    waiting_threads.erase(waiting_threads.begin());
+
+    return next_thread.get();
+}
+
+void WaitObject::ReleaseAllWaitingThreads() {
+    auto waiting_threads_copy = waiting_threads;
+
+    for (auto thread : waiting_threads_copy)
+        thread->ReleaseWaitObject(this);
+
+    waiting_threads.clear();
+}
+
 HandleTable::HandleTable() {
     next_generation = 1;
     Clear();
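
The three primitives added above are small enough to model in isolation. Note that, as shown in these hunks, the header further down declares waiting_threads as std::vector<Thread*>, while ResumeNextThread calls .get() on an element, which would only compile if the vector actually held SharedPtr<Thread>; the sketch below sidesteps this by using raw pointers throughout, so it is an approximation rather than the commit's exact code:

// Standalone sketch of the three primitives above, not the real kernel code;
// it uses plain Thread* throughout (see the note on SharedPtr in the lead-in).
#include <algorithm>
#include <cstdio>
#include <vector>

class WaitObject;

struct Thread {
    const char* name;
    void ResumeFromWait() { std::printf("%s resumed\n", name); }
    void ReleaseWaitObject(WaitObject*) { std::printf("%s released its wait object\n", name); }
};

class WaitObject {
public:
    void AddWaitingThread(Thread* thread) {
        // A thread is registered at most once, even if it waits repeatedly.
        if (std::find(waiting_threads.begin(), waiting_threads.end(), thread) == waiting_threads.end())
            waiting_threads.push_back(thread);
    }

    Thread* ResumeNextThread() {
        // FIFO: wake the thread that has been waiting the longest, if any.
        if (waiting_threads.empty()) return nullptr;
        Thread* next_thread = waiting_threads.front();
        next_thread->ResumeFromWait();
        waiting_threads.erase(waiting_threads.begin());
        return next_thread;
    }

    void ReleaseAllWaitingThreads() {
        // Iterate over a copy (presumably so ReleaseWaitObject can safely call
        // back into this object), then drop every registration.
        auto waiting_threads_copy = waiting_threads;
        for (Thread* thread : waiting_threads_copy)
            thread->ReleaseWaitObject(this);
        waiting_threads.clear();
    }

private:
    std::vector<Thread*> waiting_threads;
};

int main() {
    WaitObject object;
    Thread a{"a"}, b{"b"};
    object.AddWaitingThread(&a);
    object.AddWaitingThread(&a);        // duplicate, ignored
    object.AddWaitingThread(&b);
    object.ResumeNextThread();          // resumes a
    object.ReleaseAllWaitingThreads();  // releases b
}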

Kernel (WaitObject declaration):

@@ -8,6 +8,8 @@
 
 #include <array>
 #include <string>
+#include <vector>
 
 #include "common/common.h"
 #include "core/hle/result.h"
@@ -92,6 +94,29 @@ inline void intrusive_ptr_release(Object* object) {
 template <typename T>
 using SharedPtr = boost::intrusive_ptr<T>;
 
+/// Class that represents a Kernel object that a thread can be waiting on
+class WaitObject : public Object {
+public:
+
+    /**
+     * Add a thread to wait on this object
+     * @param thread Pointer to thread to add
+     */
+    void AddWaitingThread(Thread* thread);
+
+    /**
+     * Resumes the next thread waiting on this object
+     * @return Pointer to the thread that was resumed, nullptr if no threads are waiting
+     */
+    Thread* ResumeNextThread();
+
+    /// Releases all threads waiting on this object
+    void ReleaseAllWaitingThreads();
+
+private:
+    std::vector<Thread*> waiting_threads; ///< Threads waiting for this object to become available
+};
+
 /**
  * This class allows the creation of Handles, which are references to objects that can be tested
  * for validity and looked up. Here they are used to pass references to kernel objects to/from the
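
The header hunk defines the shape of the new hierarchy: Object stays the reference-counted base handled through SharedPtr (boost::intrusive_ptr), and WaitObject sits between Object and the concrete waitable types. The sketch below is a compileable toy model of that layering, not the real header; the Pipe type is made up purely to show where a new waitable object would slot in:

// Toy model of the layering declared above; requires only the Boost headers.
// "Pipe" is a made-up example type, not something added by this commit.
#include <boost/intrusive_ptr.hpp>
#include <string>
#include <vector>

class Thread;  // the real Thread lives elsewhere in the kernel

class Object {
public:
    virtual ~Object() = default;
    virtual std::string GetTypeName() const = 0;
    unsigned ref_count = 0;
};

// boost::intrusive_ptr finds these by argument-dependent lookup.
inline void intrusive_ptr_add_ref(Object* object) { ++object->ref_count; }
inline void intrusive_ptr_release(Object* object) {
    if (--object->ref_count == 0) delete object;
}

template <typename T>
using SharedPtr = boost::intrusive_ptr<T>;

class WaitObject : public Object {
public:
    // Simplified: the commit's version skips threads that are already registered.
    void AddWaitingThread(Thread* thread) { waiting_threads.push_back(thread); }
    Thread* ResumeNextThread();        // as declared in the hunk above
    void ReleaseAllWaitingThreads();   // as declared in the hunk above
private:
    std::vector<Thread*> waiting_threads;
};

// A hypothetical new waitable type only needs to derive from WaitObject;
// Event, Mutex, Semaphore, Thread and Timer follow this pattern in the commit.
class Pipe : public WaitObject {
public:
    std::string GetTypeName() const override { return "Pipe"; }
};

int main() {
    SharedPtr<Pipe> pipe(new Pipe);  // reference-counted handle to a waitable object
    return pipe->GetTypeName() == "Pipe" ? 0 : 1;
}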

Mutex:

@@ -13,7 +13,7 @@
 
 namespace Kernel {
 
-class Mutex : public Object {
+class Mutex : public WaitObject {
 public:
     std::string GetTypeName() const override { return "Mutex"; }
     std::string GetName() const override { return name; }
@@ -24,7 +24,6 @@ public:
     bool initial_locked; ///< Initial lock state when mutex was created
     bool locked; ///< Current locked state
     Handle lock_thread; ///< Handle to thread that currently has mutex
-    std::vector<Handle> waiting_threads; ///< Threads that are waiting for the mutex
     std::string name; ///< Name of mutex (optional)
 
     ResultVal<bool> WaitSynchronization() override;
@@ -45,36 +44,20 @@ void MutexAcquireLock(Mutex* mutex, Handle thread = GetCurrentThread()->GetHandl
     mutex->lock_thread = thread;
 }
 
-bool ReleaseMutexForThread(Mutex* mutex, Handle thread_handle) {
-    MutexAcquireLock(mutex, thread_handle);
-
-    Thread* thread = Kernel::g_handle_table.Get<Thread>(thread_handle).get();
-    if (thread == nullptr) {
-        LOG_ERROR(Kernel, "Called with invalid handle: %08X", thread_handle);
-        return false;
-    }
-
-    thread->ResumeFromWait();
-    return true;
-}
-
 /**
  * Resumes a thread waiting for the specified mutex
  * @param mutex The mutex that some thread is waiting on
  */
 void ResumeWaitingThread(Mutex* mutex) {
-    // Find the next waiting thread for the mutex...
-    if (mutex->waiting_threads.empty()) {
+    auto next_thread = mutex->ResumeNextThread();
+    if (next_thread != nullptr) {
+        MutexAcquireLock(mutex, next_thread->GetHandle());
+    } else {
         // Reset mutex lock thread handle, nothing is waiting
         mutex->locked = false;
         mutex->lock_thread = -1;
     }
-    else {
-        // Resume the next waiting thread and re-lock the mutex
-        std::vector<Handle>::iterator iter = mutex->waiting_threads.begin();
-        ReleaseMutexForThread(mutex, *iter);
-        mutex->waiting_threads.erase(iter);
-    }
 }
 
 void MutexEraseLock(Mutex* mutex) {
@@ -175,7 +158,7 @@ Handle CreateMutex(bool initial_locked, const std::string& name) {
 ResultVal<bool> Mutex::WaitSynchronization() {
     bool wait = locked;
     if (locked) {
-        waiting_threads.push_back(GetCurrentThread()->GetHandle());
+        AddWaitingThread(GetCurrentThread());
         Kernel::WaitCurrentThread(WAITTYPE_MUTEX, this);
     } else {
         // Lock the mutex when the first thread accesses it
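
For the mutex, the commit deletes ReleaseMutexForThread and the per-mutex handle vector: releasing now means asking the WaitObject base for the next waiter and, if there is one, handing it the lock directly. The following is a standalone sketch of that hand-off, not the real kernel code; Acquire() and Release() are illustrative names, not the commit's API:

// Standalone sketch of the mutex hand-off, assuming a FIFO waiter list.
#include <cstdio>
#include <deque>

struct Thread {
    int handle;
    void ResumeFromWait() { std::printf("thread %d resumed\n", handle); }
};

class WaitObject {
public:
    void AddWaitingThread(Thread* thread) { waiting_threads.push_back(thread); }
    Thread* ResumeNextThread() {
        // Wake the longest-waiting thread, or report that nobody is waiting.
        if (waiting_threads.empty()) return nullptr;
        Thread* next = waiting_threads.front();
        waiting_threads.pop_front();
        next->ResumeFromWait();
        return next;
    }
private:
    std::deque<Thread*> waiting_threads;
};

class Mutex : public WaitObject {
public:
    bool locked = false;
    int lock_thread = -1;  // handle of the current owner, -1 when free

    void Acquire(Thread* thread) {
        locked = true;
        lock_thread = thread->handle;
    }

    // Mirrors ResumeWaitingThread() in the hunk above: the next waiter (if any)
    // becomes the new owner, otherwise the mutex is simply unlocked.
    void Release() {
        Thread* next_thread = ResumeNextThread();
        if (next_thread != nullptr) {
            Acquire(next_thread);
        } else {
            locked = false;
            lock_thread = -1;
        }
    }
};

int main() {
    Thread a{1}, b{2};
    Mutex mutex;
    mutex.Acquire(&a);
    mutex.AddWaitingThread(&b);  // b blocks on the mutex
    mutex.Release();             // b is resumed and becomes the owner
    std::printf("locked=%d owner=%d\n", mutex.locked, mutex.lock_thread);
}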

Semaphore:

@@ -12,7 +12,7 @@
 
 namespace Kernel {
 
-class Semaphore : public Object {
+class Semaphore : public WaitObject {
 public:
     std::string GetTypeName() const override { return "Semaphore"; }
     std::string GetName() const override { return name; }
@@ -22,7 +22,6 @@ public:
 
     s32 max_count; ///< Maximum number of simultaneous holders the semaphore can have
     s32 available_count; ///< Number of free slots left in the semaphore
-    std::queue<Handle> waiting_threads; ///< Threads that are waiting for the semaphore
     std::string name; ///< Name of semaphore (optional)
 
     /**
@@ -38,7 +37,7 @@ public:
 
         if (wait) {
             Kernel::WaitCurrentThread(WAITTYPE_SEMA, this);
-            waiting_threads.push(GetCurrentThread()->GetHandle());
+            AddWaitingThread(GetCurrentThread());
         } else {
             --available_count;
         }
@@ -83,11 +82,7 @@ ResultCode ReleaseSemaphore(s32* count, Handle handle, s32 release_count) {
 
     // Notify some of the threads that the semaphore has been released
     // stop once the semaphore is full again or there are no more waiting threads
-    while (!semaphore->waiting_threads.empty() && semaphore->IsAvailable()) {
-        Thread* thread = Kernel::g_handle_table.Get<Thread>(semaphore->waiting_threads.front()).get();
-        if (thread != nullptr)
-            thread->ResumeFromWait();
-        semaphore->waiting_threads.pop();
+    while (semaphore->IsAvailable() && semaphore->ResumeNextThread() != nullptr) {
         --semaphore->available_count;
     }
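
The semaphore release loop keeps its available_count arithmetic but lets ResumeNextThread() do the waking, stopping when either no slots or no waiters remain. Below is a minimal standalone model of that loop, not the real kernel code; Release(count) stands in for the commit's ReleaseSemaphore:

// Minimal model of the new semaphore release loop, under assumed FIFO waking.
#include <cstdio>
#include <vector>

struct Thread {
    const char* name;
    void ResumeFromWait() { std::printf("%s resumed\n", name); }
};

class WaitObject {
public:
    void AddWaitingThread(Thread* thread) { waiting_threads.push_back(thread); }
    Thread* ResumeNextThread() {
        if (waiting_threads.empty()) return nullptr;
        Thread* next = waiting_threads.front();
        waiting_threads.erase(waiting_threads.begin());
        next->ResumeFromWait();
        return next;
    }
private:
    std::vector<Thread*> waiting_threads;
};

class Semaphore : public WaitObject {
public:
    int max_count = 0;
    int available_count = 0;

    bool IsAvailable() const { return available_count > 0; }

    void Release(int release_count) {
        available_count += release_count;
        // Wake waiters while slots remain and somebody is actually waiting.
        while (IsAvailable() && ResumeNextThread() != nullptr) {
            --available_count;  // each resumed thread consumes one slot
        }
    }
};

int main() {
    Semaphore sema;
    sema.max_count = 2;
    Thread a{"a"}, b{"b"}, c{"c"};
    sema.AddWaitingThread(&a);
    sema.AddWaitingThread(&b);
    sema.AddWaitingThread(&c);
    sema.Release(2);  // wakes a and b; c keeps waiting
    std::printf("available: %d\n", sema.available_count);  // prints 0
}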

Thread (implementation):

@@ -25,10 +25,7 @@ namespace Kernel {
 
 ResultVal<bool> Thread::WaitSynchronization() {
     const bool wait = status != THREADSTATUS_DORMANT;
     if (wait) {
-        Thread* thread = GetCurrentThread();
-        if (std::find(waiting_threads.begin(), waiting_threads.end(), thread) == waiting_threads.end()) {
-            waiting_threads.push_back(thread);
-        }
+        AddWaitingThread(GetCurrentThread());
         WaitCurrentThread(WAITTYPE_THREADEND, this);
     }
@@ -110,11 +107,7 @@ void Thread::Stop(const char* reason) {
 
     ChangeReadyState(this, false);
     status = THREADSTATUS_DORMANT;
-    for (auto& waiting_thread : waiting_threads) {
-        if (CheckWaitType(waiting_thread.get(), WAITTYPE_THREADEND, this))
-            waiting_thread->ResumeFromWait();
-    }
-    waiting_threads.clear();
+    ResumeAllWaitingThreads();
 
     // Stopped threads are never waiting.
     wait_type = WAITTYPE_NONE;
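
Threads are themselves waitable (this is what WAITTYPE_THREADEND joins on), so Thread now inherits WaitObject too and Stop() simply resumes every registered waiter. Note that the old loop filtered waiters with CheckWaitType(..., WAITTYPE_THREADEND, this) before resuming them, while the new call site has no such filter; whether ResumeAllWaitingThreads filters internally is not visible in these hunks. A standalone sketch of the join-style behavior, not the real kernel code:

// Standalone sketch: a thread is itself a WaitObject, so stopping it wakes
// everything that registered to wait for its end.
#include <algorithm>
#include <cstdio>
#include <vector>

class Thread;  // defined below; WaitObject only stores pointers to it

enum ThreadStatus { THREADSTATUS_READY, THREADSTATUS_WAIT, THREADSTATUS_DORMANT };

class WaitObject {
public:
    void AddWaitingThread(Thread* thread) {
        if (std::find(waiting_threads.begin(), waiting_threads.end(), thread) == waiting_threads.end())
            waiting_threads.push_back(thread);
    }
    void ResumeAllWaitingThreads();  // defined after Thread is complete
protected:
    std::vector<Thread*> waiting_threads;
};

class Thread : public WaitObject {
public:
    explicit Thread(const char* name_) : name(name_) {}

    const char* name;
    ThreadStatus status = THREADSTATUS_READY;

    void ResumeFromWait() {
        status = THREADSTATUS_READY;
        std::printf("%s resumed\n", name);
    }

    // Mirrors the new Thread::Stop(): mark dormant, then wake every waiter.
    void Stop() {
        status = THREADSTATUS_DORMANT;
        ResumeAllWaitingThreads();
    }
};

void WaitObject::ResumeAllWaitingThreads() {
    for (Thread* thread : waiting_threads)
        thread->ResumeFromWait();
    waiting_threads.clear();
}

int main() {
    Thread worker("worker");
    Thread joiner("joiner");
    joiner.status = THREADSTATUS_WAIT;
    worker.AddWaitingThread(&joiner);  // joiner effectively "joins" worker
    worker.Stop();                     // joiner is resumed when worker ends
}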

Thread (header):

@@ -52,7 +52,7 @@ enum WaitType {
 
 namespace Kernel {
 
-class Thread : public Kernel::Object {
+class Thread : public WaitObject {
 public:
     static ResultVal<SharedPtr<Thread>> Create(std::string name, VAddr entry_point, s32 priority,
         u32 arg, s32 processor_id, VAddr stack_top, u32 stack_size);
@@ -99,8 +99,6 @@ public:
     Object* wait_object;
     VAddr wait_address;
 
-    std::vector<SharedPtr<Thread>> waiting_threads;
-
     std::string name;
 
     /// Whether this thread is intended to never actually be executed, i.e. always idle

Timer:

@@ -13,7 +13,7 @@
 
 namespace Kernel {
 
-class Timer : public Object {
+class Timer : public WaitObject {
 public:
     std::string GetTypeName() const override { return "Timer"; }
     std::string GetName() const override { return name; }
@@ -24,7 +24,6 @@ public:
     ResetType reset_type; ///< The ResetType of this timer
 
     bool signaled; ///< Whether the timer has been signaled or not
-    std::set<Handle> waiting_threads; ///< Threads that are waiting for the timer
     std::string name; ///< Name of timer (optional)
 
     u64 initial_delay; ///< The delay until the timer fires for the first time
@@ -33,7 +32,7 @@ public:
     ResultVal<bool> WaitSynchronization() override {
         bool wait = !signaled;
         if (wait) {
-            waiting_threads.insert(GetCurrentThread()->GetHandle());
+            AddWaitingThread(GetCurrentThread());
             Kernel::WaitCurrentThread(WAITTYPE_TIMER, this);
         }
         return MakeResult<bool>(wait);
@@ -92,12 +91,7 @@ static void TimerCallback(u64 timer_handle, int cycles_late) {
     timer->signaled = true;
 
     // Resume all waiting threads
-    for (Handle thread_handle : timer->waiting_threads) {
-        if (SharedPtr<Thread> thread = Kernel::g_handle_table.Get<Thread>(thread_handle))
-            thread->ResumeFromWait();
-    }
-
-    timer->waiting_threads.clear();
+    timer->ResumeAllWaitingThreads();
 
     if (timer->reset_type == RESETTYPE_ONESHOT)
         timer->signaled = false;
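
The timer follows the same pattern as the event, with one nuance: after waking its waiters, a one-shot timer clears its signal again so the next wait blocks, while other reset types stay signalled. A standalone sketch, not the real kernel code; Fire() stands in for the commit's TimerCallback, and ResumeAllWaitingThreads is again given an assumed body:

// Standalone model of the new timer callback flow.
#include <cstdio>
#include <vector>

struct Thread {
    bool waiting = true;
    void ResumeFromWait() { waiting = false; }
};

class WaitObject {
public:
    void AddWaitingThread(Thread* thread) { waiting_threads.push_back(thread); }
    void ResumeAllWaitingThreads() {  // assumed: wake everyone, then forget them
        for (Thread* thread : waiting_threads)
            thread->ResumeFromWait();
        waiting_threads.clear();
    }
private:
    std::vector<Thread*> waiting_threads;
};

enum ResetType { RESETTYPE_ONESHOT, RESETTYPE_STICKY };

class Timer : public WaitObject {
public:
    ResetType reset_type = RESETTYPE_ONESHOT;
    bool signaled = false;

    // Mirrors TimerCallback() in the hunk above.
    void Fire() {
        signaled = true;
        ResumeAllWaitingThreads();
        if (reset_type == RESETTYPE_ONESHOT)
            signaled = false;  // one-shot: the next wait blocks again
    }
};

int main() {
    Timer timer;
    Thread t;
    timer.AddWaitingThread(&t);
    timer.Fire();
    std::printf("thread waiting: %d, timer signaled: %d\n", t.waiting, timer.signaled);
}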