
hle: kernel: Refactor out various KThread std::shared_ptr usage.

bunnei 2021-04-02 18:02:10 -07:00
parent d9df63583f
commit 34bed1ab41
10 changed files with 30 additions and 58 deletions
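
The refactor follows one pattern across all ten files: places that previously passed or stored std::shared_ptr<KThread> now take a plain KThread*, leaving thread lifetime to its actual owner instead of paying refcount traffic on every IPC hot path. A minimal before/after sketch of that ownership shift (the Registry types here are hypothetical illustrations, not code from this commit):

    #include <memory>
    #include <vector>

    struct KThread {};

    // Before: the container co-owns each thread, so every add, copy, and
    // destruction touches an atomic reference count, and teardown order
    // is implicit in wherever the last shared_ptr happens to die.
    class SharedRegistry {
    public:
        void Add(std::shared_ptr<KThread> thread) {
            threads.push_back(std::move(thread));
        }

    private:
        std::vector<std::shared_ptr<KThread>> threads;
    };

    // After: the container merely observes threads; the kernel (the real
    // owner) must guarantee each pointer stays valid while registered.
    class PointerRegistry {
    public:
        void Add(KThread* thread) {
            threads.push_back(thread); // no refcount churn
        }

    private:
        std::vector<KThread*> threads;
    };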

src/core/hle/kernel/client_session.cpp

@@ -38,8 +38,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern
     return MakeResult(std::move(client_session));
 }
 
-ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
-                                          Core::Memory::Memory& memory,
+ResultCode ClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                           Core::Timing::CoreTiming& core_timing) {
     // Keep ServerSession alive until we're done working with it.
     if (!parent->Server()) {

src/core/hle/kernel/client_session.h

@@ -46,7 +46,7 @@ public:
         return HANDLE_TYPE;
     }
 
-    ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
+    ResultCode SendSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                Core::Timing::CoreTiming& core_timing);
 
     bool IsSignaled() const override;

src/core/hle/kernel/global_scheduler_context.cpp

@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
 
 GlobalSchedulerContext::~GlobalSchedulerContext() = default;
 
-void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
+void GlobalSchedulerContext::AddThread(KThread* thread) {
     std::scoped_lock lock{global_list_guard};
-    thread_list.push_back(std::move(thread));
+    thread_list.push_back(thread);
 }
 
-void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
+void GlobalSchedulerContext::RemoveThread(KThread* thread) {
     std::scoped_lock lock{global_list_guard};
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
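
RemoveThread above relies on the standard erase-remove idiom, unchanged by this commit but easy to misread now that raw pointers are involved. A self-contained sketch of what that call does (toy KThread, hypothetical values):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct KThread {};

    int main() {
        KThread a, b, c;
        std::vector<KThread*> thread_list{&a, &b, &c};

        // std::remove shifts the surviving pointers to the front and returns
        // the new logical end; erase() then trims the leftover tail.
        KThread* target = &b;
        thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), target),
                          thread_list.end());

        assert(thread_list.size() == 2);
        assert(thread_list[0] == &a && thread_list[1] == &c);
    }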

src/core/hle/kernel/global_scheduler_context.h

@@ -38,13 +38,13 @@ public:
     ~GlobalSchedulerContext();
 
     /// Adds a new thread to the scheduler
-    void AddThread(std::shared_ptr<KThread> thread);
+    void AddThread(KThread* thread);
 
     /// Removes a thread from the scheduler
-    void RemoveThread(std::shared_ptr<KThread> thread);
+    void RemoveThread(KThread* thread);
 
     /// Returns a list of all threads managed by the scheduler
-    [[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
+    [[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
         return thread_list;
     }
 
@@ -79,7 +79,7 @@ private:
     LockType scheduler_lock;
 
     /// Lists all thread ids that aren't deleted/etc.
-    std::vector<std::shared_ptr<KThread>> thread_list;
+    std::vector<KThread*> thread_list;
     Common::SpinLock global_list_guard{};
 };
 

src/core/hle/kernel/hle_ipc.cpp

@@ -46,11 +46,11 @@ void SessionRequestHandler::ClientDisconnected(
     boost::range::remove_erase(connected_sessions, server_session);
 }
 
-HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
-                                     std::shared_ptr<ServerSession> server_session,
-                                     std::shared_ptr<KThread> thread)
-    : server_session(std::move(server_session)),
-      thread(std::move(thread)), kernel{kernel}, memory{memory} {
+HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
+                                     std::shared_ptr<ServerSession> server_session_,
+                                     KThread* thread_)
+    : server_session(std::move(server_session_)),
+      thread(thread_), kernel{kernel_}, memory{memory_} {
     cmd_buf[0] = 0;
 }
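
The constructor also renames its parameters with a trailing underscore (kernel_, memory_, thread_) so they no longer shadow the members they initialize; `kernel{kernel}` is legal in a member-initializer list but ambiguous to read. A minimal sketch of the convention (Widget is a hypothetical type, not from this commit):

    struct Widget {
        // Trailing underscores keep the initializer list unambiguous:
        // each entry reads member{parameter}.
        explicit Widget(int value_, double scale_) : value{value_}, scale{scale_} {}

        int value;
        double scale;
    };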

src/core/hle/kernel/hle_ipc.h

@@ -109,8 +109,7 @@ protected:
 class HLERequestContext {
 public:
     explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
-                               std::shared_ptr<ServerSession> session,
-                               std::shared_ptr<KThread> thread);
+                               std::shared_ptr<ServerSession> session, KThread* thread);
     ~HLERequestContext();
 
     /// Returns a pointer to the IPC command buffer for this request.
@@ -276,10 +275,6 @@ public:
         return *thread;
     }
 
-    const KThread& GetThread() const {
-        return *thread;
-    }
-
     bool IsThreadWaiting() const {
         return is_thread_waiting;
     }
@@ -291,7 +286,8 @@ private:
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
 
     std::shared_ptr<Kernel::ServerSession> server_session;
-    std::shared_ptr<KThread> thread;
+    KThread* thread;
+
     // TODO(yuriks): Check common usage of this and optimize size accordingly
     boost::container::small_vector<Handle, 8> move_handles;
     boost::container::small_vector<Handle, 8> copy_handles;

src/core/hle/kernel/kernel.cpp

@@ -60,8 +60,6 @@ struct KernelCore::Impl {
     void Initialize(KernelCore& kernel) {
         global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
 
-        RegisterHostThread();
-
         service_thread_manager =
             std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
         is_phantom_mode_for_singlecore = false;
@@ -77,6 +75,8 @@ struct KernelCore::Impl {
         InitializeSchedulers();
         InitializeSuspendThreads();
         InitializePreemption(kernel);
+
+        RegisterHostThread();
     }
 
     void InitializeCores() {

src/core/hle/kernel/server_session.cpp

@@ -44,12 +44,7 @@ ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kern
 
 bool ServerSession::IsSignaled() const {
     // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
-    if (!parent->Client()) {
-        return true;
-    }
-
-    // Wait if we have no pending requests, or if we're currently handling a request.
-    return !pending_requesting_threads.empty() && currently_handling == nullptr;
+    return !parent->Client();
 }
 
 void ServerSession::ClientDisconnected() {
@@ -62,11 +57,6 @@ void ServerSession::ClientDisconnected() {
         // invalidated (set to null).
         handler->ClientDisconnected(SharedFrom(this));
     }
-
-    // Clean up the list of client threads with pending requests, they are unneeded now that the
-    // client endpoint is closed.
-    pending_requesting_threads.clear();
-    currently_handling = nullptr;
 }
 
 void ServerSession::AppendDomainRequestHandler(std::shared_ptr<SessionRequestHandler> handler) {
@@ -116,11 +106,9 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
     return RESULT_SUCCESS;
 }
 
-ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread,
-                                           Core::Memory::Memory& memory) {
+ResultCode ServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) {
     u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
 
-    auto context =
-        std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));
+    auto context = std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), thread);
 
     context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
@@ -161,10 +149,9 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
     return result;
 }
 
-ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread,
-                                            Core::Memory::Memory& memory,
+ResultCode ServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                             Core::Timing::CoreTiming& core_timing) {
-    return QueueSyncRequest(std::move(thread), memory);
+    return QueueSyncRequest(thread, memory);
 }
 
 } // namespace Kernel

src/core/hle/kernel/server_session.h

@@ -95,7 +95,7 @@ public:
      *
      * @returns ResultCode from the operation.
      */
-    ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
+    ResultCode HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory,
                                  Core::Timing::CoreTiming& core_timing);
 
     /// Called when a client disconnection occurs.
@@ -130,7 +130,7 @@ public:
 private:
 
     /// Queues a sync request from the emulated application.
-    ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory);
+    ResultCode QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory);
 
     /// Completes a sync request from the emulated application.
     ResultCode CompleteSyncRequest(HLERequestContext& context);
@@ -148,16 +148,6 @@ private:
     /// This is the list of domain request handlers (after conversion to a domain)
     std::vector<std::shared_ptr<SessionRequestHandler>> domain_request_handlers;
 
-    /// List of threads that are pending a response after a sync request. This list is processed in
-    /// a LIFO manner, thus, the last request will be dispatched first.
-    /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
-    std::vector<std::shared_ptr<KThread>> pending_requesting_threads;
-
-    /// Thread whose request is currently being handled. A request is considered "handled" when a
-    /// response is sent via svcReplyAndReceive.
-    /// TODO(Subv): Find a better name for this.
-    std::shared_ptr<KThread> currently_handling;
-
     /// When set to True, converts the session to a domain at the end of the command
     bool convert_to_domain{};
 

src/core/hle/service/prepo/prepo.cpp

@@ -60,7 +60,7 @@ private:
         const auto process_id = rp.PopRaw<u64>();
 
         const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [ctx] {
+        const auto data2 = [&ctx] {
            if (ctx.CanReadBuffer(1)) {
                return ctx.ReadBuffer(1);
            }
@@ -87,7 +87,7 @@ private:
         const auto process_id = rp.PopRaw<u64>();
 
         const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [ctx] {
+        const auto data2 = [&ctx] {
            if (ctx.CanReadBuffer(1)) {
                return ctx.ReadBuffer(1);
            }
@@ -139,7 +139,7 @@ private:
         const auto title_id = rp.PopRaw<u64>();
 
         const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [ctx] {
+        const auto data2 = [&ctx] {
            if (ctx.CanReadBuffer(1)) {
                return ctx.ReadBuffer(1);
            }
@@ -163,7 +163,7 @@ private:
         const auto title_id = rp.PopRaw<u64>();
 
         const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [ctx] {
+        const auto data2 = [&ctx] {
            if (ctx.CanReadBuffer(1)) {
                return ctx.ReadBuffer(1);
            }
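
The prepo.cpp changes all fix the same latent issue: `[ctx]` captures the request context by value, so the lambda reads buffers through a copy of the HLERequestContext rather than the live request, while `[&ctx]` captures it by reference. A standalone illustration of the difference (the Context type here is a toy, not the emulator's):

    #include <cassert>

    struct Context {
        int reads = 0;
        int ReadBuffer() {
            ++reads;
            return 42;
        }
    };

    int main() {
        Context ctx;

        // By-value capture: the lambda owns a private copy; any state the
        // call touches belongs to the copy and is lost with it.
        auto by_value = [ctx]() mutable { return ctx.ReadBuffer(); };
        by_value();
        assert(ctx.reads == 0);

        // By-reference capture: the lambda observes and updates the original.
        auto by_ref = [&ctx] { return ctx.ReadBuffer(); };
        by_ref();
        assert(ctx.reads == 1);
    }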