
kernel: remove kernel_

Liam 2023-03-07 10:49:41 -05:00
parent 9368e17a92
commit c0b9e93b77
41 changed files with 290 additions and 295 deletions

View file

@@ -12,11 +12,11 @@ KAutoObject* KAutoObject::Create(KAutoObject* obj) {
 }
 
 void KAutoObject::RegisterWithKernel() {
-    kernel.RegisterKernelObject(this);
+    m_kernel.RegisterKernelObject(this);
 }
 
 void KAutoObject::UnregisterWithKernel() {
-    kernel.UnregisterKernelObject(this);
+    m_kernel.UnregisterKernelObject(this);
 }
 
 } // namespace Kernel

View file

@@ -80,7 +80,7 @@ private:
     KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);
 
 public:
-    explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
+    explicit KAutoObject(KernelCore& kernel) : m_kernel(kernel) {
         RegisterWithKernel();
     }
     virtual ~KAutoObject() = default;
@@ -169,7 +169,7 @@ private:
     void UnregisterWithKernel();
 
 protected:
-    KernelCore& kernel;
+    KernelCore& m_kernel;
 
 private:
     std::atomic<u32> m_ref_count{};
@@ -179,7 +179,7 @@ class KAutoObjectWithListContainer;
 
 class KAutoObjectWithList : public KAutoObject, public boost::intrusive::set_base_hook<> {
 public:
-    explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_) {}
+    explicit KAutoObjectWithList(KernelCore& kernel) : KAutoObject(kernel) {}
 
     static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
         const u64 lid = lhs.GetId();

View file

@@ -11,7 +11,7 @@
 namespace Kernel {
 
-KClientPort::KClientPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
 
 KClientPort::~KClientPort() = default;
 
 void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
@@ -23,7 +23,7 @@ void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
 }
 
 void KClientPort::OnSessionFinalized() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     if (const auto prev = m_num_sessions--; prev == m_max_sessions) {
         this->NotifyAvailable();
@@ -58,12 +58,12 @@ Result KClientPort::CreateSession(KClientSession** out) {
     // Reserve a new session from the resource limit.
     //! FIXME: we are reserving this from the wrong resource limit!
-    KScopedResourceReservation session_reservation(kernel.ApplicationProcess()->GetResourceLimit(),
-                                                   LimitableResource::SessionCountMax);
+    KScopedResourceReservation session_reservation(
+        m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax);
     R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);
 
     // Allocate a session normally.
-    session = KSession::Create(kernel);
+    session = KSession::Create(m_kernel);
 
     // Check that we successfully created a session.
     R_UNLESS(session != nullptr, ResultOutOfResource);
@@ -105,7 +105,7 @@ Result KClientPort::CreateSession(KClientSession** out) {
     session_reservation.Commit();
 
     // Register the session.
-    KSession::Register(kernel, session);
+    KSession::Register(m_kernel, session);
     ON_RESULT_FAILURE {
         session->GetClientSession().Close();
         session->GetServerSession().Close();

View file

@@ -12,8 +12,7 @@ namespace Kernel {
 static constexpr u32 MessageBufferSize = 0x100;
 
-KClientSession::KClientSession(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
 
 KClientSession::~KClientSession() = default;
 
 void KClientSession::Destroy() {
@@ -25,12 +24,12 @@ void KClientSession::OnServerClosed() {}
 
 Result KClientSession::SendSyncRequest() {
     // Create a session request.
-    KSessionRequest* request = KSessionRequest::Create(kernel);
+    KSessionRequest* request = KSessionRequest::Create(m_kernel);
     R_UNLESS(request != nullptr, ResultOutOfResource);
     SCOPE_EXIT({ request->Close(); });
 
     // Initialize the request.
-    request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
+    request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTLSAddress(), MessageBufferSize);
 
     // Send the request.
     R_RETURN(m_parent->GetServerSession().OnRequest(request));

View file

@@ -30,7 +30,7 @@ class KClientSession final
     KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);
 
 public:
-    explicit KClientSession(KernelCore& kernel_);
+    explicit KClientSession(KernelCore& kernel);
     ~KClientSession() override;
 
     void Initialize(KSession* parent) {

View file

@@ -16,18 +16,18 @@
 namespace Kernel {
 
-KCodeMemory::KCodeMemory(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
+KCodeMemory::KCodeMemory(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}
 
 Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
     // Set members.
-    m_owner = GetCurrentProcessPointer(kernel);
+    m_owner = GetCurrentProcessPointer(m_kernel);
 
     // Get the owner page table.
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
+    m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());
 
     // Lock the memory.
     R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
@@ -74,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
     R_UNLESS(!m_is_mapped, ResultInvalidState);
 
     // Map the memory.
-    R_TRY(GetCurrentProcess(kernel).PageTable().MapPageGroup(
+    R_TRY(GetCurrentProcess(m_kernel).PageTable().MapPageGroup(
         address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
 
     // Mark ourselves as mapped.
@@ -91,8 +91,8 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
     KScopedLightLock lk(m_lock);
 
     // Unmap the memory.
-    R_TRY(GetCurrentProcess(kernel).PageTable().UnmapPageGroup(address, *m_page_group,
-                                                               KMemoryState::CodeOut));
+    R_TRY(GetCurrentProcess(m_kernel).PageTable().UnmapPageGroup(address, *m_page_group,
+                                                                 KMemoryState::CodeOut));
 
     // Mark ourselves as unmapped.
     m_is_mapped = false;

View file

@@ -29,7 +29,7 @@ class KCodeMemory final
     KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);
 
 public:
-    explicit KCodeMemory(KernelCore& kernel_);
+    explicit KCodeMemory(KernelCore& kernel);
 
     Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
     void Finalize() override;

View file

@@ -57,8 +57,8 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
 
 class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
 public:
-    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
-        : KThreadQueue(kernel_) {}
+    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel)
+        : KThreadQueue(kernel) {}
 
     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Remove the thread as a waiter from its owner.
@@ -75,8 +75,8 @@ private:
 
 public:
     explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
-        KernelCore& kernel_, KConditionVariable::ThreadTree* t)
-        : KThreadQueue(kernel_), m_tree(t) {}
+        KernelCore& kernel, KConditionVariable::ThreadTree* t)
+        : KThreadQueue(kernel), m_tree(t) {}
 
     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Remove the thread as a waiter from its owner.

View file

@@ -12,7 +12,7 @@ class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObj
     KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);
 
 public:
-    explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+    explicit KDebug(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
 
     static void PostDestroy(uintptr_t arg) {}
 };

View file

@@ -9,8 +9,8 @@
 namespace Kernel {
 
-KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer(kernel_), m_lock(kernel_), m_is_initialized(false) {}
+KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer(kernel), m_lock(kernel), m_is_initialized(false) {}
 
 KDeviceAddressSpace::~KDeviceAddressSpace() = default;
 
 void KDeviceAddressSpace::Initialize() {

View file

@@ -7,8 +7,8 @@
 namespace Kernel {
 
-KEvent::KEvent(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {}
+KEvent::KEvent(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_readable_event{kernel} {}
 
 KEvent::~KEvent() = default;
 
@@ -36,7 +36,7 @@ void KEvent::Finalize() {
 }
 
 Result KEvent::Signal() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     R_SUCCEED_IF(m_readable_event_destroyed);
@@ -44,7 +44,7 @@ Result KEvent::Signal() {
 }
 
 Result KEvent::Clear() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     R_SUCCEED_IF(m_readable_event_destroyed);

View file

@@ -16,7 +16,7 @@ class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObj
     KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
 
 public:
-    explicit KEvent(KernelCore& kernel_);
+    explicit KEvent(KernelCore& kernel);
     ~KEvent() override;
 
     void Initialize(KProcess* owner);

View file

@@ -7,8 +7,8 @@
 namespace Kernel {
 
-KPort::KPort(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_server{kernel_}, m_client{kernel_} {}
+KPort::KPort(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
 
 KPort::~KPort() = default;
 
@@ -29,7 +29,7 @@ void KPort::Initialize(s32 max_sessions, bool is_light, uintptr_t name) {
 }
 
 void KPort::OnClientClosed() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     if (m_state == State::Normal) {
         m_state = State::ClientClosed;
@@ -37,7 +37,7 @@ void KPort::OnClientClosed() {
 }
 
 void KPort::OnServerClosed() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     if (m_state == State::Normal) {
         m_state = State::ServerClosed;
@@ -45,12 +45,12 @@ void KPort::OnServerClosed() {
 }
 
 bool KPort::IsServerClosed() const {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
     return m_state == State::ServerClosed;
 }
 
 Result KPort::EnqueueSession(KServerSession* session) {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     R_UNLESS(m_state == State::Normal, ResultPortClosed);

View file

@@ -19,7 +19,7 @@ class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjec
     KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);
 
 public:
-    explicit KPort(KernelCore& kernel_);
+    explicit KPort(KernelCore& kernel);
     ~KPort() override;
 
     static void PostDestroy(uintptr_t arg) {}

View file

@@ -126,7 +126,7 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() {
     const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
                        page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
                        main_thread_stack_size};
-    if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
+    if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
         capacity != pool_size) {
         LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
     }
@@ -150,7 +150,7 @@ u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
 }
 
 bool KProcess::ReleaseUserException(KThread* thread) {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     if (exception_thread == thread) {
         exception_thread = nullptr;
@@ -164,7 +164,7 @@ bool KProcess::ReleaseUserException(KThread* thread) {
             next->EndWait(ResultSuccess);
         }
 
-        KScheduler::SetSchedulerUpdateNeeded(kernel);
+        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 
         return true;
     } else {
@@ -173,11 +173,11 @@
 }
 
 void KProcess::PinCurrentThread(s32 core_id) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 
     // Get the current thread.
     KThread* cur_thread =
-        kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+        m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
 
     // If the thread isn't terminated, pin it.
     if (!cur_thread->IsTerminationRequested()) {
@@ -186,27 +186,27 @@ void KProcess::PinCurrentThread(s32 core_id) {
         cur_thread->Pin(core_id);
 
         // An update is needed.
-        KScheduler::SetSchedulerUpdateNeeded(kernel);
+        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
    }
 }
 
 void KProcess::UnpinCurrentThread(s32 core_id) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 
     // Get the current thread.
     KThread* cur_thread =
-        kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+        m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
 
     // Unpin it.
     cur_thread->Unpin();
     UnpinThread(core_id, cur_thread);
 
     // An update is needed.
-    KScheduler::SetSchedulerUpdateNeeded(kernel);
+    KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 }
 
 void KProcess::UnpinThread(KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 
     // Get the thread's core id.
     const auto core_id = thread->GetActiveCore();
@@ -216,7 +216,7 @@ void KProcess::UnpinThread(KThread* thread) {
     thread->Unpin();
 
     // An update is needed.
-    KScheduler::SetSchedulerUpdateNeeded(kernel);
+    KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 }
 
 Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -234,7 +234,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
     }
 
     if (shemen_info == nullptr) {
-        shemen_info = KSharedMemoryInfo::Allocate(kernel);
+        shemen_info = KSharedMemoryInfo::Allocate(m_kernel);
         R_UNLESS(shemen_info != nullptr, ResultOutOfMemory);
 
         shemen_info->Initialize(shmem);
@@ -265,7 +265,7 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a
     if (shemen_info->Close()) {
         shared_memory_list.erase(iter);
-        KSharedMemoryInfo::Free(kernel, shemen_info);
+        KSharedMemoryInfo::Free(m_kernel, shemen_info);
     }
 
     // Close a reference to the shared memory.
@@ -298,7 +298,7 @@ u64 KProcess::GetFreeThreadCount() const {
 Result KProcess::Reset() {
     // Lock the process and the scheduler.
     KScopedLightLock lk(state_lock);
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     // Validate that we're in a state that we can reset.
     R_UNLESS(state != State::Terminated, ResultInvalidState);
@@ -313,7 +313,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
     // Lock ourselves and the scheduler.
     KScopedLightLock lk{state_lock};
     KScopedLightLock list_lk{list_lock};
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     // Validate our state.
     R_UNLESS(state != State::Terminating, ResultInvalidState);
@@ -366,7 +366,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     // Initialize process address space
     if (const Result result{page_table.InitializeForProcess(
             metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
-            0x8000000, code_size, &kernel.GetAppSystemResource(), resource_limit)};
+            0x8000000, code_size, &m_kernel.GetAppSystemResource(), resource_limit)};
         result.IsError()) {
         R_RETURN(result);
     }
@@ -421,7 +421,7 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
     ChangeState(State::Running);
 
-    SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
+    SetupMainThread(m_kernel.System(), *this, main_thread_priority, main_thread_stack_top);
 }
 
 void KProcess::PrepareForTermination() {
@@ -432,7 +432,7 @@ void KProcess::PrepareForTermination() {
             if (thread->GetOwnerProcess() != this)
                 continue;
 
-            if (thread == GetCurrentThreadPointer(kernel))
+            if (thread == GetCurrentThreadPointer(m_kernel))
                 continue;
 
             // TODO(Subv): When are the other running/ready threads terminated?
@@ -443,7 +443,7 @@ void KProcess::PrepareForTermination() {
         }
     };
 
-    stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
+    stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList());
 
     this->DeleteThreadLocalRegion(plr_address);
     plr_address = 0;
@@ -471,7 +471,7 @@ void KProcess::Finalize() {
             shmem->Close();
 
             it = shared_memory_list.erase(it);
-            KSharedMemoryInfo::Free(kernel, info);
+            KSharedMemoryInfo::Free(m_kernel, info);
         }
     }
@@ -494,7 +494,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
     // See if we can get a region from a partially used TLP.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
 
         if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
             tlr = it->Reserve();
@@ -512,12 +512,12 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
     }
 
     // Allocate a new page.
-    tlp = KThreadLocalPage::Allocate(kernel);
+    tlp = KThreadLocalPage::Allocate(m_kernel);
     R_UNLESS(tlp != nullptr, ResultOutOfMemory);
-    auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); });
+    auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(m_kernel, tlp); });
 
     // Initialize the new page.
-    R_TRY(tlp->Initialize(kernel, this));
+    R_TRY(tlp->Initialize(m_kernel, this));
 
     // Reserve a TLR.
     tlr = tlp->Reserve();
@@ -525,7 +525,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
 
     // Insert into our tree.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
         if (tlp->IsAllUsed()) {
             fully_used_tlp_tree.insert(*tlp);
         } else {
@@ -544,7 +544,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
     // Release the region.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
 
         // Try to find the page in the partially used list.
         auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
@@ -581,7 +581,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
     if (page_to_free != nullptr) {
         page_to_free->Finalize();
-        KThreadLocalPage::Free(kernel, page_to_free);
+        KThreadLocalPage::Free(m_kernel, page_to_free);
     }
 
     R_SUCCEED();
@@ -639,8 +639,8 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
         page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
     };
 
-    kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
-                                        code_set.memory.size());
+    m_kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
+                                          code_set.memory.size());
 
     ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
     ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
@@ -648,14 +648,14 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
 }
 
 bool KProcess::IsSignaled() const {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
     return is_signaled;
 }
 
-KProcess::KProcess(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
-      handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
-      state_lock{kernel_}, list_lock{kernel_} {}
+KProcess::KProcess(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, page_table{m_kernel.System()},
+      handle_table{m_kernel}, address_arbiter{m_kernel.System()}, condition_var{m_kernel.System()},
+      state_lock{m_kernel}, list_lock{m_kernel} {}
 
 KProcess::~KProcess() = default;

View file

@@ -68,7 +68,7 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
    KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
 
 public:
-    explicit KProcess(KernelCore& kernel_);
+    explicit KProcess(KernelCore& kernel);
     ~KProcess() override;
 
     enum class State {

View file

@@ -11,7 +11,7 @@
 namespace Kernel {
 
-KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
 
 KReadableEvent::~KReadableEvent() = default;
 
@@ -25,7 +25,7 @@ void KReadableEvent::Initialize(KEvent* parent) {
 }
 
 bool KReadableEvent::IsSignaled() const {
-    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     return m_is_signaled;
 }
@@ -33,7 +33,7 @@ bool KReadableEvent::IsSignaled() const {
 
 void KReadableEvent::Destroy() {
     if (m_parent) {
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
             m_parent->OnReadableEventDestroyed();
         }
         m_parent->Close();
@@ -41,7 +41,7 @@ void KReadableEvent::Destroy() {
 }
 
 Result KReadableEvent::Signal() {
-    KScopedSchedulerLock lk{kernel};
+    KScopedSchedulerLock lk{m_kernel};
 
     if (!m_is_signaled) {
         m_is_signaled = true;
@@ -58,7 +58,7 @@ Result KReadableEvent::Clear() {
 }
 
 Result KReadableEvent::Reset() {
-    KScopedSchedulerLock lk{kernel};
+    KScopedSchedulerLock lk{m_kernel};
 
     R_UNLESS(m_is_signaled, ResultInvalidState);

View file

@@ -17,7 +17,7 @@ class KReadableEvent : public KSynchronizationObject {
     KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
 
 public:
-    explicit KReadableEvent(KernelCore& kernel_);
+    explicit KReadableEvent(KernelCore& kernel);
     ~KReadableEvent() override;
 
     void Initialize(KEvent* parent);

View file

@@ -11,8 +11,8 @@
 namespace Kernel {
 constexpr s64 DefaultTimeout = 10000000000; // 10 seconds
 
-KResourceLimit::KResourceLimit(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, lock{kernel_}, cond_var{kernel_} {}
+KResourceLimit::KResourceLimit(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, lock{kernel}, cond_var{kernel} {}
 
 KResourceLimit::~KResourceLimit() = default;
 
 void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) {

View file

@@ -27,7 +27,7 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
     }
 }
 
-KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
+KScheduler::KScheduler(KernelCore& kernel) : m_kernel{kernel} {
     m_switch_fiber = std::make_shared<Common::Fiber>([this] {
         while (true) {
             ScheduleImplFiber();
@@ -47,7 +47,7 @@ void KScheduler::SetInterruptTaskRunnable() {
 void KScheduler::RequestScheduleOnInterrupt() {
     m_state.needs_scheduling = true;
 
-    if (CanSchedule(kernel)) {
+    if (CanSchedule(m_kernel)) {
         ScheduleOnInterrupt();
     }
 }
@@ -97,50 +97,50 @@ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
 }
 
 void KScheduler::Schedule() {
-    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
-    ASSERT(m_core_id == GetCurrentCoreId(kernel));
+    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
+    ASSERT(m_core_id == GetCurrentCoreId(m_kernel));
 
     ScheduleImpl();
 }
 
 void KScheduler::ScheduleOnInterrupt() {
-    GetCurrentThread(kernel).DisableDispatch();
+    GetCurrentThread(m_kernel).DisableDispatch();
     Schedule();
-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 }
 
 void KScheduler::PreemptSingleCore() {
-    GetCurrentThread(kernel).DisableDispatch();
+    GetCurrentThread(m_kernel).DisableDispatch();
 
-    auto* thread = GetCurrentThreadPointer(kernel);
-    auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore());
+    auto* thread = GetCurrentThreadPointer(m_kernel);
+    auto& previous_scheduler = m_kernel.Scheduler(thread->GetCurrentCore());
     previous_scheduler.Unload(thread);
 
     Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber);
 
-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 }
 
 void KScheduler::RescheduleCurrentCore() {
-    ASSERT(!kernel.IsPhantomModeForSingleCore());
-    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+    ASSERT(!m_kernel.IsPhantomModeForSingleCore());
+    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
 
-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 
     if (m_state.needs_scheduling.load()) {
         // Disable interrupts, and then check again if rescheduling is needed.
         // KScopedInterruptDisable intr_disable;
 
-        kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
+        m_kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
     }
 }
 
 void KScheduler::RescheduleCurrentCoreImpl() {
     // Check that scheduling is needed.
     if (m_state.needs_scheduling.load()) [[likely]] {
-        GetCurrentThread(kernel).DisableDispatch();
+        GetCurrentThread(m_kernel).DisableDispatch();
         Schedule();
-        GetCurrentThread(kernel).EnableDispatch();
+        GetCurrentThread(m_kernel).EnableDispatch();
     }
 }
@@ -153,14 +153,14 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core
     // Insert the main thread into the priority queue.
     // {
-    //     KScopedSchedulerLock lk{kernel};
-    //     GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel));
-    //     SetSchedulerUpdateNeeded(kernel);
+    //     KScopedSchedulerLock lk{m_kernel};
+    //     GetPriorityQueue(m_kernel).PushBack(GetCurrentThreadPointer(m_kernel));
+    //     SetSchedulerUpdateNeeded(m_kernel);
     // }
 
     // Bind interrupt handler.
     // kernel.GetInterruptManager().BindHandler(
-    //     GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
+    //     GetSchedulerInterruptHandler(m_kernel), KInterruptName::Scheduler, m_core_id,
     //     KInterruptController::PriorityLevel::Scheduler, false, false);
 
     // Set the current thread.
@@ -168,7 +168,7 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core
 }
 
 void KScheduler::Activate() {
-    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
 
     // m_state.should_count_idle = KTargetSystem::IsDebugMode();
     m_is_active = true;
@@ -176,7 +176,7 @@ void KScheduler::Activate() {
 }
 
 void KScheduler::OnThreadStart() {
-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 }
 
 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
@@ -184,7 +184,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
         prev_highest_thread != highest_thread) [[likely]] {
         if (prev_highest_thread != nullptr) [[likely]] {
             IncrementScheduledCount(prev_highest_thread);
-            prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks());
+            prev_highest_thread->SetLastScheduledTick(m_kernel.System().CoreTiming().GetCPUTicks());
         }
         if (m_state.should_count_idle) {
             if (highest_thread != nullptr) [[likely]] {
@@ -328,8 +328,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
 }
 
 void KScheduler::SwitchThread(KThread* next_thread) {
-    KProcess* const cur_process = GetCurrentProcessPointer(kernel);
-    KThread* const cur_thread = GetCurrentThreadPointer(kernel);
+    KProcess* const cur_process = GetCurrentProcessPointer(m_kernel);
+    KThread* const cur_thread = GetCurrentThreadPointer(m_kernel);
 
     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
@@ -351,7 +351,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
     // Update the CPU time tracking variables.
     const s64 prev_tick = m_last_context_switch_time;
-    const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks();
+    const s64 cur_tick = m_kernel.System().CoreTiming().GetCPUTicks();
     const s64 tick_diff = cur_tick - prev_tick;
 
     cur_thread->AddCpuTime(m_core_id, tick_diff);
     if (cur_process != nullptr) {
@@ -375,7 +375,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
     // }
 
     // Set the new thread.
-    SetCurrentThread(kernel, next_thread);
+    SetCurrentThread(m_kernel, next_thread);
     m_current_thread = next_thread;
 
     // Set the new Thread Local region.
@@ -388,7 +388,7 @@ void KScheduler::ScheduleImpl() {
     std::atomic_thread_fence(std::memory_order_seq_cst);
 
     // Load the appropriate thread pointers for scheduling.
-    KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
+    KThread* const cur_thread{GetCurrentThreadPointer(m_kernel)};
     KThread* highest_priority_thread{m_state.highest_priority_thread};
 
     // Check whether there are runnable interrupt tasks.
@@ -493,7 +493,7 @@ void KScheduler::ScheduleImplFiber() {
 }
 
 void KScheduler::Unload(KThread* thread) {
-    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
     cpu_core.SaveContext(thread->GetContext32());
     cpu_core.SaveContext(thread->GetContext64());
 
     // Save the TPIDR_EL0 system register in case it was modified.
@@ -508,7 +508,7 @@ void KScheduler::Unload(KThread* thread) {
 }
 
 void KScheduler::Reload(KThread* thread) {
-    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
     cpu_core.LoadContext(thread->GetContext32());
     cpu_core.LoadContext(thread->GetContext64());
     cpu_core.SetTlsAddress(thread->GetTLSAddress());
@@ -891,7 +891,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
 
 void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
     if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) {
-        RescheduleCores(kernel, core_mask);
+        RescheduleCores(m_kernel, core_mask);
     }
 }

View file

@@ -149,7 +149,7 @@ private:
         KInterruptTaskManager* interrupt_task_manager{nullptr};
     };
 
-    KernelCore& kernel;
+    KernelCore& m_kernel;
     SchedulingState m_state;
    bool m_is_active{false};
     s32 m_core_id{0};

View file

@@ -12,7 +12,7 @@
 namespace Kernel {
 
-KServerPort::KServerPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
 
 KServerPort::~KServerPort() = default;
 
 void KServerPort::Initialize(KPort* parent) {
@@ -35,7 +35,7 @@ void KServerPort::CleanupSessions() {
         // Get the last session in the list
         KServerSession* session = nullptr;
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
             if (!m_session_list.empty()) {
                 session = std::addressof(m_session_list.front());
                 m_session_list.pop_front();
@@ -74,7 +74,7 @@ bool KServerPort::IsSignaled() const {
 
 void KServerPort::EnqueueSession(KServerSession* session) {
     ASSERT(!this->IsLight());
 
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     // Add the session to our queue.
     m_session_list.push_back(*session);
@@ -86,7 +86,7 @@ void KServerPort::EnqueueSession(KServerSession* session) {
 
 KServerSession* KServerPort::AcceptSession() {
     ASSERT(!this->IsLight());
 
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     // Return the first session in the list.
     if (m_session_list.empty()) {

View file

@@ -22,7 +22,7 @@ class KServerPort final : public KSynchronizationObject {
     KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);
 
 public:
-    explicit KServerPort(KernelCore& kernel_);
+    explicit KServerPort(KernelCore& kernel);
     ~KServerPort() override;
 
     void Initialize(KPort* parent);

View file

@@ -28,8 +28,8 @@ namespace Kernel {
 using ThreadQueueImplForKServerSessionRequest = KThreadQueue;
 
-KServerSession::KServerSession(KernelCore& kernel_)
-    : KSynchronizationObject{kernel_}, m_lock{kernel_} {}
+KServerSession::KServerSession(KernelCore& kernel)
+    : KSynchronizationObject{kernel}, m_lock{m_kernel} {}
 
 KServerSession::~KServerSession() = default;
 
@@ -56,7 +56,7 @@ void KServerSession::OnClientClosed() {
         // Get the next request.
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
 
             if (m_current_request != nullptr && m_current_request != prev_request) {
                 // Set the request, open a reference as we process it.
@@ -135,7 +135,7 @@ void KServerSession::OnClientClosed() {
 }
 
 bool KServerSession::IsSignaled() const {
-    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // If the client is closed, we're always signaled.
     if (m_parent->IsClientClosed()) {
@@ -148,17 +148,17 @@ bool KServerSession::IsSignaled() const {
 
 Result KServerSession::OnRequest(KSessionRequest* request) {
     // Create the wait queue.
-    ThreadQueueImplForKServerSessionRequest wait_queue{kernel};
+    ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel};
 
     {
         // Lock the scheduler.
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
 
         // Ensure that we can handle new requests.
         R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed);
 
         // Check that we're not terminating.
-        R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
+        R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
 
         // Get whether we're empty.
         const bool was_empty = m_request_list.empty();
@@ -176,11 +176,11 @@ Result KServerSession::OnRequest(KSessionRequest* request) {
         R_SUCCEED_IF(request->GetEvent() != nullptr);
 
         // This is a synchronous request, so we should wait for our request to complete.
-        GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
-        GetCurrentThread(kernel).BeginWait(&wait_queue);
+        GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
+        GetCurrentThread(m_kernel).BeginWait(&wait_queue);
     }
 
-    return GetCurrentThread(kernel).GetWaitResult();
+    return GetCurrentThread(m_kernel).GetWaitResult();
 }
 
 Result KServerSession::SendReply(bool is_hle) {
@@ -190,7 +190,7 @@ Result KServerSession::SendReply(bool is_hle) {
     // Get the request.
     KSessionRequest* request;
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
 
         // Get the current request.
         request = m_current_request;
@@ -222,8 +222,8 @@ Result KServerSession::SendReply(bool is_hle) {
         // HLE servers write directly to a pointer to the thread command buffer. Therefore
         // the reply has already been written in this case.
     } else {
-        Core::Memory::Memory& memory{kernel.System().Memory()};
-        KThread* server_thread{GetCurrentThreadPointer(kernel)};
+        Core::Memory::Memory& memory{m_kernel.System().Memory()};
+        KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
         UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
 
         auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
@@ -264,7 +264,7 @@ Result KServerSession::SendReply(bool is_hle) {
             event->Signal();
         } else {
             // End the client thread's wait.
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
 
             if (!client_thread->IsTerminationRequested()) {
                 client_thread->EndWait(client_result);
@@ -285,7 +285,7 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
     KThread* client_thread;
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
 
         // Ensure that we can service the request.
         R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed);
@@ -319,18 +319,18 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
     // bool recv_list_broken = false;
 
     // Receive the message.
-    Core::Memory::Memory& memory{kernel.System().Memory()};
+    Core::Memory::Memory& memory{m_kernel.System().Memory()};
     if (out_context != nullptr) {
         // HLE request.
         u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
         *out_context =
-            std::make_shared<Service::HLERequestContext>(kernel, memory, this, client_thread);
+            std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread);
         (*out_context)->SetSessionRequestManager(manager);
         (*out_context)
             ->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(),
                                                 cmd_buf);
     } else {
-        KThread* server_thread{GetCurrentThreadPointer(kernel)};
+        KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
         UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
 
         auto* src_msg_buffer = memory.GetPointer(client_message);
@@ -350,7 +350,7 @@ void KServerSession::CleanupRequests() {
         // Get the next request.
        KSessionRequest* request = nullptr;
        {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
 
            if (m_current_request) {
                // Choose the current request if we have one.
@@ -401,7 +401,7 @@ void KServerSession::CleanupRequests() {
             event->Signal();
         } else {
             // End the client thread's wait.
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
 
             if (!client_thread->IsTerminationRequested()) {
                 client_thread->EndWait(ResultSessionClosed);

View file

@@ -33,7 +33,7 @@ class KServerSession final : public KSynchronizationObject,
     friend class ServiceThread;
 
 public:
-    explicit KServerSession(KernelCore& kernel_);
+    explicit KServerSession(KernelCore& kernel);
     ~KServerSession() override;
 
     void Destroy() override;

View file

@@ -9,8 +9,8 @@
 namespace Kernel {
 
-KSession::KSession(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_server{kernel_}, m_client{kernel_} {}
+KSession::KSession(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
 
 KSession::~KSession() = default;
 
 void KSession::Initialize(KClientPort* client_port, uintptr_t name) {
@@ -34,7 +34,7 @@ void KSession::Initialize(KClientPort* client_port, uintptr_t name) {
 
     // Set our owner process.
     //! FIXME: this is the wrong process!
-    m_process = kernel.ApplicationProcess();
+    m_process = m_kernel.ApplicationProcess();
     m_process->Open();
 
     // Set our port.

View file

@@ -18,7 +18,7 @@ class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAut
     KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);
 
 public:
-    explicit KSession(KernelCore& kernel_);
+    explicit KSession(KernelCore& kernel);
     ~KSession() override;
 
     void Initialize(KClientPort* port, uintptr_t name);

View file

@@ -158,7 +158,7 @@ public:
     };
 
 public:
-    explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {}
+    explicit KSessionRequest(KernelCore& kernel) : KAutoObject(kernel), m_mappings(kernel) {}
 
     static KSessionRequest* Create(KernelCore& kernel) {
         KSessionRequest* req = KSessionRequest::Allocate(kernel);
@@ -170,13 +170,13 @@ public:
 
     void Destroy() override {
         this->Finalize();
-        KSessionRequest::Free(kernel, this);
+        KSessionRequest::Free(m_kernel, this);
     }
 
     void Initialize(KEvent* event, uintptr_t address, size_t size) {
         m_mappings.Initialize();
 
-        m_thread = GetCurrentThreadPointer(kernel);
+        m_thread = GetCurrentThreadPointer(m_kernel);
         m_event = event;
         m_address = address;
         m_size = size;

View file

@@ -12,7 +12,7 @@
 namespace Kernel {
 
-KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
 
 KSharedMemory::~KSharedMemory() = default;
 
 Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* owner_process,
@@ -28,7 +28,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* ow
     const size_t num_pages = Common::DivideUp(size, PageSize);
 
     // Get the resource limit.
-    KResourceLimit* reslimit = kernel.GetSystemResourceLimit();
+    KResourceLimit* reslimit = m_kernel.GetSystemResourceLimit();
 
     // Reserve memory for ourselves.
     KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax,
@@ -40,11 +40,11 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* ow
     //! HACK: Open continuous mapping from sysmodule pool.
     auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure,
                                                KMemoryManager::Direction::FromBack);
-    m_physical_address = kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
+    m_physical_address = m_kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
     R_UNLESS(m_physical_address != 0, ResultOutOfMemory);
 
     //! Insert the result into our page group.
-    m_page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
+    m_page_group.emplace(m_kernel, &m_kernel.GetSystemSystemResource().GetBlockInfoManager());
     m_page_group->AddBlock(m_physical_address, num_pages);
 
     // Commit our reservation.

View file

@@ -23,7 +23,7 @@ class KSharedMemory final
     KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
 
 public:
-    explicit KSharedMemory(KernelCore& kernel_);
+    explicit KSharedMemory(KernelCore& kernel);
     ~KSharedMemory() override;
 
     Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,

View file

@ -17,9 +17,9 @@ namespace {
class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait { class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
public: public:
ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o, ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel, KSynchronizationObject** o,
KSynchronizationObject::ThreadListNode* n, s32 c) KSynchronizationObject::ThreadListNode* n, s32 c)
: KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {} : KThreadQueueWithoutEndWait(kernel), m_objects(o), m_nodes(n), m_count(c) {}
void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object, void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
Result wait_result) override { Result wait_result) override {
@ -144,13 +144,12 @@ Result KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
R_RETURN(thread->GetWaitResult()); R_RETURN(thread->GetWaitResult());
} }
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {}
: KAutoObjectWithList{kernel_} {}
KSynchronizationObject::~KSynchronizationObject() = default; KSynchronizationObject::~KSynchronizationObject() = default;
void KSynchronizationObject::NotifyAvailable(Result result) { void KSynchronizationObject::NotifyAvailable(Result result) {
KScopedSchedulerLock sl(kernel); KScopedSchedulerLock sl(m_kernel);
// If we're not signaled, we've nothing to notify. // If we're not signaled, we've nothing to notify.
if (!this->IsSignaled()) { if (!this->IsSignaled()) {
@ -168,7 +167,7 @@ std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() co
// If debugging, dump the list of waiters. // If debugging, dump the list of waiters.
{ {
KScopedSchedulerLock lock(kernel); KScopedSchedulerLock lock(m_kernel);
for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
threads.emplace_back(cur_node->thread); threads.emplace_back(cur_node->thread);
} }
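
Note: GetWaitingThreadsForDebugging walks the intrusive waiter list only while holding the scheduler lock, copying the pointers out so the caller never touches the list unlocked. A rough sketch of that snapshot-under-lock idiom, with hypothetical Thread/node types and a plain mutex standing in for KScopedSchedulerLock:

#include <mutex>
#include <vector>

struct Thread;
struct ThreadListNode {
    Thread* thread;
    ThreadListNode* next;
};

extern std::mutex g_scheduler_lock;  // hypothetical scheduler lock

std::vector<Thread*> SnapshotWaiters(ThreadListNode* head) {
    std::vector<Thread*> threads;
    std::lock_guard lk{g_scheduler_lock};  // list may only be walked under the lock
    for (auto* node = head; node != nullptr; node = node->next) {
        threads.push_back(node->thread);
    }
    return threads;  // the copy is safe to inspect after the lock is dropped
}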


@ -21,7 +21,7 @@ class KSystemResource : public KAutoObject {
KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject); KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject);
public: public:
explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {} explicit KSystemResource(KernelCore& kernel) : KAutoObject(kernel) {}
protected: protected:
void SetSecureResource() { void SetSecureResource() {
@ -87,8 +87,8 @@ private:
class KSecureSystemResource final class KSecureSystemResource final
: public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> { : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> {
public: public:
explicit KSecureSystemResource(KernelCore& kernel_) explicit KSecureSystemResource(KernelCore& kernel)
: KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) { : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel) {
// Mark ourselves as being a secure resource. // Mark ourselves as being a secure resource.
this->SetSecureResource(); this->SetSecureResource();
} }


@ -77,14 +77,14 @@ struct ThreadLocalRegion {
class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
public: public:
explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_) explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel)
: KThreadQueueWithoutEndWait(kernel_) {} : KThreadQueueWithoutEndWait(kernel) {}
}; };
class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue { class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
public: public:
explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl) explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel, KThread::WaiterList* wl)
: KThreadQueue(kernel_), m_wait_list(wl) {} : KThreadQueue(kernel), m_wait_list(wl) {}
void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread from the wait list. // Remove the thread from the wait list.
@ -100,8 +100,8 @@ private:
} // namespace } // namespace
KThread::KThread(KernelCore& kernel_) KThread::KThread(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {} : KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {}
KThread::~KThread() = default; KThread::~KThread() = default;
Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
@ -236,7 +236,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
SetInExceptionHandler(); SetInExceptionHandler();
// Set thread ID. // Set thread ID.
thread_id = kernel.CreateNewThreadID(); thread_id = m_kernel.CreateNewThreadID();
// We initialized! // We initialized!
initialized = true; initialized = true;
@ -343,7 +343,7 @@ void KThread::Finalize() {
// Release any waiters. // Release any waiters.
{ {
ASSERT(waiting_lock_info == nullptr); ASSERT(waiting_lock_info == nullptr);
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Check that we have no kernel waiters. // Check that we have no kernel waiters.
ASSERT(num_kernel_waiters == 0); ASSERT(num_kernel_waiters == 0);
@ -374,7 +374,7 @@ void KThread::Finalize() {
it = held_lock_info_list.erase(it); it = held_lock_info_list.erase(it);
// Free the lock info. // Free the lock info.
LockWithPriorityInheritanceInfo::Free(kernel, lock_info); LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
} }
} }
@ -390,7 +390,7 @@ bool KThread::IsSignaled() const {
} }
void KThread::OnTimer() { void KThread::OnTimer() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
// If we're waiting, cancel the wait. // If we're waiting, cancel the wait.
if (GetState() == ThreadState::Waiting) { if (GetState() == ThreadState::Waiting) {
@ -399,12 +399,12 @@ void KThread::OnTimer() {
} }
void KThread::StartTermination() { void KThread::StartTermination() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
// Release user exception and unpin, if relevant. // Release user exception and unpin, if relevant.
if (parent != nullptr) { if (parent != nullptr) {
parent->ReleaseUserException(this); parent->ReleaseUserException(this);
if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) { if (parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
parent->UnpinCurrentThread(core_id); parent->UnpinCurrentThread(core_id);
} }
} }
@ -422,7 +422,7 @@ void KThread::StartTermination() {
KSynchronizationObject::NotifyAvailable(); KSynchronizationObject::NotifyAvailable();
// Clear previous thread in KScheduler. // Clear previous thread in KScheduler.
KScheduler::ClearPreviousThread(kernel, this); KScheduler::ClearPreviousThread(m_kernel, this);
// Register terminated dpc flag. // Register terminated dpc flag.
RegisterDpc(DpcFlag::Terminated); RegisterDpc(DpcFlag::Terminated);
@ -434,7 +434,7 @@ void KThread::FinishTermination() {
for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) { for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
KThread* core_thread{}; KThread* core_thread{};
do { do {
core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread(); core_thread = m_kernel.Scheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this); } while (core_thread == this);
} }
} }
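
Note: FinishTermination above spins on each core's scheduler until the dying thread is no longer the current thread anywhere, so its context cannot still be live when it is destroyed. A simplified sketch of that drain loop, with a hypothetical Scheduler interface:

#include <cstddef>

struct Thread;
struct Scheduler {
    Thread* GetCurrentThread() const;  // hypothetical accessor
};

constexpr std::size_t kNumCores = 4;

void WaitUntilNotRunning(Scheduler (&schedulers)[kNumCores], Thread* dying) {
    for (std::size_t i = 0; i < kNumCores; ++i) {
        // Busy-wait: the other core will eventually schedule away from `dying`.
        while (schedulers[i].GetCurrentThread() == dying) {
        }
    }
}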
@ -449,7 +449,7 @@ void KThread::DoWorkerTaskImpl() {
} }
void KThread::Pin(s32 current_core) { void KThread::Pin(s32 current_core) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
// Set ourselves as pinned. // Set ourselves as pinned.
GetStackParameters().is_pinned = true; GetStackParameters().is_pinned = true;
@ -472,7 +472,7 @@ void KThread::Pin(s32 current_core) {
if (active_core != current_core || physical_affinity_mask.GetAffinityMask() != if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
original_physical_affinity_mask.GetAffinityMask()) { original_physical_affinity_mask.GetAffinityMask()) {
KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask, KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, original_physical_affinity_mask,
active_core); active_core);
} }
} }
@ -492,7 +492,7 @@ void KThread::Pin(s32 current_core) {
} }
void KThread::Unpin() { void KThread::Unpin() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
// Set ourselves as unpinned. // Set ourselves as unpinned.
GetStackParameters().is_pinned = false; GetStackParameters().is_pinned = false;
@ -520,7 +520,7 @@ void KThread::Unpin() {
std::countl_zero(physical_affinity_mask.GetAffinityMask()))); std::countl_zero(physical_affinity_mask.GetAffinityMask())));
} }
} }
KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
} }
} }
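
Note: both Pin and Unpin pick a new active core with std::countl_zero over the 64-bit affinity mask (bit N set means core N is allowed); 63 minus the leading-zero count yields the highest allowed core. A small sketch, assuming the mask is non-zero:

#include <bit>
#include <cstdint>

// Returns the highest-numbered core permitted by the mask.
// Precondition: affinity_mask != 0 (countl_zero(0) would yield 64 -> -1).
int HighestAllowedCore(std::uint64_t affinity_mask) {
    return 63 - std::countl_zero(affinity_mask);
}
// Example: 0b0110 (cores 1 and 2 allowed) -> 2.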
@ -549,7 +549,7 @@ u16 KThread::GetUserDisableCount() const {
return {}; return {};
} }
auto& memory = kernel.System().Memory(); auto& memory = m_kernel.System().Memory();
return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count)); return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
} }
@ -559,7 +559,7 @@ void KThread::SetInterruptFlag() {
return; return;
} }
auto& memory = kernel.System().Memory(); auto& memory = m_kernel.System().Memory();
memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1); memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
} }
@ -569,12 +569,12 @@ void KThread::ClearInterruptFlag() {
return; return;
} }
auto& memory = kernel.System().Memory(); auto& memory = m_kernel.System().Memory();
memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0); memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
} }
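
Note: the three accessors above all follow one pattern: a 16-bit field of the user-visible thread-local region is read or written at tls_address + offsetof(ThreadLocalRegion, field). A hedged sketch with a hypothetical guest-memory interface (yuzu's real ThreadLocalRegion has more fields than shown here):

#include <cstddef>
#include <cstdint>

struct ThreadLocalRegion {
    std::uint16_t disable_count;
    std::uint16_t interrupt_flag;
};

// Hypothetical guest-memory accessors standing in for Core::Memory.
std::uint16_t Read16(std::uint64_t addr);
void Write16(std::uint64_t addr, std::uint16_t value);

std::uint16_t ReadDisableCount(std::uint64_t tls_address) {
    return Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
}

void SetInterruptFlag(std::uint64_t tls_address) {
    Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
}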
Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Get the virtual mask. // Get the virtual mask.
*out_ideal_core = virtual_ideal_core_id; *out_ideal_core = virtual_ideal_core_id;
@ -584,7 +584,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
} }
Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
ASSERT(num_core_migration_disables >= 0); ASSERT(num_core_migration_disables >= 0);
// Select between core mask and original core mask. // Select between core mask and original core mask.
@ -607,7 +607,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
// Set the core mask. // Set the core mask.
u64 p_affinity_mask = 0; u64 p_affinity_mask = 0;
{ {
KScopedSchedulerLock sl(kernel); KScopedSchedulerLock sl(m_kernel);
ASSERT(num_core_migration_disables >= 0); ASSERT(num_core_migration_disables >= 0);
// If we're updating, set our ideal virtual core. // If we're updating, set our ideal virtual core.
@ -653,7 +653,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
std::countl_zero(physical_affinity_mask.GetAffinityMask())); std::countl_zero(physical_affinity_mask.GetAffinityMask()));
SetActiveCore(new_core); SetActiveCore(new_core);
} }
KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
} }
} else { } else {
// Otherwise, we edit the original affinity for restoration later. // Otherwise, we edit the original affinity for restoration later.
@ -663,12 +663,12 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
} }
// Update the pinned waiter list. // Update the pinned waiter list.
ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list)); ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel, std::addressof(pinned_waiter_list));
{ {
bool retry_update{}; bool retry_update{};
do { do {
// Lock the scheduler. // Lock the scheduler.
KScopedSchedulerLock sl(kernel); KScopedSchedulerLock sl(m_kernel);
// Don't do any further management if our termination has been requested. // Don't do any further management if our termination has been requested.
R_SUCCEED_IF(IsTerminationRequested()); R_SUCCEED_IF(IsTerminationRequested());
@ -681,7 +681,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
s32 thread_core; s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
++thread_core) { ++thread_core) {
if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) { if (m_kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true; thread_is_current = true;
break; break;
} }
@ -693,12 +693,12 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
// If the thread is pinned, we want to wait until it's not pinned. // If the thread is pinned, we want to wait until it's not pinned.
if (GetStackParameters().is_pinned) { if (GetStackParameters().is_pinned) {
// Verify that the current thread isn't terminating. // Verify that the current thread isn't terminating.
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
ResultTerminationRequested); ResultTerminationRequested);
// Wait until the thread isn't pinned any more. // Wait until the thread isn't pinned any more.
pinned_waiter_list.push_back(GetCurrentThread(kernel)); pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_));
} else { } else {
// If the thread isn't pinned, release the scheduler lock and retry until it's // If the thread isn't pinned, release the scheduler lock and retry until it's
// not current. // not current.
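
Note: the loop above has two outcomes each iteration: if the target is pinned, the caller parks itself on the pinned-waiter list and blocks; otherwise it drops the scheduler lock and retries until the target is no longer current on any core. A heavily simplified sketch of that shape (Thread, WaitQueue, and the helpers are hypothetical):

struct Thread;
struct WaitQueue;
bool IsPinned(Thread*);
bool IsCurrentOnAnyCore(Thread*);
void BlockCurrentThreadOn(WaitQueue&);

void WaitUntilSettable(Thread* target, WaitQueue& queue) {
    bool retry = true;
    while (retry) {
        // (scheduler lock is held for the body of each iteration)
        if (IsPinned(target)) {
            BlockCurrentThreadOn(queue);  // woken when the target is unpinned
            retry = false;
        } else {
            retry = IsCurrentOnAnyCore(target);  // lock released; loop again
        }
    }
}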
@ -714,13 +714,13 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
void KThread::SetBasePriority(s32 value) { void KThread::SetBasePriority(s32 value) {
ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority); ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Change our base priority. // Change our base priority.
base_priority = value; base_priority = value;
// Perform a priority restoration. // Perform a priority restoration.
RestorePriority(kernel, this); RestorePriority(m_kernel, this);
} }
KThread* KThread::GetLockOwner() const { KThread* KThread::GetLockOwner() const {
@ -729,7 +729,7 @@ KThread* KThread::GetLockOwner() const {
void KThread::IncreaseBasePriority(s32 priority_) { void KThread::IncreaseBasePriority(s32 priority_) {
ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority); ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority);
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
ASSERT(!this->GetStackParameters().is_pinned); ASSERT(!this->GetStackParameters().is_pinned);
// Set our base priority. // Set our base priority.
@ -737,12 +737,12 @@ void KThread::IncreaseBasePriority(s32 priority_) {
base_priority = priority_; base_priority = priority_;
// Perform a priority restoration. // Perform a priority restoration.
RestorePriority(kernel, this); RestorePriority(m_kernel, this);
} }
} }
void KThread::RequestSuspend(SuspendType type) { void KThread::RequestSuspend(SuspendType type) {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Note the request in our flags. // Note the request in our flags.
suspend_request_flags |= suspend_request_flags |=
@ -753,7 +753,7 @@ void KThread::RequestSuspend(SuspendType type) {
} }
void KThread::Resume(SuspendType type) { void KThread::Resume(SuspendType type) {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Clear the request in our flags. // Clear the request in our flags.
suspend_request_flags &= suspend_request_flags &=
@ -764,7 +764,7 @@ void KThread::Resume(SuspendType type) {
} }
void KThread::WaitCancel() { void KThread::WaitCancel() {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Check if we're waiting and cancellable. // Check if we're waiting and cancellable.
if (this->GetState() == ThreadState::Waiting && cancellable) { if (this->GetState() == ThreadState::Waiting && cancellable) {
@ -777,7 +777,7 @@ void KThread::WaitCancel() {
} }
void KThread::TrySuspend() { void KThread::TrySuspend() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
ASSERT(IsSuspendRequested()); ASSERT(IsSuspendRequested());
// Ensure that we have no waiters. // Ensure that we have no waiters.
@ -791,7 +791,7 @@ void KThread::TrySuspend() {
} }
void KThread::UpdateState() { void KThread::UpdateState() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
// Set our suspend flags in state. // Set our suspend flags in state.
const ThreadState old_state = thread_state.load(std::memory_order_relaxed); const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
@ -801,37 +801,37 @@ void KThread::UpdateState() {
// Note the state change in scheduler. // Note the state change in scheduler.
if (new_state != old_state) { if (new_state != old_state) {
KScheduler::OnThreadStateChanged(kernel, this, old_state); KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
} }
} }
void KThread::Continue() { void KThread::Continue() {
ASSERT(kernel.GlobalSchedulerContext().IsLocked()); ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
// Clear our suspend flags in state. // Clear our suspend flags in state.
const ThreadState old_state = thread_state.load(std::memory_order_relaxed); const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed); thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
// Note the state change in scheduler. // Note the state change in scheduler.
KScheduler::OnThreadStateChanged(kernel, this, old_state); KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
} }
void KThread::CloneFpuStatus() { void KThread::CloneFpuStatus() {
// We shouldn't reach here when starting kernel threads. // We shouldn't reach here when starting kernel threads.
ASSERT(this->GetOwnerProcess() != nullptr); ASSERT(this->GetOwnerProcess() != nullptr);
ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(kernel)); ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
if (this->GetOwnerProcess()->Is64BitProcess()) { if (this->GetOwnerProcess()->Is64BitProcess()) {
// Clone FPSR and FPCR. // Clone FPSR and FPCR.
ThreadContext64 cur_ctx{}; ThreadContext64 cur_ctx{};
kernel.System().CurrentArmInterface().SaveContext(cur_ctx); m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
this->GetContext64().fpcr = cur_ctx.fpcr; this->GetContext64().fpcr = cur_ctx.fpcr;
this->GetContext64().fpsr = cur_ctx.fpsr; this->GetContext64().fpsr = cur_ctx.fpsr;
} else { } else {
// Clone FPSCR. // Clone FPSCR.
ThreadContext32 cur_ctx{}; ThreadContext32 cur_ctx{};
kernel.System().CurrentArmInterface().SaveContext(cur_ctx); m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
this->GetContext32().fpscr = cur_ctx.fpscr; this->GetContext32().fpscr = cur_ctx.fpscr;
} }
@ -844,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Set the activity. // Set the activity.
{ {
// Lock the scheduler. // Lock the scheduler.
KScopedSchedulerLock sl(kernel); KScopedSchedulerLock sl(m_kernel);
// Verify our state. // Verify our state.
const auto cur_state = this->GetState(); const auto cur_state = this->GetState();
@ -871,13 +871,13 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
// If the thread is now paused, update the pinned waiter list. // If the thread is now paused, update the pinned waiter list.
if (activity == Svc::ThreadActivity::Paused) { if (activity == Svc::ThreadActivity::Paused) {
ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel,
std::addressof(pinned_waiter_list)); std::addressof(pinned_waiter_list));
bool thread_is_current; bool thread_is_current;
do { do {
// Lock the scheduler. // Lock the scheduler.
KScopedSchedulerLock sl(kernel); KScopedSchedulerLock sl(m_kernel);
// Don't do any further management if our termination has been requested. // Don't do any further management if our termination has been requested.
R_SUCCEED_IF(this->IsTerminationRequested()); R_SUCCEED_IF(this->IsTerminationRequested());
@ -888,17 +888,17 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
// Check whether the thread is pinned. // Check whether the thread is pinned.
if (this->GetStackParameters().is_pinned) { if (this->GetStackParameters().is_pinned) {
// Verify that the current thread isn't terminating. // Verify that the current thread isn't terminating.
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
ResultTerminationRequested); ResultTerminationRequested);
// Wait until the thread isn't pinned any more. // Wait until the thread isn't pinned any more.
pinned_waiter_list.push_back(GetCurrentThread(kernel)); pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_));
} else { } else {
// Check if the thread is currently running. // Check if the thread is currently running.
// If it is, we'll need to retry. // If it is, we'll need to retry.
for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) { for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) { if (m_kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true; thread_is_current = true;
break; break;
} }
@ -917,7 +917,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
// Get the context. // Get the context.
{ {
// Lock the scheduler. // Lock the scheduler.
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Verify that we're suspended. // Verify that we're suspended.
R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState); R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
@ -946,7 +946,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
} }
void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) { void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Set ourselves as the lock's owner. // Set ourselves as the lock's owner.
lock_info->SetOwner(this); lock_info->SetOwner(this);
@ -957,7 +957,7 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_, KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_,
bool is_kernel_address_key_) { bool is_kernel_address_key_) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Try to find an existing held lock. // Try to find an existing held lock.
for (auto& held_lock : held_lock_info_list) { for (auto& held_lock : held_lock_info_list) {
@ -971,7 +971,7 @@ KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_ke
} }
void KThread::AddWaiterImpl(KThread* thread) { void KThread::AddWaiterImpl(KThread* thread) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
ASSERT(thread->GetConditionVariableTree() == nullptr); ASSERT(thread->GetConditionVariableTree() == nullptr);
// Get the thread's address key. // Get the thread's address key.
@ -981,7 +981,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
// Keep track of how many kernel waiters we have. // Keep track of how many kernel waiters we have.
if (is_kernel_address_key_) { if (is_kernel_address_key_) {
ASSERT((num_kernel_waiters++) >= 0); ASSERT((num_kernel_waiters++) >= 0);
KScheduler::SetSchedulerUpdateNeeded(kernel); KScheduler::SetSchedulerUpdateNeeded(m_kernel);
} }
// Get the relevant lock info. // Get the relevant lock info.
@ -989,7 +989,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
if (lock_info == nullptr) { if (lock_info == nullptr) {
// Create a new lock for the address key. // Create a new lock for the address key.
lock_info = lock_info =
LockWithPriorityInheritanceInfo::Create(kernel, address_key_, is_kernel_address_key_); LockWithPriorityInheritanceInfo::Create(m_kernel, address_key_, is_kernel_address_key_);
// Add the new lock to our list. // Add the new lock to our list.
this->AddHeldLock(lock_info); this->AddHeldLock(lock_info);
@ -1000,12 +1000,12 @@ void KThread::AddWaiterImpl(KThread* thread) {
} }
void KThread::RemoveWaiterImpl(KThread* thread) { void KThread::RemoveWaiterImpl(KThread* thread) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Keep track of how many kernel waiters we have. // Keep track of how many kernel waiters we have.
if (thread->GetIsKernelAddressKey()) { if (thread->GetIsKernelAddressKey()) {
ASSERT((num_kernel_waiters--) > 0); ASSERT((num_kernel_waiters--) > 0);
KScheduler::SetSchedulerUpdateNeeded(kernel); KScheduler::SetSchedulerUpdateNeeded(m_kernel);
} }
// Get the info for the lock the thread is waiting on. // Get the info for the lock the thread is waiting on.
@ -1015,7 +1015,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
// Remove the waiter. // Remove the waiter.
if (lock_info->RemoveWaiter(thread)) { if (lock_info->RemoveWaiter(thread)) {
held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
LockWithPriorityInheritanceInfo::Free(kernel, lock_info); LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
} }
} }
@ -1076,7 +1076,7 @@ void KThread::AddWaiter(KThread* thread) {
// If the thread has a higher priority than us, we should inherit. // If the thread has a higher priority than us, we should inherit.
if (thread->GetPriority() < this->GetPriority()) { if (thread->GetPriority() < this->GetPriority()) {
RestorePriority(kernel, this); RestorePriority(m_kernel, this);
} }
} }
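
Note: AddWaiter/RemoveWaiter implement priority inheritance: when a higher-priority thread starts waiting on a lock we hold, our effective priority is raised to match, and when the relationship ends, priority falls back toward the base. A toy illustration of the rule (smaller number = higher priority, per the Horizon convention visible in the asserts above):

#include <algorithm>

struct SimpleThread {
    int base_priority;
    int effective_priority;
};

// On add: the owner inherits the waiter's priority if it is higher (numerically lower).
void OnWaiterAdded(SimpleThread& owner, const SimpleThread& waiter) {
    owner.effective_priority = std::min(owner.effective_priority, waiter.effective_priority);
}

// On remove: fall back toward base priority (the real kernel re-scans remaining waiters).
void OnWaiterRemoved(SimpleThread& owner, int highest_remaining_waiter_priority) {
    owner.effective_priority =
        std::min(owner.base_priority, highest_remaining_waiter_priority);
}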
@ -1087,12 +1087,12 @@ void KThread::RemoveWaiter(KThread* thread) {
// lower priority. // lower priority.
if (this->GetPriority() == thread->GetPriority() && if (this->GetPriority() == thread->GetPriority() &&
this->GetPriority() < this->GetBasePriority()) { this->GetPriority() < this->GetBasePriority()) {
RestorePriority(kernel, this); RestorePriority(m_kernel, this);
} }
} }
KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) { KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
// Get the relevant lock info. // Get the relevant lock info.
auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_); auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_);
@ -1108,7 +1108,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
if (lock_info->GetIsKernelAddressKey()) { if (lock_info->GetIsKernelAddressKey()) {
num_kernel_waiters -= lock_info->GetWaiterCount(); num_kernel_waiters -= lock_info->GetWaiterCount();
ASSERT(num_kernel_waiters >= 0); ASSERT(num_kernel_waiters >= 0);
KScheduler::SetSchedulerUpdateNeeded(kernel); KScheduler::SetSchedulerUpdateNeeded(m_kernel);
} }
ASSERT(lock_info->GetWaiterCount() > 0); ASSERT(lock_info->GetWaiterCount() > 0);
@ -1120,7 +1120,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
*out_has_waiters = false; *out_has_waiters = false;
// Free the lock info, since it has no waiters. // Free the lock info, since it has no waiters.
LockWithPriorityInheritanceInfo::Free(kernel, lock_info); LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
} else { } else {
// There are additional waiters on the lock. // There are additional waiters on the lock.
*out_has_waiters = true; *out_has_waiters = true;
@ -1142,7 +1142,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
// to lower priority. // to lower priority.
if (this->GetPriority() == next_lock_owner->GetPriority() && if (this->GetPriority() == next_lock_owner->GetPriority() &&
this->GetPriority() < this->GetBasePriority()) { this->GetPriority() < this->GetBasePriority()) {
RestorePriority(kernel, this); RestorePriority(m_kernel, this);
// NOTE: No need to restore priority on the next lock owner, because it was already the // NOTE: No need to restore priority on the next lock owner, because it was already the
// highest priority waiter on the lock. // highest priority waiter on the lock.
} }
@ -1153,18 +1153,18 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
Result KThread::Run() { Result KThread::Run() {
while (true) { while (true) {
KScopedSchedulerLock lk{kernel}; KScopedSchedulerLock lk{m_kernel};
// If either this thread or the current thread are requesting termination, note it. // If either this thread or the current thread are requesting termination, note it.
R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested); R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested);
R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
// Ensure our thread state is correct. // Ensure our thread state is correct.
R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState); R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState);
// If the current thread has been asked to suspend, suspend it and retry. // If the current thread has been asked to suspend, suspend it and retry.
if (GetCurrentThread(kernel).IsSuspended()) { if (GetCurrentThread(m_kernel).IsSuspended()) {
GetCurrentThread(kernel).UpdateState(); GetCurrentThread(m_kernel).UpdateState();
continue; continue;
} }
@ -1184,7 +1184,7 @@ Result KThread::Run() {
} }
void KThread::Exit() { void KThread::Exit() {
ASSERT(this == GetCurrentThreadPointer(kernel)); ASSERT(this == GetCurrentThreadPointer(m_kernel));
// Release the thread resource hint, running thread count from parent. // Release the thread resource hint, running thread count from parent.
if (parent != nullptr) { if (parent != nullptr) {
@ -1195,7 +1195,7 @@ void KThread::Exit() {
// Perform termination. // Perform termination.
{ {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Disallow all suspension. // Disallow all suspension.
suspend_allowed_flags = 0; suspend_allowed_flags = 0;
@ -1208,21 +1208,21 @@ void KThread::Exit() {
StartTermination(); StartTermination();
// Register the thread as a work task. // Register the thread as a work task.
KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this); KWorkerTaskManager::AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
} }
UNREACHABLE_MSG("KThread::Exit() would return"); UNREACHABLE_MSG("KThread::Exit() would return");
} }
Result KThread::Terminate() { Result KThread::Terminate() {
ASSERT(this != GetCurrentThreadPointer(kernel)); ASSERT(this != GetCurrentThreadPointer(m_kernel));
// Request the thread terminate if it hasn't already. // Request the thread terminate if it hasn't already.
if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) { if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
// If the thread isn't terminated, wait for it to terminate. // If the thread isn't terminated, wait for it to terminate.
s32 index; s32 index;
KSynchronizationObject* objects[] = {this}; KSynchronizationObject* objects[] = {this};
R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1, R_TRY(KSynchronizationObject::Wait(m_kernel, std::addressof(index), objects, 1,
Svc::WaitInfinite)); Svc::WaitInfinite));
} }
@ -1230,9 +1230,9 @@ Result KThread::Terminate() {
} }
ThreadState KThread::RequestTerminate() { ThreadState KThread::RequestTerminate() {
ASSERT(this != GetCurrentThreadPointer(kernel)); ASSERT(this != GetCurrentThreadPointer(m_kernel));
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Determine if this is the first termination request. // Determine if this is the first termination request.
const bool first_request = [&]() -> bool { const bool first_request = [&]() -> bool {
@ -1268,10 +1268,10 @@ ThreadState KThread::RequestTerminate() {
// If the thread is runnable, send a termination interrupt to other cores. // If the thread is runnable, send a termination interrupt to other cores.
if (this->GetState() == ThreadState::Runnable) { if (this->GetState() == ThreadState::Runnable) {
if (const u64 core_mask = physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel)); if (const u64 core_mask = physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(m_kernel));
core_mask != 0) { core_mask != 0) {
Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask); Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask);
} }
} }
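
Note: RequestTerminate only sends an inter-processor interrupt to cores other than the one it is running on: the current core's bit is masked out of the affinity mask first, and no IPI is sent if nothing remains. A one-liner sketch of that mask computation:

#include <cstdint>

// Bits of `affinity_mask` minus the bit for the core we are executing on.
std::uint64_t OtherCoresMask(std::uint64_t affinity_mask, unsigned current_core) {
    return affinity_mask & ~(std::uint64_t{1} << current_core);
}
// Example: affinity 0b1111 on core 2 -> 0b1011; a result of 0 means no IPI.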
@ -1285,15 +1285,15 @@ ThreadState KThread::RequestTerminate() {
} }
Result KThread::Sleep(s64 timeout) { Result KThread::Sleep(s64 timeout) {
ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); ASSERT(!m_kernel.GlobalSchedulerContext().IsLocked());
ASSERT(this == GetCurrentThreadPointer(kernel)); ASSERT(this == GetCurrentThreadPointer(m_kernel));
ASSERT(timeout > 0); ASSERT(timeout > 0);
ThreadQueueImplForKThreadSleep wait_queue_(kernel); ThreadQueueImplForKThreadSleep wait_queue_(m_kernel);
KHardwareTimer* timer{}; KHardwareTimer* timer{};
{ {
// Setup the scheduling lock and sleep. // Setup the scheduling lock and sleep.
KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), this, timeout); KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), this, timeout);
// Check if the thread should terminate. // Check if the thread should terminate.
if (this->IsTerminationRequested()) { if (this->IsTerminationRequested()) {
@ -1311,7 +1311,7 @@ Result KThread::Sleep(s64 timeout) {
} }
void KThread::RequestDummyThreadWait() { void KThread::RequestDummyThreadWait() {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
ASSERT(this->IsDummyThread()); ASSERT(this->IsDummyThread());
// We will block when the scheduler lock is released. // We will block when the scheduler lock is released.
@ -1319,7 +1319,7 @@ void KThread::RequestDummyThreadWait() {
} }
void KThread::DummyThreadBeginWait() { void KThread::DummyThreadBeginWait() {
if (!this->IsDummyThread() || kernel.IsPhantomModeForSingleCore()) { if (!this->IsDummyThread() || m_kernel.IsPhantomModeForSingleCore()) {
// Occurs in single core mode. // Occurs in single core mode.
return; return;
} }
@ -1329,7 +1329,7 @@ void KThread::DummyThreadBeginWait() {
} }
void KThread::DummyThreadEndWait() { void KThread::DummyThreadEndWait() {
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
ASSERT(this->IsDummyThread()); ASSERT(this->IsDummyThread());
// Wake up the waiting thread. // Wake up the waiting thread.
@ -1347,7 +1347,7 @@ void KThread::BeginWait(KThreadQueue* queue) {
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) { void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) {
// Lock the scheduler. // Lock the scheduler.
KScopedSchedulerLock sl(kernel); KScopedSchedulerLock sl(m_kernel);
// If we're waiting, notify our queue that we're available. // If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) { if (GetState() == ThreadState::Waiting) {
@ -1357,7 +1357,7 @@ void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wa
void KThread::EndWait(Result wait_result_) { void KThread::EndWait(Result wait_result_) {
// Lock the scheduler. // Lock the scheduler.
KScopedSchedulerLock sl(kernel); KScopedSchedulerLock sl(m_kernel);
// If we're waiting, notify our queue that we're available. // If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) { if (GetState() == ThreadState::Waiting) {
@ -1373,7 +1373,7 @@ void KThread::EndWait(Result wait_result_) {
void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) { void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
// Lock the scheduler. // Lock the scheduler.
KScopedSchedulerLock sl(kernel); KScopedSchedulerLock sl(m_kernel);
// If we're waiting, notify our queue that we're available. // If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) { if (GetState() == ThreadState::Waiting) {
@ -1382,7 +1382,7 @@ void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
} }
void KThread::SetState(ThreadState state) { void KThread::SetState(ThreadState state) {
KScopedSchedulerLock sl{kernel}; KScopedSchedulerLock sl{m_kernel};
// Clear debugging state // Clear debugging state
SetMutexWaitAddressForDebugging({}); SetMutexWaitAddressForDebugging({});
@ -1393,7 +1393,7 @@ void KThread::SetState(ThreadState state) {
static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)), static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
std::memory_order_relaxed); std::memory_order_relaxed);
if (thread_state.load(std::memory_order_relaxed) != old_state) { if (thread_state.load(std::memory_order_relaxed) != old_state) {
KScheduler::OnThreadStateChanged(kernel, this, old_state); KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
} }
} }
@ -1427,20 +1427,20 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
KScopedDisableDispatch::~KScopedDisableDispatch() { KScopedDisableDispatch::~KScopedDisableDispatch() {
// If we are shutting down the kernel, none of this is relevant anymore. // If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) { if (m_kernel.IsShuttingDown()) {
return; return;
} }
if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { if (GetCurrentThread(m_kernel).GetDisableDispatchCount() <= 1) {
auto* scheduler = kernel.CurrentScheduler(); auto* scheduler = m_kernel.CurrentScheduler();
if (scheduler && !kernel.IsPhantomModeForSingleCore()) { if (scheduler && !m_kernel.IsPhantomModeForSingleCore()) {
scheduler->RescheduleCurrentCore(); scheduler->RescheduleCurrentCore();
} else { } else {
KScheduler::RescheduleCurrentHLEThread(kernel); KScheduler::RescheduleCurrentHLEThread(m_kernel);
} }
} else { } else {
GetCurrentThread(kernel).EnableDispatch(); GetCurrentThread(m_kernel).EnableDispatch();
} }
} }
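
Note: KScopedDisableDispatch pairs a disable in the constructor with a conditional reschedule-or-enable in the destructor shown above. A stripped-down sketch of the same RAII shape, with a hypothetical DispatchState in place of the kernel/thread plumbing (the real destructor also handles shutdown and phantom-mode cases):

// Hypothetical per-thread dispatch state.
struct DispatchState {
    int disable_count = 0;
    void Reschedule();  // pick a new current thread
};

class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(DispatchState& s) : m_state(s) {
        ++m_state.disable_count;  // scheduling is off while this object lives
    }
    ~ScopedDisableDispatch() {
        if (--m_state.disable_count == 0) {
            m_state.Reschedule();  // last enabler performs the deferred switch
        }
    }
private:
    DispatchState& m_state;
};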


@ -128,7 +128,7 @@ public:
static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1; static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2; static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2;
explicit KThread(KernelCore& kernel_); explicit KThread(KernelCore& kernel);
~KThread() override; ~KThread() override;
public: public:
@ -494,12 +494,12 @@ public:
} }
void DisableDispatch() { void DisableDispatch() {
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() >= 0);
this->GetStackParameters().disable_count++; this->GetStackParameters().disable_count++;
} }
void EnableDispatch() { void EnableDispatch() {
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() > 0);
this->GetStackParameters().disable_count--; this->GetStackParameters().disable_count--;
} }
@ -970,9 +970,9 @@ public:
class KScopedDisableDispatch { class KScopedDisableDispatch {
public: public:
[[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} { [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel) : m_kernel{kernel} {
// If we are shutting down the kernel, none of this is relevant anymore. // If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) { if (m_kernel.IsShuttingDown()) {
return; return;
} }
GetCurrentThread(kernel).DisableDispatch(); GetCurrentThread(kernel).DisableDispatch();
@ -981,7 +981,7 @@ public:
~KScopedDisableDispatch(); ~KScopedDisableDispatch();
private: private:
KernelCore& kernel; KernelCore& m_kernel;
}; };
inline void KTimerTask::OnTimer() { inline void KTimerTask::OnTimer() {


@ -31,7 +31,7 @@ private:
class KThreadQueueWithoutEndWait : public KThreadQueue { class KThreadQueueWithoutEndWait : public KThreadQueue {
public: public:
explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {} explicit KThreadQueueWithoutEndWait(KernelCore& kernel) : KThreadQueue(kernel) {}
void EndWait(KThread* waiting_thread, Result wait_result) override final; void EndWait(KThread* waiting_thread, Result wait_result) override final;
}; };


@ -8,23 +8,23 @@
namespace Kernel { namespace Kernel {
KTransferMemory::KTransferMemory(KernelCore& kernel_) KTransferMemory::KTransferMemory(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel_} {} : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KTransferMemory::~KTransferMemory() = default; KTransferMemory::~KTransferMemory() = default;
Result KTransferMemory::Initialize(VAddr address_, std::size_t size_, Result KTransferMemory::Initialize(VAddr address, std::size_t size,
Svc::MemoryPermission owner_perm_) { Svc::MemoryPermission owner_perm) {
// Set members. // Set members.
m_owner = GetCurrentProcessPointer(kernel); m_owner = GetCurrentProcessPointer(m_kernel);
// TODO(bunnei): Lock for transfer memory // TODO(bunnei): Lock for transfer memory
// Set remaining tracking members. // Set remaining tracking members.
m_owner->Open(); m_owner->Open();
m_owner_perm = owner_perm_; m_owner_perm = owner_perm;
m_address = address_; m_address = address;
m_size = size_; m_size = size;
m_is_initialized = true; m_is_initialized = true;
R_SUCCEED(); R_SUCCEED();


@ -23,10 +23,10 @@ class KTransferMemory final
KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject); KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);
public: public:
explicit KTransferMemory(KernelCore& kernel_); explicit KTransferMemory(KernelCore& kernel);
~KTransferMemory() override; ~KTransferMemory() override;
Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_); Result Initialize(VAddr address, std::size_t size, Svc::MemoryPermission owner_perm);
void Finalize() override; void Finalize() override;


@ -9,7 +9,7 @@ namespace Kernel {
class KWorkerTask : public KSynchronizationObject { class KWorkerTask : public KSynchronizationObject {
public: public:
explicit KWorkerTask(KernelCore& kernel_); explicit KWorkerTask(KernelCore& kernel);
void DoWorkerTask(); void DoWorkerTask();
}; };


@ -10,7 +10,7 @@
namespace Kernel { namespace Kernel {
KWorkerTask::KWorkerTask(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} KWorkerTask::KWorkerTask(KernelCore& kernel) : KSynchronizationObject{kernel} {}
void KWorkerTask::DoWorkerTask() { void KWorkerTask::DoWorkerTask() {
if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) { if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) {


@ -20,7 +20,7 @@ public:
KWorkerTaskManager(); KWorkerTaskManager();
static void AddTask(KernelCore& kernel_, WorkerType type, KWorkerTask* task); static void AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task);
private: private:
void AddTask(KernelCore& kernel, KWorkerTask* task); void AddTask(KernelCore& kernel, KWorkerTask* task);


@ -66,7 +66,7 @@ private:
} }
public: public:
explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} explicit KAutoObjectWithSlabHeap(KernelCore& kernel) : Base(kernel) {}
virtual ~KAutoObjectWithSlabHeap() = default; virtual ~KAutoObjectWithSlabHeap() = default;
virtual void Destroy() override { virtual void Destroy() override {
@ -76,7 +76,7 @@ public:
arg = this->GetPostDestroyArgument(); arg = this->GetPostDestroyArgument();
this->Finalize(); this->Finalize();
} }
Free(kernel, static_cast<Derived*>(this)); Free(Base::m_kernel, static_cast<Derived*>(this));
if (is_initialized) { if (is_initialized) {
Derived::PostDestroy(arg); Derived::PostDestroy(arg);
} }
@ -90,7 +90,7 @@ public:
} }
size_t GetSlabIndex() const { size_t GetSlabIndex() const {
return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
} }
public: public:
@ -125,9 +125,6 @@ public:
static size_t GetNumRemaining(KernelCore& kernel) { static size_t GetNumRemaining(KernelCore& kernel) {
return kernel.SlabHeap<Derived>().GetNumRemaining(); return kernel.SlabHeap<Derived>().GetNumRemaining();
} }
protected:
KernelCore& kernel;
}; };
template <typename Derived, typename Base> template <typename Derived, typename Base>
@ -144,18 +141,18 @@ private:
} }
public: public:
KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_) {} KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel) : Base(kernel) {}
virtual ~KAutoObjectWithSlabHeapAndContainer() {} virtual ~KAutoObjectWithSlabHeapAndContainer() {}
virtual void Destroy() override { virtual void Destroy() override {
const bool is_initialized = this->IsInitialized(); const bool is_initialized = this->IsInitialized();
uintptr_t arg = 0; uintptr_t arg = 0;
if (is_initialized) { if (is_initialized) {
Base::kernel.ObjectListContainer().Unregister(this); Base::m_kernel.ObjectListContainer().Unregister(this);
arg = this->GetPostDestroyArgument(); arg = this->GetPostDestroyArgument();
this->Finalize(); this->Finalize();
} }
Free(Base::kernel, static_cast<Derived*>(this)); Free(Base::m_kernel, static_cast<Derived*>(this));
if (is_initialized) { if (is_initialized) {
Derived::PostDestroy(arg); Derived::PostDestroy(arg);
} }
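
Note: Destroy above always runs the same four steps: unregister from the container, finalize, return the storage to the slab, then run PostDestroy with an argument captured before the memory was freed. A hedged outline of that ordering, with hypothetical helpers in place of the container/slab machinery:

#include <cstdint>

struct Object {
    bool IsInitialized() const;
    std::uintptr_t GetPostDestroyArgument() const;
    void Finalize();
};
void Unregister(Object*);
void FreeToSlab(Object*);
void PostDestroy(std::uintptr_t arg);

void DestroyObject(Object* obj) {
    const bool initialized = obj->IsInitialized();
    std::uintptr_t arg = 0;
    if (initialized) {
        Unregister(obj);                      // drop it from the global list first
        arg = obj->GetPostDestroyArgument();  // capture before the storage is gone
        obj->Finalize();
    }
    FreeToSlab(obj);  // the object's memory is invalid past this point
    if (initialized) {
        PostDestroy(arg);  // e.g. release the owning process's resource
    }
}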
@ -169,7 +166,7 @@ public:
} }
size_t GetSlabIndex() const { size_t GetSlabIndex() const {
return SlabHeap<Derived>(Base::kernel).GetObjectIndex(static_cast<const Derived*>(this)); return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
} }
public: public: