diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp index b2780cc85..14f054de6 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp @@ -209,6 +209,10 @@ namespace ams::kern::arm64 { this->gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq; } + void EndOfInterrupt(u32 irq) const { + this->gicc->eoir = irq; + } + /* TODO: Implement more KInterruptController functionality. */ public: static constexpr ALWAYS_INLINE bool IsSoftware(s32 id) { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index 16b0c91fd..a505423c0 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -264,7 +264,12 @@ namespace ams::kern { } private: void Suspend(); + ALWAYS_INLINE void AddWaiterImpl(KThread *thread); + ALWAYS_INLINE void RemoveWaiterImpl(KThread *thread); + ALWAYS_INLINE static void RestorePriority(KThread *thread); public: + constexpr u64 GetId() const { return this->thread_id; } + constexpr KThreadContext *GetContext() { return std::addressof(this->thread_context); } constexpr const KThreadContext *GetContext() const { return std::addressof(this->thread_context); } constexpr const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; } @@ -277,6 +282,8 @@ namespace ams::kern { constexpr s32 GetActiveCore() const { return this->core_id; } constexpr void SetActiveCore(s32 core) { this->core_id = core; } constexpr s32 GetPriority() const { return this->priority; } + constexpr void SetPriority(s32 prio) { this->priority = prio; } + constexpr s32 GetBasePriority() const { return this->base_priority; } constexpr 
QueueEntry &GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; } constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return this->per_core_priority_queue_entry[core]; } @@ -285,8 +292,21 @@ namespace ams::kern { constexpr const QueueEntry &GetSleepingQueueEntry() const { return this->sleeping_queue_entry; } constexpr void SetSleepingQueue(KThreadQueue *q) { this->sleeping_queue = q; } + constexpr void /* TODO */ *GetConditionVariable() const { return this->cond_var_tree; } + constexpr s32 GetNumKernelWaiters() const { return this->num_kernel_waiters; } + void AddWaiter(KThread *thread); + void RemoveWaiter(KThread *thread); + KThread *RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key); + + constexpr KProcessAddress GetAddressKey() const { return this->arbiter_key; } + constexpr void SetAddressKey(KProcessAddress key) { this->arbiter_key = key; } + constexpr void SetLockOwner(KThread *owner) { this->lock_owner = owner; } + constexpr KThread *GetLockOwner() const { return this->lock_owner; } + + bool HasWaiters() const { return !this->waiter_list.empty(); } + constexpr s64 GetLastScheduledTick() const { return this->last_scheduled_tick; } constexpr void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; } @@ -307,6 +327,13 @@ namespace ams::kern { constexpr bool IsSuspended() const { return this->GetSuspendFlags() != 0; } void RequestSuspend(SuspendType type); void TrySuspend(); + void Continue(); + + void ContinueIfHasKernelWaiters() { + if (this->GetNumKernelWaiters() > 0) { + this->Continue(); + } + } Result SetPriorityToIdle(); diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp index 06f98b2ba..b9631e2cd 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp @@ -48,7 +48,7 @@ 
namespace ams::kern::arm64 { if (entry.handler != nullptr) { /* Set manual clear needed if relevant. */ if (entry.manually_cleared) { - this->interrupt_controller.Disable(irq); + this->interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low); entry.needs_clear = true; } @@ -78,6 +78,9 @@ namespace ams::kern::arm64 { MESOSPHERE_LOG("Invalid interrupt %d\n", irq); } + /* Acknowledge the interrupt. */ + this->interrupt_controller.EndOfInterrupt(raw_irq); + /* If we found no task, then we don't need to reschedule. */ if (task == nullptr) { return false; diff --git a/libraries/libmesosphere/source/kern_k_light_lock.cpp b/libraries/libmesosphere/source/kern_k_light_lock.cpp index 7e8fdcc66..9f91d67bf 100644 --- a/libraries/libmesosphere/source/kern_k_light_lock.cpp +++ b/libraries/libmesosphere/source/kern_k_light_lock.cpp @@ -17,12 +17,74 @@ namespace ams::kern { - void KLightLock::LockSlowPath(uintptr_t owner, uintptr_t cur_thread) { - MESOSPHERE_TODO_IMPLEMENT(); + void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { + KThread *cur_thread = reinterpret_cast<KThread *>(_cur_thread); + + /* Pend the current thread waiting on the owner thread. */ + { + KScopedSchedulerLock sl; + + /* Ensure we actually have locking to do. */ + if (AMS_UNLIKELY(this->tag.load(std::memory_order_relaxed) != _owner)) { + return; + } + + /* Add the current thread as a waiter on the owner. */ + KThread *owner_thread = reinterpret_cast<KThread *>(_owner & ~1ul); + cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(this->tag))); + owner_thread->AddWaiter(cur_thread); + + /* Set thread states. */ + if (AMS_LIKELY(cur_thread->GetState() == KThread::ThreadState_Runnable)) { + cur_thread->SetState(KThread::ThreadState_Waiting); + } + if (owner_thread->IsSuspended()) { + owner_thread->ContinueIfHasKernelWaiters(); + } + } + + /* We're no longer waiting on the lock owner. 
*/ + { + KScopedSchedulerLock sl; + KThread *owner_thread = cur_thread->GetLockOwner(); + if (AMS_UNLIKELY(owner_thread)) { + owner_thread->RemoveWaiter(cur_thread); + } + } } - void KLightLock::UnlockSlowPath(uintptr_t cur_thread) { - MESOSPHERE_TODO_IMPLEMENT(); + void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { + KThread *owner_thread = reinterpret_cast<KThread *>(_cur_thread); + + /* Unlock. */ + { + KScopedSchedulerLock sl; + + /* Get the next owner. */ + s32 num_waiters = 0; + KThread *next_owner = owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(this->tag))); + + /* Pass the lock to the next owner. */ + uintptr_t next_tag = 0; + if (next_owner) { + next_tag = reinterpret_cast<uintptr_t>(next_owner); + if (num_waiters > 1) { + next_tag |= 0x1; + } + + if (AMS_LIKELY(next_owner->GetState() == KThread::ThreadState_Waiting)) { + next_owner->SetState(KThread::ThreadState_Runnable); + } + } + + /* We may have unsuspended in the process of acquiring the lock, so we'll re-suspend now if so. */ + if (owner_thread->IsSuspended()) { + owner_thread->TrySuspend(); + } + + /* Write the new tag value. */ + this->tag.store(next_tag); + } } } diff --git a/libraries/libmesosphere/source/kern_k_scheduler.cpp b/libraries/libmesosphere/source/kern_k_scheduler.cpp index 50e778c6e..22677e0d9 100644 --- a/libraries/libmesosphere/source/kern_k_scheduler.cpp +++ b/libraries/libmesosphere/source/kern_k_scheduler.cpp @@ -255,6 +255,9 @@ namespace ams::kern { MESOSPHERE_TODO("KProcess::Switch"); } + /* Set the new thread. */ + SetCurrentThread(next_thread); + /* Set the new Thread Local region. 
*/ cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress())); } diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp index edc8e34b3..83bf3a36b 100644 --- a/libraries/libmesosphere/source/kern_k_thread.cpp +++ b/libraries/libmesosphere/source/kern_k_thread.cpp @@ -19,6 +19,11 @@ namespace ams::kern { namespace { + constexpr bool IsKernelAddressKey(KProcessAddress key) { + const uintptr_t key_uptr = GetInteger(key); + return KernelVirtualAddressSpaceBase <= key_uptr && key_uptr <= KernelVirtualAddressSpaceLast; + } + void CleanupKernelStack(uintptr_t stack_top) { const uintptr_t stack_bottom = stack_top - PageSize; @@ -302,6 +307,153 @@ namespace ams::kern { KScheduler::OnThreadStateChanged(this, old_state); } + void KThread::Continue() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Clear our suspend flags in state. */ + const auto old_state = this->thread_state; + this->thread_state = static_cast<ThreadState>(old_state & ThreadState_Mask); + + /* Note the state change in scheduler. */ + KScheduler::OnThreadStateChanged(this, old_state); + } + + void KThread::AddWaiterImpl(KThread *thread) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Find the right spot to insert the waiter. */ + auto it = this->waiter_list.begin(); + while (it != this->waiter_list.end()) { + if (it->GetPriority() > thread->GetPriority()) { + break; + } + it++; + } + + /* Keep track of how many kernel waiters we have. */ + if (IsKernelAddressKey(thread->GetAddressKey())) { + MESOSPHERE_ABORT_UNLESS((this->num_kernel_waiters++) >= 0); + } + + /* Insert the waiter. 
*/ + this->waiter_list.insert(it, *thread); + thread->SetLockOwner(this); + } + + void KThread::RemoveWaiterImpl(KThread *thread) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Keep track of how many kernel waiters we have. */ + if (IsKernelAddressKey(thread->GetAddressKey())) { + MESOSPHERE_ABORT_UNLESS((this->num_kernel_waiters--) > 0); + } + + /* Remove the waiter. */ + this->waiter_list.erase(this->waiter_list.iterator_to(*thread)); + thread->SetLockOwner(nullptr); + } + + void KThread::RestorePriority(KThread *thread) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + while (true) { + /* We want to inherit priority where possible. */ + s32 new_priority = thread->GetBasePriority(); + if (thread->HasWaiters()) { + new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority()); + } + + /* If the priority we would inherit is not different from ours, don't do anything. */ + if (new_priority == thread->GetPriority()) { + return; + } + + /* Ensure we don't violate condition variable red black tree invariants. */ + if (auto *cond_var = thread->GetConditionVariable(); cond_var != nullptr) { + MESOSPHERE_TODO("Remove from condvar tree"); + } + + /* Change the priority. */ + const s32 old_priority = thread->GetPriority(); + thread->SetPriority(new_priority); + + /* Restore the condition variable, if relevant. */ + if (auto *cond_var = thread->GetConditionVariable(); cond_var != nullptr) { + MESOSPHERE_TODO("Re-insert into condvar tree"); + } + + /* Update the scheduler. */ + KScheduler::OnThreadPriorityChanged(thread, old_priority); + + /* Keep the lock owner up to date. */ + KThread *lock_owner = thread->GetLockOwner(); + if (lock_owner == nullptr) { + return; + } + + /* Update the thread in the lock owner's sorted list, and continue inheriting. 
*/ + lock_owner->RemoveWaiterImpl(thread); + lock_owner->AddWaiterImpl(thread); + thread = lock_owner; + } + } + + void KThread::AddWaiter(KThread *thread) { + MESOSPHERE_ASSERT_THIS(); + this->AddWaiterImpl(thread); + RestorePriority(this); + } + + void KThread::RemoveWaiter(KThread *thread) { + MESOSPHERE_ASSERT_THIS(); + this->RemoveWaiterImpl(thread); + RestorePriority(this); + } + + KThread *KThread::RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + s32 num_waiters = 0; + KThread *next_lock_owner = nullptr; + auto it = this->waiter_list.begin(); + while (it != this->waiter_list.end()) { + if (it->GetAddressKey() == key) { + KThread *thread = std::addressof(*it); + + /* Keep track of how many kernel waiters we have. */ + if (IsKernelAddressKey(thread->GetAddressKey())) { + MESOSPHERE_ABORT_UNLESS((this->num_kernel_waiters--) > 0); + } + it = this->waiter_list.erase(it); + + /* Update the next lock owner. */ + if (next_lock_owner == nullptr) { + next_lock_owner = thread; + next_lock_owner->SetLockOwner(nullptr); + } else { + next_lock_owner->AddWaiterImpl(thread); + } + num_waiters++; + } else { + it++; + } + } + + /* Do priority updates, if we have a next owner. */ + if (next_lock_owner) { + RestorePriority(this); + RestorePriority(next_lock_owner); + } + + /* Return output. */ + *out_num_waiters = num_waiters; + return next_lock_owner; + } + Result KThread::Run() { MESOSPHERE_ASSERT_THIS(); diff --git a/mesosphere/kernel/source/arch/arm64/kern_k_scheduler_asm.s b/mesosphere/kernel/source/arch/arm64/kern_k_scheduler_asm.s index ce2364895..8d860876d 100644 --- a/mesosphere/kernel/source/arch/arm64/kern_k_scheduler_asm.s +++ b/mesosphere/kernel/source/arch/arm64/kern_k_scheduler_asm.s @@ -259,8 +259,10 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv: /* If we don't, wait for an interrupt and check again. 
*/ wfi + msr daifclr, #2 msr daifset, #2 + b 12b 13: /* We need scheduling again! */