From 05e9084e9395bb59feebadd9c15ef50b93224705 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Fri, 17 Sep 2021 16:12:01 -0700
Subject: [PATCH] kern: kill the interrupt task manager thread

---
 .../arch/arm64/kern_assembly_offsets.h        | 10 +--
 .../kern_k_interrupt_task_manager.hpp         | 19 +++---
 .../include/mesosphere/kern_k_scheduler.hpp   | 66 +++++++++++--------
 .../mesosphere/kern_k_scheduler_lock.hpp      |  4 +-
 .../source/kern_k_interrupt_task_manager.cpp  | 64 ++++++------------
 .../libmesosphere/source/kern_k_scheduler.cpp | 31 +++------
 libraries/libmesosphere/source/kern_main.cpp  |  1 -
 .../source/svc/kern_svc_info.cpp              |  2 +-
 .../source/arch/arm64/kern_k_scheduler_asm.s  | 55 +++++++---------
 9 files changed, 112 insertions(+), 140 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h
index e25b8aeb6..a49564e9b 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h
@@ -156,7 +156,9 @@
 /* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */
 /* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */
-#define KSCHEDULER_NEEDS_SCHEDULING               0x00
-#define KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE 0x01
-#define KSCHEDULER_HIGHEST_PRIORITY_THREAD        0x10
-#define KSCHEDULER_IDLE_THREAD_STACK              0x18
+#define KSCHEDULER_NEEDS_SCHEDULING        0x00
+#define KSCHEDULER_INTERRUPT_TASK_RUNNABLE 0x01
+#define KSCHEDULER_HIGHEST_PRIORITY_THREAD 0x10
+#define KSCHEDULER_IDLE_THREAD_STACK       0x18
+#define KSCHEDULER_PREVIOUS_THREAD         0x20
+#define KSCHEDULER_INTERRUPT_TASK_MANAGER  0x28
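
The two new offsets extend KScheduler's SchedulingState, which the scheduler assembly indexes by hand. As a standalone C++ sketch (illustrative, not kernel code; field types are taken from the kern_k_scheduler.hpp hunks later in this patch), the offsets fall out of the member order under AArch64 natural alignment:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    class KThread;
    class KInterruptTaskManager;

    /* Assumes the struct stays standard-layout (true for GCC's libstdc++ std::atomic). */
    struct SchedulingState {
        std::atomic<bool> needs_scheduling;            // 0x00
        bool interrupt_task_runnable;                  // 0x01
        bool should_count_idle;                        // 0x02, then padding to 0x08
        uint64_t idle_count;                           // 0x08
        KThread *highest_priority_thread;              // 0x10
        void *idle_thread_stack;                       // 0x18
        KThread *prev_thread;                          // 0x20 (new)
        KInterruptTaskManager *interrupt_task_manager; // 0x28 (new)
    };

    static_assert(offsetof(SchedulingState, needs_scheduling)        == 0x00);
    static_assert(offsetof(SchedulingState, interrupt_task_runnable) == 0x01);
    static_assert(offsetof(SchedulingState, highest_priority_thread) == 0x10);
    static_assert(offsetof(SchedulingState, idle_thread_stack)       == 0x18);
    static_assert(offsetof(SchedulingState, prev_thread)             == 0x20);
    static_assert(offsetof(SchedulingState, interrupt_task_manager)  == 0x28);

The kernel performs the equivalent checks itself via the __builtin_offsetof static_asserts in ValidateAssemblyOffsets below.
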
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp
index dd1d95c4d..ca902b746 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp
@@ -27,28 +27,25 @@ namespace ams::kern {
                 KInterruptTask *m_head;
                 KInterruptTask *m_tail;
             public:
-                constexpr TaskQueue() : m_head(nullptr), m_tail(nullptr) { /* ... */ }
+                constexpr ALWAYS_INLINE TaskQueue() : m_head(nullptr), m_tail(nullptr) { /* ... */ }
 
-                constexpr KInterruptTask *GetHead() { return m_head; }
-                constexpr bool IsEmpty() const { return m_head == nullptr; }
-                constexpr void Clear() { m_head = nullptr; m_tail = nullptr; }
+                constexpr ALWAYS_INLINE KInterruptTask *GetHead() { return m_head; }
+                constexpr ALWAYS_INLINE bool IsEmpty() const { return m_head == nullptr; }
+                constexpr ALWAYS_INLINE void Clear() { m_head = nullptr; m_tail = nullptr; }
 
                 void Enqueue(KInterruptTask *task);
                 void Dequeue();
         };
     private:
         TaskQueue m_task_queue;
-        KThread *m_thread;
-    private:
-        static void ThreadFunction(uintptr_t arg);
-        void ThreadFunctionImpl();
+        s64 m_cpu_time;
    public:
-        constexpr KInterruptTaskManager() : m_task_queue(), m_thread(nullptr) { /* ... */ }
+        constexpr KInterruptTaskManager() : m_task_queue(), m_cpu_time(0) { /* ... */ }
 
-        constexpr KThread *GetThread() const { return m_thread; }
+        constexpr ALWAYS_INLINE s64 GetCpuTime() const { return m_cpu_time; }
 
-        NOINLINE void Initialize();
         void EnqueueTask(KInterruptTask *task);
+        void DoTasks();
     };
 
 }
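
With the worker thread gone, KInterruptTaskManager is reduced to the intrusive FIFO plus a tick counter. Enqueue and Dequeue remain out of line in the .cpp; a minimal sketch of the head/tail queue shape they imply (illustrative; the real link lives inside KInterruptTask, and the field names here are assumed):

    struct Task {
        Task *next = nullptr; /* intrusive link carried by the task itself */
    };

    struct TaskQueue {
        Task *head = nullptr;
        Task *tail = nullptr;

        void Enqueue(Task *task) {
            /* Append at the tail; an empty queue adopts the task as head too. */
            if (tail != nullptr) {
                tail->next = task;
            } else {
                head = task;
            }
            tail = task;
        }

        void Dequeue() {
            /* Unlink the head; clear the tail when the queue drains. */
            Task * const popped = head;
            head = popped->next;
            if (head == nullptr) {
                tail = nullptr;
            }
            popped->next = nullptr;
        }
    };

This matches the GetHead()/Dequeue() consumption pattern DoTasks uses later in the patch.
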
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
index 942365e22..87ffc48a2 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
@@ -17,6 +17,7 @@
 #include <mesosphere/kern_common.hpp>
 #include <mesosphere/kern_k_thread.hpp>
 #include <mesosphere/kern_k_priority_queue.hpp>
+#include <mesosphere/kern_k_interrupt_task_manager.hpp>
 #include <mesosphere/kern_k_scheduler_lock.hpp>
 
 namespace ams::kern {
@@ -39,11 +40,13 @@ namespace ams::kern {
 
             struct SchedulingState {
                 std::atomic<bool> needs_scheduling;
-                bool interrupt_task_thread_runnable;
+                bool interrupt_task_runnable;
                 bool should_count_idle;
                 u64 idle_count;
                 KThread *highest_priority_thread;
                 void *idle_thread_stack;
+                KThread *prev_thread;
+                KInterruptTaskManager *interrupt_task_manager;
             };
         private:
             friend class KScopedSchedulerLock;
@@ -53,28 +56,29 @@ namespace ams::kern {
             SchedulingState m_state;
             bool m_is_active;
             s32 m_core_id;
-            KThread *m_prev_thread;
             s64 m_last_context_switch_time;
             KThread *m_idle_thread;
             std::atomic<KThread *> m_current_thread;
         public:
             constexpr KScheduler()
-                : m_state(), m_is_active(false), m_core_id(0), m_prev_thread(nullptr), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr)
+                : m_state(), m_is_active(false), m_core_id(0), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr)
             {
-                m_state.needs_scheduling = true;
-                m_state.interrupt_task_thread_runnable = false;
-                m_state.should_count_idle = false;
-                m_state.idle_count = 0;
-                m_state.idle_thread_stack = nullptr;
+                m_state.needs_scheduling        = true;
+                m_state.interrupt_task_runnable = false;
+                m_state.should_count_idle       = false;
+                m_state.idle_count              = 0;
+                m_state.idle_thread_stack       = nullptr;
                 m_state.highest_priority_thread = nullptr;
+                m_state.prev_thread             = nullptr;
+                m_state.interrupt_task_manager  = nullptr;
             }
 
             NOINLINE void Initialize(KThread *idle_thread);
             NOINLINE void Activate();
 
             ALWAYS_INLINE void SetInterruptTaskRunnable() {
-                m_state.interrupt_task_thread_runnable = true;
-                m_state.needs_scheduling = true;
+                m_state.interrupt_task_runnable = true;
+                m_state.needs_scheduling        = true;
             }
 
             ALWAYS_INLINE void RequestScheduleOnInterrupt() {
@@ -94,7 +98,7 @@ namespace ams::kern {
             }
 
             ALWAYS_INLINE KThread *GetPreviousThread() const {
-                return m_prev_thread;
+                return m_state.prev_thread;
             }
 
             ALWAYS_INLINE KThread *GetSchedulerCurrentThread() const {
@@ -108,8 +112,6 @@ namespace ams::kern {
             /* Static private API. */
             static ALWAYS_INLINE KSchedulerPriorityQueue &GetPriorityQueue() { return s_priority_queue; }
             static NOINLINE u64 UpdateHighestPriorityThreadsImpl();
-
-            static NOINLINE void InterruptTaskThreadToRunnable();
         public:
             /* Static public API. */
             static ALWAYS_INLINE bool CanSchedule() { return GetCurrentThread().GetDisableDispatchCount() == 0; }
@@ -124,13 +126,14 @@ namespace ams::kern {
                 GetCurrentThread().DisableDispatch();
             }
 
-            static NOINLINE void EnableScheduling(u64 cores_needing_scheduling) {
+            static ALWAYS_INLINE void EnableScheduling(u64 cores_needing_scheduling) {
                 MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 1);
 
+                GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling);
+
                 if (GetCurrentThread().GetDisableDispatchCount() > 1) {
                     GetCurrentThread().EnableDispatch();
                 } else {
-                    GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling);
                     GetCurrentScheduler().RescheduleCurrentCore();
                 }
             }
@@ -176,14 +179,23 @@ namespace ams::kern {
             ALWAYS_INLINE void RescheduleCurrentCore() {
                 MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
-                {
-                    /* Disable interrupts, and then context switch. */
-                    KScopedInterruptDisable intr_disable;
-                    ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); };
 
-                    if (m_state.needs_scheduling.load()) {
-                        Schedule();
-                    }
+                GetCurrentThread().EnableDispatch();
+
+                if (m_state.needs_scheduling.load()) {
+                    /* Disable interrupts, and then check again if rescheduling is needed. */
+                    KScopedInterruptDisable intr_disable;
+
+                    GetCurrentScheduler().RescheduleCurrentCoreImpl();
+                }
+            }
+
+            ALWAYS_INLINE void RescheduleCurrentCoreImpl() {
+                /* Check that scheduling is needed. */
+                if (AMS_LIKELY(m_state.needs_scheduling.load())) {
+                    GetCurrentThread().DisableDispatch();
+                    this->Schedule();
+                    GetCurrentThread().EnableDispatch();
                 }
             }
 
@@ -199,10 +211,12 @@ namespace ams::kern {
     };
 
     consteval bool KScheduler::ValidateAssemblyOffsets() {
-        static_assert(__builtin_offsetof(KScheduler, m_state.needs_scheduling) == KSCHEDULER_NEEDS_SCHEDULING);
-        static_assert(__builtin_offsetof(KScheduler, m_state.interrupt_task_thread_runnable) == KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE);
-        static_assert(__builtin_offsetof(KScheduler, m_state.highest_priority_thread) == KSCHEDULER_HIGHEST_PRIORITY_THREAD);
-        static_assert(__builtin_offsetof(KScheduler, m_state.idle_thread_stack) == KSCHEDULER_IDLE_THREAD_STACK);
+        static_assert(__builtin_offsetof(KScheduler, m_state.needs_scheduling)        == KSCHEDULER_NEEDS_SCHEDULING);
+        static_assert(__builtin_offsetof(KScheduler, m_state.interrupt_task_runnable) == KSCHEDULER_INTERRUPT_TASK_RUNNABLE);
+        static_assert(__builtin_offsetof(KScheduler, m_state.highest_priority_thread) == KSCHEDULER_HIGHEST_PRIORITY_THREAD);
+        static_assert(__builtin_offsetof(KScheduler, m_state.idle_thread_stack)       == KSCHEDULER_IDLE_THREAD_STACK);
+        static_assert(__builtin_offsetof(KScheduler, m_state.prev_thread)             == KSCHEDULER_PREVIOUS_THREAD);
+        static_assert(__builtin_offsetof(KScheduler, m_state.interrupt_task_manager)  == KSCHEDULER_INTERRUPT_TASK_MANAGER);
 
         return true;
     }
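
Two things change shape here: EnableScheduling now always pokes the other cores before looking at the local dispatch count, and the local reschedule becomes a double-checked test of needs_scheduling, one cheap load with interrupts still enabled, then a second load under KScopedInterruptDisable before committing to a context switch. A standalone model of that pattern (the masking and scheduling helpers are stand-ins, not kernel API):

    #include <atomic>

    std::atomic<bool> g_needs_scheduling{false};

    void MaskInterruptsStandIn();   /* models msr daifset, #2 */
    void UnmaskInterruptsStandIn(); /* models msr daifclr, #2 */
    void ScheduleStandIn();         /* models KScheduler::Schedule() */

    void RescheduleCurrentCoreModel() {
        if (g_needs_scheduling.load()) {     /* cheap first check, may race */
            MaskInterruptsStandIn();         /* no local IRQ can now interleave */
            if (g_needs_scheduling.load()) { /* confirm under masked IRQs */
                ScheduleStandIn();           /* pay for the switch only if still needed */
            }
            UnmaskInterruptsStandIn();
        }
    }
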
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp
index 0f9e27f94..e8097d368 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp
@@ -45,7 +45,7 @@ namespace ams::kern {
                 return m_owner_thread == GetCurrentThreadPointer();
             }
 
-            void Lock() {
+            NOINLINE void Lock() {
                 MESOSPHERE_ASSERT_THIS();
 
                 if (this->IsLockedByCurrentThread()) {
@@ -67,7 +67,7 @@ namespace ams::kern {
                 }
             }
 
-            void Unlock() {
+            NOINLINE void Unlock() {
                 MESOSPHERE_ASSERT_THIS();
                 MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
                 MESOSPHERE_ASSERT(m_lock_count > 0);
diff --git a/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp
index 1fe3a6492..d370b2f52 100644
--- a/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp
+++ b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp
@@ -59,50 +59,6 @@ namespace ams::kern {
         #endif
     }
 
-    void KInterruptTaskManager::ThreadFunction(uintptr_t arg) {
-        reinterpret_cast<KInterruptTaskManager *>(arg)->ThreadFunctionImpl();
-    }
-
-    void KInterruptTaskManager::ThreadFunctionImpl() {
-        MESOSPHERE_ASSERT_THIS();
-
-        while (true) {
-            /* Get a task. */
-            KInterruptTask *task = nullptr;
-            {
-                KScopedInterruptDisable di;
-
-                task = m_task_queue.GetHead();
-                if (task == nullptr) {
-                    m_thread->SetState(KThread::ThreadState_Waiting);
-                    continue;
-                }
-
-                m_task_queue.Dequeue();
-            }
-
-            /* Do the task. */
-            task->DoTask();
-
-            /* Destroy any objects we may need to close. */
-            m_thread->DestroyClosedObjects();
-        }
-    }
-
-    void KInterruptTaskManager::Initialize() {
-        /* Reserve a thread from the system limit. */
-        MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
-
-        /* Create and initialize the thread. */
-        m_thread = KThread::Create();
-        MESOSPHERE_ABORT_UNLESS(m_thread != nullptr);
-        MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeHighPriorityThread(m_thread, ThreadFunction, reinterpret_cast<uintptr_t>(this)));
-        KThread::Register(m_thread);
-
-        /* Run the thread. */
-        m_thread->Run();
-    }
-
     void KInterruptTaskManager::EnqueueTask(KInterruptTask *task) {
         MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
 
@@ -111,4 +67,24 @@ namespace ams::kern {
         Kernel::GetScheduler().SetInterruptTaskRunnable();
     }
 
+    void KInterruptTaskManager::DoTasks() {
+        /* Execute pending tasks. */
+        const s64 start_time = KHardwareTimer::GetTick();
+        for (KInterruptTask *task = m_task_queue.GetHead(); task != nullptr; task = m_task_queue.GetHead()) {
+            /* Dequeue the task. */
+            m_task_queue.Dequeue();
+
+            /* Do the task with interrupts temporarily enabled. */
+            {
+                KScopedInterruptEnable ei;
+
+                task->DoTask();
+            }
+        }
+        const s64 end_time = KHardwareTimer::GetTick();
+
+        /* Increment the time we've spent executing. */
+        m_cpu_time += end_time - start_time;
+    }
+
 }
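
DoTasks inherits the old ThreadFunctionImpl loop, but runs on the idle thread with interrupts already masked by the scheduler, unmasking them only around each task body, and it charges the elapsed ticks to m_cpu_time. A compressed model of that flow (stand-in primitives; the kernel uses KHardwareTimer::GetTick and KScopedInterruptEnable):

    #include <cstdint>

    struct Task { Task *next; void DoTask(); };

    int64_t GetTickStandIn();          /* stand-in for KHardwareTimer::GetTick() */
    Task   *PopHeadStandIn();          /* stand-in for GetHead() + Dequeue() */
    void    UnmaskInterruptsStandIn();
    void    MaskInterruptsStandIn();

    int64_t g_cpu_time = 0;

    void DoTasksModel() {
        const int64_t start = GetTickStandIn();
        /* Entered with IRQs masked; we are running on the idle thread's stack. */
        for (Task *task = PopHeadStandIn(); task != nullptr; task = PopHeadStandIn()) {
            UnmaskInterruptsStandIn(); /* let IRQs preempt long task bodies */
            task->DoTask();
            MaskInterruptsStandIn();   /* restore the idle loop's invariant */
        }
        g_cpu_time += GetTickStandIn() - start; /* later subtracted from idle time */
    }
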
diff --git a/libraries/libmesosphere/source/kern_k_scheduler.cpp b/libraries/libmesosphere/source/kern_k_scheduler.cpp
index cce622c96..560bf7b43 100644
--- a/libraries/libmesosphere/source/kern_k_scheduler.cpp
+++ b/libraries/libmesosphere/source/kern_k_scheduler.cpp
@@ -55,10 +55,11 @@ namespace ams::kern {
     }
 
     void KScheduler::Initialize(KThread *idle_thread) {
-        /* Set core ID and idle thread. */
-        m_core_id = GetCurrentCoreId();
-        m_idle_thread = idle_thread;
-        m_state.idle_thread_stack = m_idle_thread->GetStackTop();
+        /* Set core ID/idle thread/interrupt task manager. */
+        m_core_id                      = GetCurrentCoreId();
+        m_idle_thread                  = idle_thread;
+        m_state.idle_thread_stack      = m_idle_thread->GetStackTop();
+        m_state.interrupt_task_manager = std::addressof(Kernel::GetInterruptTaskManager());
 
         /* Insert the main thread into the priority queue. */
         {
@@ -212,19 +213,9 @@ namespace ams::kern {
         return cores_needing_scheduling;
     }
 
-    void KScheduler::InterruptTaskThreadToRunnable() {
-        MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
-
-        KThread *task_thread = Kernel::GetInterruptTaskManager().GetThread();
-        {
-            KScopedSchedulerLock sl;
-            task_thread->SetState(KThread::ThreadState_Runnable);
-        }
-    }
-
     void KScheduler::SwitchThread(KThread *next_thread) {
-        KProcess *cur_process = GetCurrentProcessPointer();
-        KThread *cur_thread = GetCurrentThreadPointer();
+        KProcess * const cur_process = GetCurrentProcessPointer();
+        KThread * const cur_thread   = GetCurrentThreadPointer();
 
         /* We never want to schedule a null thread, so use the idle thread if we don't have a next. */
         if (next_thread == nullptr) {
@@ -257,12 +248,10 @@ namespace ams::kern {
         if (cur_process != nullptr) {
             /* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */
             if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == m_core_id)) {
-                m_prev_thread = cur_thread;
+                m_state.prev_thread = cur_thread;
             } else {
-                m_prev_thread = nullptr;
+                m_state.prev_thread = nullptr;
             }
-        } else if (cur_thread == m_idle_thread) {
-            m_prev_thread = nullptr;
         }
 
         MESOSPHERE_KTRACE_THREAD_SWITCH(next_thread);
@@ -284,7 +273,7 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
         for (size_t i = 0; i < cpu::NumCores; ++i) {
             /* Get an atomic reference to the core scheduler's previous thread. */
-            std::atomic_ref<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_prev_thread);
+            std::atomic_ref<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_state.prev_thread);
             static_assert(std::atomic_ref<KThread *>::is_always_lock_free);
 
             /* Atomically clear the previous thread if it's our target. */
diff --git a/libraries/libmesosphere/source/kern_main.cpp b/libraries/libmesosphere/source/kern_main.cpp
index dc351c708..6450b0cac 100644
--- a/libraries/libmesosphere/source/kern_main.cpp
+++ b/libraries/libmesosphere/source/kern_main.cpp
@@ -99,7 +99,6 @@ namespace ams::kern {
         DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
             KThread::Register(std::addressof(Kernel::GetMainThread(core_id)));
             KThread::Register(std::addressof(Kernel::GetIdleThread(core_id)));
-            Kernel::GetInterruptTaskManager().Initialize();
         });
 
         /* Activate the scheduler and enable interrupts. */
diff --git a/libraries/libmesosphere/source/svc/kern_svc_info.cpp b/libraries/libmesosphere/source/svc/kern_svc_info.cpp
index c5424095f..598ee229e 100644
--- a/libraries/libmesosphere/source/svc/kern_svc_info.cpp
+++ b/libraries/libmesosphere/source/svc/kern_svc_info.cpp
@@ -189,7 +189,7 @@ namespace ams::kern::svc {
                         R_UNLESS(core_valid, svc::ResultInvalidCombination());
 
                         /* Get the idle tick count. */
-                        *out = Kernel::GetScheduler().GetIdleThread()->GetCpuTime();
+                        *out = Kernel::GetScheduler().GetIdleThread()->GetCpuTime() - Kernel::GetInterruptTaskManager().GetCpuTime();
                     }
                     break;
                 case ams::svc::InfoType_RandomEntropy:
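
Because interrupt tasks now execute on the idle thread, the idle thread's raw CpuTime includes real work, so InfoType_IdleTickCount compensates:

    reported_idle_ticks = idle_thread_cpu_time - interrupt_task_manager_cpu_time

With made-up numbers: an idle thread that accumulated 1,000,000 ticks while DoTasks consumed 50,000 of them reports 950,000 idle ticks, roughly the same answer userland saw when that work was billed to the dedicated thread this patch removes.
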
diff --git a/mesosphere/kernel/source/arch/arm64/kern_k_scheduler_asm.s b/mesosphere/kernel/source/arch/arm64/kern_k_scheduler_asm.s
index 998022cd3..bb86dcfa7 100644
--- a/mesosphere/kernel/source/arch/arm64/kern_k_scheduler_asm.s
+++ b/mesosphere/kernel/source/arch/arm64/kern_k_scheduler_asm.s
@@ -112,29 +112,16 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv:
     /* KScheduler layout has state at +0x0, this is guaranteed statically by assembly offsets. */
     mov     x1, x0
 
-    /* First thing we want to do is check whether the interrupt task thread is runnable. */
-    ldrb    w3, [x1, #(KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE)]
-    cbz     w3, 0f
-
-    /* If it is, we want to call KScheduler::InterruptTaskThreadToRunnable() to change its state to runnable. */
-    stp     x0, x1, [sp, #-16]!
-    stp     x30, xzr, [sp, #-16]!
-    bl      _ZN3ams4kern10KScheduler29InterruptTaskThreadToRunnableEv
-    ldp     x30, xzr, [sp], 16
-    ldp     x0, x1, [sp], 16
-
-    /* Clear the interrupt task thread as runnable. */
-    strb    wzr, [x1, #(KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE)]
-
-0:  /* Interrupt task thread runnable checked. */
-    /* Now we want to check if there's any scheduling to do. */
-
     /* First, clear the need's scheduling bool (and dmb ish after, as it's an atomic). */
     /* TODO: Should this be a stlrb? Nintendo does not do one. */
     strb    wzr, [x1]
     dmb     ish
 
-    /* Check if the highest priority thread is the same as the current thread. */
+    /* Check whether there are runnable interrupt tasks. */
+    ldrb    w8, [x1, #(KSCHEDULER_INTERRUPT_TASK_RUNNABLE)]
+    cbnz    w8, 0f
+
+    /* If it isn't, we want to check if the highest priority thread is the same as the current thread. */
     ldr     x7, [x1, #(KSCHEDULER_HIGHEST_PRIORITY_THREAD)]
     cmp     x7, x18
     b.ne    1f
@@ -142,6 +129,10 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv:
     /* If they're the same, then we can just return as there's nothing to do. */
     ret
 
+0:  /* The interrupt task thread is runnable. */
+    /* We want to switch to the interrupt task/idle thread. */
+    mov     x7, #0
+
 1:  /* The highest priority thread is not the same as the current thread. */
     /* Get a reference to the current thread's stack parameters. */
     add     x2, sp, #0x1000
@@ -271,12 +262,19 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv:
     /* Call ams::kern::KScheduler::SwitchThread(ams::kern::KThread *) */
    bl      _ZN3ams4kern10KScheduler12SwitchThreadEPNS0_7KThreadE
 
-12: /* We've switched to the idle thread, so we want to loop until we schedule a non-idle thread. */
-    /* Check if we need scheduling. */
-    ldarb   w3, [x20] // ldarb w3, [x20, #(KSCHEDULER_NEEDS_SCHEDULING)]
+12: /* We've switched to the idle thread, so we want to process interrupt tasks until we schedule a non-idle thread. */
+    /* Check whether there are runnable interrupt tasks. */
+    ldrb    w3, [x20, #(KSCHEDULER_INTERRUPT_TASK_RUNNABLE)]
     cbnz    w3, 13f
 
-    /* If we don't, wait for an interrupt and check again. */
+    /* Check if we need scheduling. */
+    ldarb   w3, [x20] // ldarb w3, [x20, #(KSCHEDULER_NEEDS_SCHEDULING)]
+    cbnz    w3, 4b
+
+    /* Clear the previous thread. */
+    str     xzr, [x20, #(KSCHEDULER_PREVIOUS_THREAD)]
+
+    /* Wait for an interrupt and check again. */
     wfi
 
     msr     daifclr, #2
@@ -284,16 +282,13 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv:
 
     b       12b
 
-13: /* We need scheduling again! */
-    /* Check whether the interrupt task thread needs to be set runnable. */
-    ldrb    w3, [x20, #(KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE)]
-    cbz     w3, 4b
-
-    /* It does, so do so. We're using the idle thread stack so no register state preserve needed. */
-    bl      _ZN3ams4kern10KScheduler29InterruptTaskThreadToRunnableEv
+13: /* We have interrupt tasks to execute! */
+    /* Execute any pending interrupt tasks. */
+    ldr     x0, [x20, #(KSCHEDULER_INTERRUPT_TASK_MANAGER)]
+    bl      _ZN3ams4kern21KInterruptTaskManager7DoTasksEv
 
     /* Clear the interrupt task thread as runnable. */
-    strb    wzr, [x20, #(KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE)]
+    strb    wzr, [x20, #(KSCHEDULER_INTERRUPT_TASK_RUNNABLE)]
 
     /* Retry the scheduling loop. */
     b       4b
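
Read together, the rewritten label-12 loop amounts to the following C++ rendering (illustrative only; the hand-written assembly above is authoritative, and the wfi/mask helpers are stand-ins for the wfi and msr daif instructions):

    #include <atomic>

    class KThread;
    struct InterruptTaskManagerStandIn { void DoTasks(); };

    void WfiStandIn();              /* wfi */
    void UnmaskInterruptsStandIn(); /* msr daifclr, #2 */
    void MaskInterruptsStandIn();   /* msr daifset, #2 */

    struct SchedulingStateView {
        std::atomic<bool> needs_scheduling;
        bool interrupt_task_runnable;
        KThread *prev_thread;
        InterruptTaskManagerStandIn *interrupt_task_manager;
    };

    /* Returns when the core should fall back into the scheduling loop (label 4). */
    void IdleLoopModel(SchedulingStateView *state) {
        while (true) {
            if (state->interrupt_task_runnable) {       /* ldrb + cbnz -> 13f */
                state->interrupt_task_manager->DoTasks();
                state->interrupt_task_runnable = false; /* strb wzr */
                return;                                 /* b 4b */
            }
            if (state->needs_scheduling.load()) {       /* ldarb w3, [x20] */
                return;                                 /* b 4b */
            }
            state->prev_thread = nullptr;               /* str xzr, PREVIOUS_THREAD */
            WfiStandIn();                               /* sleep until an interrupt */
            UnmaskInterruptsStandIn();                  /* let the pending IRQ run... */
            MaskInterruptsStandIn();                    /* ...then re-mask and loop */
        }
    }
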