/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_k_priority_queue.hpp>
#include <mesosphere/kern_k_scheduler_lock.hpp>

namespace ams::kern {
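
    /* Shared run-queue type: a KPriorityQueue of KThreads spanning every core and the full svc thread priority range. */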
    using KSchedulerPriorityQueue = KPriorityQueue<KThread, cpu::NumCores, ams::svc::LowestThreadPriority, ams::svc::HighestThreadPriority>;
    static_assert(std::is_same<KSchedulerPriorityQueue::AffinityMaskType, KAffinityMask>::value);
    static_assert(KSchedulerPriorityQueue::NumCores == cpu::NumCores);
    static_assert(KSchedulerPriorityQueue::NumPriority == BITSIZEOF(u64));

    class KScopedSchedulerLock;
    class KScopedSchedulerLockAndSleep;

    class KScheduler {
        NON_COPYABLE(KScheduler);
        NON_MOVEABLE(KScheduler);
        public:
            using LockType = KAbstractSchedulerLock<KScheduler>;

            static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
            static_assert(ams::svc::LowestThreadPriority >= HighestCoreMigrationAllowedPriority);
            static_assert(ams::svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);

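            /* Scheduling state tracked by each scheduler instance: needs_scheduling flags that a context
               switch is required, and highest_priority_thread caches the thread selected to run next. */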
            struct SchedulingState {
                std::atomic<bool> needs_scheduling;
                bool interrupt_task_thread_runnable;
                bool should_count_idle;
                u64 idle_count;
                KThread *highest_priority_thread;
                void *idle_thread_stack;
            };
        private:
            friend class KScopedSchedulerLock;
            friend class KScopedSchedulerLockAndSleep;
            static bool s_scheduler_update_needed;
            static LockType s_scheduler_lock;
            static KSchedulerPriorityQueue s_priority_queue;
        private:
            SchedulingState state;
            bool is_active;
            s32 core_id;
            KThread *prev_thread;
            s64 last_context_switch_time;
            KThread *idle_thread;
        public:
            constexpr KScheduler()
                : state(), is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr)
            {
                this->state.needs_scheduling = true;
                this->state.interrupt_task_thread_runnable = false;
                this->state.should_count_idle = false;
                this->state.idle_count = 0;
                this->state.idle_thread_stack = nullptr;
                this->state.highest_priority_thread = nullptr;
            }
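
            /* Per-core setup: Initialize() records this core's idle thread; Activate() begins scheduling on the core. */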
            NOINLINE void Initialize(KThread *idle_thread);
            NOINLINE void Activate();

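            /* Called from interrupt context: flag that the run queues need re-evaluation and, if dispatch is currently enabled, schedule immediately. */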
            ALWAYS_INLINE void RequestScheduleOnInterrupt() {
                SetSchedulerUpdateNeeded();

                if (CanSchedule()) {
                    this->ScheduleOnInterrupt();
                }
            }
        private:
            /* Static private API. */
            static ALWAYS_INLINE bool IsSchedulerUpdateNeeded() { return s_scheduler_update_needed; }
            static ALWAYS_INLINE void SetSchedulerUpdateNeeded() { s_scheduler_update_needed = true; }
            static ALWAYS_INLINE void ClearSchedulerUpdateNeeded() { s_scheduler_update_needed = false; }
            static ALWAYS_INLINE KSchedulerPriorityQueue &GetPriorityQueue() { return s_priority_queue; }

            static NOINLINE u64 UpdateHighestPriorityThreadsImpl();
        public:
            /* Static public API. */
            static ALWAYS_INLINE bool CanSchedule() { return GetCurrentThread().GetDisableDispatchCount() == 0; }
            static ALWAYS_INLINE bool IsSchedulerLockedByCurrentThread() { return s_scheduler_lock.IsLockedByCurrentThread(); }

            static NOINLINE void SetInterruptTaskThreadRunnable();

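            /* Scheduling is disabled by raising the current thread's dispatch-disable count. The matching
               EnableScheduling() decrements it, and only performs rescheduling when releasing the last disable. */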
            static ALWAYS_INLINE void DisableScheduling() {
                MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 0);
                GetCurrentThread().DisableDispatch();
            }

            static NOINLINE void EnableScheduling(u64 cores_needing_scheduling) {
                MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 1);

                if (GetCurrentThread().GetDisableDispatchCount() > 1) {
                    GetCurrentThread().EnableDispatch();
                } else {
                    GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling);
                    GetCurrentScheduler().RescheduleCurrentCore();
                }
            }

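            /* Fast path: the full recomputation in UpdateHighestPriorityThreadsImpl() only runs when an update has been requested. */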
            static ALWAYS_INLINE u64 UpdateHighestPriorityThreads() {
                if (IsSchedulerUpdateNeeded()) {
                    return UpdateHighestPriorityThreadsImpl();
                } else {
                    return 0;
                }
            }

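            /* Notification hooks, invoked when a thread's state, priority, or affinity mask changes so the
               scheduler can requeue it appropriately. */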
            static NOINLINE void OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state);
            static NOINLINE void OnThreadPriorityChanged(KThread *thread, s32 old_priority);
            static NOINLINE void OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core);

            /* TODO: Yield operations */
            static NOINLINE void RotateScheduledQueue(s32 priority, s32 core_id);
        private:
            /* Instanced private API. */
            void ScheduleImpl();
            void SwitchThread(KThread *next_thread);

            ALWAYS_INLINE void Schedule() {
                MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
                MESOSPHERE_ASSERT(this->core_id == GetCurrentCoreId());

                this->ScheduleImpl();
            }

            ALWAYS_INLINE void ScheduleOnInterrupt() {
                KScopedDisableDispatch dd;
                this->Schedule();
            }

            void RescheduleOtherCores(u64 cores_needing_scheduling);

            ALWAYS_INLINE void RescheduleCurrentCore() {
                MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
                {
                    /* Disable interrupts, and then context switch. */
                    KScopedInterruptDisable intr_disable;
                    ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); };

                    if (this->state.needs_scheduling) {
                        Schedule();
                    }
                }
            }

            NOINLINE u64 UpdateHighestPriorityThread(KThread *thread);
    };

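    /* RAII holder for the global scheduler lock (KScheduler::s_scheduler_lock). A minimal usage sketch,
       illustrative only; the helper function and accessor below are hypothetical and not part of the
       kernel sources:

           void ExampleAdjustPriority(KThread *thread, s32 new_priority) {
               KScopedSchedulerLock sl;                         // hold the scheduler lock for this scope
               const s32 old_priority = thread->GetPriority();  // assumed accessor, for illustration
               // ... apply new_priority to the thread here ...
               KScheduler::OnThreadPriorityChanged(thread, old_priority);
           }                                                    // lock released at end of scope

       Callers typically hold this lock around the KScheduler::OnThread*Changed notifications. */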
    class KScopedSchedulerLock : KScopedLock<KScheduler::LockType> {
        public:
            explicit ALWAYS_INLINE KScopedSchedulerLock() : KScopedLock(KScheduler::s_scheduler_lock) { /* ... */ }
            ALWAYS_INLINE ~KScopedSchedulerLock() { /* ... */ }
    };

}