
kern: implement thread pinning/SvcSynchronizePreemptionState

Michael Scire 2020-07-27 17:32:04 -07:00 (committed by SciresM)
parent b1f38be3ae
commit 787964f7e7
10 changed files with 230 additions and 19 deletions


@@ -221,6 +221,12 @@ namespace ams::kern {
data[id / BitsPerWord] &= ~(1ul << (id % BitsPerWord));
}
static constexpr ALWAYS_INLINE bool GetSvcAllowedImpl(u8 *data, u32 id) {
constexpr size_t BitsPerWord = BITSIZEOF(*data);
MESOSPHERE_ASSERT(id < svc::SvcId_Count);
return (data[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0;
}
bool SetSvcAllowed(u32 id) {
if (id < BITSIZEOF(this->svc_access_flags)) {
SetSvcAllowedImpl(this->svc_access_flags, id);
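The Set/Clear/Get helpers above treat the SVC permission table as a flat bit array over bytes: bit (id % 8) of byte (id / 8). A minimal standalone model of the same indexing (names and the 0x80-entry table size are illustrative, not Atmosphere code):

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t BitsPerWord = 8;  /* BITSIZEOF(u8) */

void SetBit(std::uint8_t *data, std::uint32_t id)       { data[id / BitsPerWord] |=  (1u << (id % BitsPerWord)); }
void ClearBit(std::uint8_t *data, std::uint32_t id)     { data[id / BitsPerWord] &= ~(1u << (id % BitsPerWord)); }
bool GetBit(const std::uint8_t *data, std::uint32_t id) { return (data[id / BitsPerWord] & (1u << (id % BitsPerWord))) != 0; }

int main() {
    std::uint8_t flags[0x80 / BitsPerWord] = {};  /* one bit per SVC id */
    SetBit(flags, 0x36);                          /* 0x36 = SynchronizePreemptionState */
    std::printf("allowed: %d\n", GetBit(flags, 0x36));
    ClearBit(flags, 0x36);
    std::printf("allowed: %d\n", GetBit(flags, 0x36));
}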
@@ -266,16 +272,46 @@ namespace ams::kern {
ALWAYS_INLINE void CopySvcPermissionsTo(KThread::StackParameters &sp) const {
static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
/* Copy permissions. */
std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));
/* Clear specific SVCs based on our state. */
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);
-if (sp.is_preemption_state_pinned) {
+if (sp.is_pinned) {
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo);
}
}
ALWAYS_INLINE void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) const {
static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
/* Get whether we have access to return from exception. */
const bool return_from_exception = GetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
/* Clear all permissions. */
std::memset(sp.svc_permission, 0, sizeof(this->svc_access_flags));
/* Set specific SVCs based on our state. */
SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);
if (return_from_exception) {
SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo);
}
}
ALWAYS_INLINE void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) const {
static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
/* Get whether we have access to return from exception. */
const bool return_from_exception = GetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
/* Copy permissions. */
std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));
/* Clear/Set specific SVCs based on our state. */
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);
if (return_from_exception) {
SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
}
}
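Taken together, the copies implement a permission swap: a pinned thread may call only SynchronizePreemptionState, plus ReturnFromException and GetInfo if it currently holds ReturnFromException (i.e. it is inside a user exception handler); unpinning restores the process capability set, re-granting ReturnFromException only if it was in effect. Note that both paths must sample ReturnFromException before overwriting sp.svc_permission. A hedged three-flag model of the swap (a simplification, not kernel code):

struct Perms { bool sync_preempt, ret_from_exc, get_info; };

/* Model of CopyPinnedSvcPermissionsTo: sample, clear, then re-grant. */
Perms OnPin(Perms cur) {
    const bool had_rfe = cur.ret_from_exc;  /* sampled before clearing */
    Perms p{};                              /* memset(..., 0, ...) */
    p.sync_preempt = true;
    if (had_rfe) { p.ret_from_exc = true; p.get_info = true; }
    return p;
}

/* Model of CopyUnpinnedSvcPermissionsTo: sample, restore caps, adjust. */
Perms OnUnpin(Perms cur, Perms caps) {
    const bool had_rfe = cur.ret_from_exc;  /* sampled before the memcpy */
    Perms p = caps;                         /* memcpy from svc_access_flags */
    p.sync_preempt = false;                 /* always cleared */
    p.ret_from_exc = had_rfe;               /* kept only if already in effect */
    return p;
}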
constexpr bool IsPermittedInterrupt(u32 id) const {
constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]);
if (id < BITSIZEOF(this->irq_access_flags)) {


@@ -204,10 +204,32 @@ namespace ams::kern {
return this->pinned_threads[core_id];
}
void PinThread(s32 core_id, KThread *thread) {
MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
MESOSPHERE_ASSERT(thread != nullptr);
MESOSPHERE_ASSERT(this->pinned_threads[core_id] == nullptr);
this->pinned_threads[core_id] = thread;
}
void UnpinThread(s32 core_id, KThread *thread) {
MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
MESOSPHERE_ASSERT(thread != nullptr);
MESOSPHERE_ASSERT(this->pinned_threads[core_id] == thread);
this->pinned_threads[core_id] = nullptr;
}
void CopySvcPermissionsTo(KThread::StackParameters &sp) {
this->capabilities.CopySvcPermissionsTo(sp);
}
void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) {
this->capabilities.CopyPinnedSvcPermissionsTo(sp);
}
void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) {
this->capabilities.CopyUnpinnedSvcPermissionsTo(sp);
}
constexpr KResourceLimit *GetResourceLimit() const { return this->resource_limit; }
bool ReserveResource(ams::svc::LimitableResource which, s64 value);
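PinThread/UnpinThread maintain a strict one-pinned-thread-per-core invariant, and the asserts encode it: no double-pin on a core, and unpin must name the thread that was pinned. A standalone model of the table (hypothetical names, fixed core count for illustration):

#include <cassert>
#include <cstddef>

struct Thread;                      /* only pointers are stored */
constexpr std::size_t NumCores = 4;

struct PinnedTable {
    Thread *pinned[NumCores] = {};

    void Pin(int core, Thread *t) {
        assert(0 <= core && core < static_cast<int>(NumCores));
        assert(t != nullptr && pinned[core] == nullptr);  /* no double-pin on a core */
        pinned[core] = t;
    }
    void Unpin(int core, Thread *t) {
        assert(0 <= core && core < static_cast<int>(NumCores));
        assert(pinned[core] == t);                        /* must unpin the same thread */
        pinned[core] = nullptr;
    }
};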


@@ -131,6 +131,9 @@ namespace ams::kern {
static NOINLINE void ClearPreviousThread(KThread *thread);
static NOINLINE void PinCurrentThread(KProcess *cur_process);
static NOINLINE void UnpinCurrentThread(KProcess *cur_process);
static NOINLINE void OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state);
static NOINLINE void OnThreadPriorityChanged(KThread *thread, s32 old_priority);
static NOINLINE void OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core);


@@ -87,7 +87,7 @@ namespace ams::kern {
u8 current_svc_id;
bool is_calling_svc;
bool is_in_exception_handler;
-bool is_preemption_state_pinned;
+bool is_pinned;
s32 disable_count;
KThreadContext *context;
};
@@ -171,7 +171,7 @@ namespace ams::kern {
using ConditionVariableThreadTree = ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
WaiterList waiter_list{};
WaiterList paused_waiter_list{};
WaiterList pinned_waiter_list{};
KThread *lock_owner{};
ConditionVariableThreadTree *condvar_tree{};
uintptr_t debug_params[3]{};
@@ -249,6 +249,9 @@ namespace ams::kern {
this->GetStackParameters().disable_count--;
}
void Pin();
void Unpin();
NOINLINE void DisableCoreMigration();
NOINLINE void EnableCoreMigration();
@@ -281,7 +284,7 @@ namespace ams::kern {
ALWAYS_INLINE bool HasDpc() const {
MESOSPHERE_ASSERT_THIS();
-return this->GetDpc() != 0;;
+return this->GetDpc() != 0;
}
private:
void Suspend();


@@ -150,7 +150,7 @@ namespace ams::kern::arch::arm64 {
KScopedSchedulerLock lk;
/* Pin the current thread. */
-GetCurrentProcess().PinCurrentThread();
+KScheduler::PinCurrentThread(GetCurrentProcessPointer());
/* Set the interrupt flag for the thread. */
GetCurrentThread().SetInterruptFlag();


@@ -180,7 +180,7 @@ namespace ams::kern::arch::arm64 {
KScopedSchedulerLock sl;
/* Pin the current thread. */
-GetCurrentProcess().PinCurrentThread();
+KScheduler::PinCurrentThread(GetCurrentProcessPointer());
/* Set the interrupt flag for the thread. */
GetCurrentThread().SetInterruptFlag();
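Both user-exception entry paths shown above now route pinning through the scheduler. Condensed, the sequence is as follows (a sketch over the kernel's own types, not verbatim code; the function name is hypothetical):

/* Condensed sketch of the entry sequence above; assumes the surrounding
   mesosphere environment (KScopedSchedulerLock, KScheduler, accessors). */
void OnUserException() {
    /* All pin state is manipulated under the scheduler lock. */
    KScopedSchedulerLock lk;

    /* Pin the current thread to the current core. Via KThread::Pin(), this
       also disables core migration, blocks thread suspension, and swaps in
       the restricted pinned SVC permission set. */
    KScheduler::PinCurrentThread(GetCurrentProcessPointer());

    /* Flag the interrupt so a later SvcSynchronizePreemptionState knows
       there is preemption state to acknowledge. */
    GetCurrentThread().SetInterruptFlag();
}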


@@ -980,10 +980,6 @@ namespace ams::kern {
}
}
-void KProcess::PinCurrentThread() {
-MESOSPHERE_UNIMPLEMENTED();
-}
KProcess *KProcess::GetProcessFromId(u64 process_id) {
/* Lock the list. */
KProcess::ListAccessor accessor;


@@ -121,10 +121,10 @@ namespace ams::kern {
/* If the thread has no waiters, we need to check if the process has a thread pinned. */
if (top_thread->GetNumKernelWaiters() == 0) {
if (KProcess *parent = top_thread->GetOwnerProcess(); parent != nullptr) {
-if (KThread *suggested = parent->GetPinnedThread(core_id); suggested != nullptr && suggested != top_thread && suggested->GetNumKernelWaiters() == 0) {
+if (KThread *pinned = parent->GetPinnedThread(core_id); pinned != nullptr && pinned != top_thread) {
/* We prefer our parent's pinned thread if possible. However, we also don't want to schedule un-runnable threads. */
-if (suggested->GetRawState() == KThread::ThreadState_Runnable) {
-top_thread = suggested;
+if (pinned->GetRawState() == KThread::ThreadState_Runnable) {
+top_thread = pinned;
} else {
top_thread = nullptr;
}
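In words: if the queue's top thread has no kernel waiters and its parent process has pinned a different thread on this core, the pinned thread wins, unless it is not runnable, in which case nothing is scheduled from this slot. A simplified restatement (hypothetical types, not scheduler code):

enum class State { Runnable, Waiting };
struct Thread {
    State state;
    int num_kernel_waiters;
    Thread *pinned_sibling;  /* parent process's pinned thread on this core, if any */
};

/* Mirrors the pinned-thread preference above for a single candidate. */
Thread *ApplyPinnedPreference(Thread *top) {
    if (top != nullptr && top->num_kernel_waiters == 0) {
        if (Thread *pinned = top->pinned_sibling; pinned != nullptr && pinned != top) {
            /* Prefer the pinned thread, but never pick an un-runnable one. */
            return (pinned->state == State::Runnable) ? pinned : nullptr;
        }
    }
    return top;
}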
@@ -274,6 +274,36 @@ namespace ams::kern {
}
}
void KScheduler::PinCurrentThread(KProcess *cur_process) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* Get the current thread. */
const s32 core_id = GetCurrentCoreId();
KThread *cur_thread = GetCurrentThreadPointer();
/* Pin it. */
cur_process->PinThread(core_id, cur_thread);
cur_thread->Pin();
/* An update is needed. */
SetSchedulerUpdateNeeded();
}
void KScheduler::UnpinCurrentThread(KProcess *cur_process) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* Get the current thread. */
const s32 core_id = GetCurrentCoreId();
KThread *cur_thread = GetCurrentThreadPointer();
/* Unpin it. */
cur_thread->Unpin();
cur_process->UnpinThread(core_id, cur_thread);
/* An update is needed. */
SetSchedulerUpdateNeeded();
}
void KScheduler::OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());


@@ -324,12 +324,11 @@ namespace ams::kern {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
-/* Release user exception, if relevant. */
+/* Release user exception and unpin, if relevant. */
if (this->parent != nullptr) {
this->parent->ReleaseUserException(this);
if (this->parent->GetPinnedThread(GetCurrentCoreId()) == this) {
-/* TODO: this->parent->UnpinCurrentThread(); */
-MESOSPHERE_UNIMPLEMENTED();
+KScheduler::UnpinCurrentThread(this->parent);
}
}
@@ -376,6 +375,113 @@ namespace ams::kern {
this->FinishTermination();
}
void KThread::Pin() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Set ourselves as pinned. */
this->GetStackParameters().is_pinned = true;
/* Disable core migration. */
MESOSPHERE_ASSERT(this->num_core_migration_disables == 0);
{
++this->num_core_migration_disables;
/* Save our ideal state to restore when we're unpinned. */
this->original_ideal_core_id = this->ideal_core_id;
this->original_affinity_mask = this->affinity_mask;
/* Bind ourselves to this core. */
const s32 active_core = this->GetActiveCore();
const s32 current_core = GetCurrentCoreId();
this->SetActiveCore(current_core);
this->ideal_core_id = current_core;
this->affinity_mask.SetAffinityMask(1ul << current_core);
if (active_core != current_core || this->affinity_mask.GetAffinityMask() != this->original_affinity_mask.GetAffinityMask()) {
KScheduler::OnThreadAffinityMaskChanged(this, this->original_affinity_mask, active_core);
}
}
/* Disallow performing thread suspension. */
{
/* Update our allow flags. */
this->suspend_allowed_flags &= ~(1 << (SuspendType_Thread + ThreadState_SuspendShift));
/* Update our state. */
const ThreadState old_state = this->thread_state;
this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
if (this->thread_state != old_state) {
KScheduler::OnThreadStateChanged(this, old_state);
}
}
/* Update our SVC access permissions. */
MESOSPHERE_ASSERT(this->parent != nullptr);
this->parent->CopyPinnedSvcPermissionsTo(this->GetStackParameters());
}
void KThread::Unpin() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Set ourselves as unpinned. */
this->GetStackParameters().is_pinned = false;
/* Enable core migration. */
MESOSPHERE_ASSERT(this->num_core_migration_disables == 1);
{
--this->num_core_migration_disables;
/* Restore our original state. */
const KAffinityMask old_mask = this->affinity_mask;
this->ideal_core_id = this->original_ideal_core_id;
this->affinity_mask = this->original_affinity_mask;
if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
const s32 active_core = this->GetActiveCore();
if (!this->affinity_mask.GetAffinity(active_core)) {
if (this->ideal_core_id >= 0) {
this->SetActiveCore(this->ideal_core_id);
} else {
this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask()));
}
}
KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
}
}
/* Allow performing thread suspension (if termination hasn't been requested). */
{
/* Update our allow flags. */
if (!this->IsTerminationRequested()) {
this->suspend_allowed_flags |= (1 << (SuspendType_Thread + ThreadState_SuspendShift));
}
/* Update our state. */
const ThreadState old_state = this->thread_state;
this->thread_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
if (this->thread_state != old_state) {
KScheduler::OnThreadStateChanged(this, old_state);
}
}
/* Update our SVC access permissions. */
MESOSPHERE_ASSERT(this->parent != nullptr);
this->parent->CopyUnpinnedSvcPermissionsTo(this->GetStackParameters());
/* Resume any threads that began waiting on us while we were pinned. */
for (auto it = this->pinned_waiter_list.begin(); it != this->pinned_waiter_list.end(); ++it) {
if (it->GetState() == ThreadState_Waiting) {
it->SetState(ThreadState_Runnable);
}
}
}
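Two details of Pin/Unpin are easy to miss. First, the fallback expression BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(mask) in Unpin selects the highest-numbered core still present in the restored affinity mask. Second, suspension is gated through suspend_allowed_flags: suspend request bits live above a low state mask, and the visible state is recomputed as GetSuspendFlags() | (old_state & ThreadState_Mask), so masking a type out of the allow flags suppresses a pending request of that type without discarding it. A standalone model of that encoding (bit positions illustrative only, not the kernel's values):

#include <cstdint>
#include <cstdio>

constexpr std::uint32_t ThreadState_Mask         = 0xF;
constexpr std::uint32_t ThreadState_SuspendShift = 4;
enum SuspendType { SuspendType_Process = 0, SuspendType_Thread = 1 };

struct ThreadBits {
    std::uint32_t state = 1;                 /* e.g. Runnable */
    std::uint32_t suspend_request_flags = 0; /* stored pre-shifted, like the allow flags */
    std::uint32_t suspend_allowed_flags = ~0u;

    std::uint32_t GetSuspendFlags() const { return suspend_allowed_flags & suspend_request_flags; }
    void Recompute() { state = GetSuspendFlags() | (state & ThreadState_Mask); }
};

int main() {
    ThreadBits t;
    t.suspend_request_flags |= 1u << (SuspendType_Thread + ThreadState_SuspendShift);

    /* Pinning masks out thread suspension: the pending request stops affecting state. */
    t.suspend_allowed_flags &= ~(1u << (SuspendType_Thread + ThreadState_SuspendShift));
    t.Recompute();
    std::printf("pinned state=0x%x\n", static_cast<unsigned>(t.state));   /* 0x1 */

    /* Unpinning re-allows it, and the still-pending request takes effect again. */
    t.suspend_allowed_flags |= 1u << (SuspendType_Thread + ThreadState_SuspendShift);
    t.Recompute();
    std::printf("unpinned state=0x%x\n", static_cast<unsigned>(t.state)); /* 0x21 */
}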
void KThread::DisableCoreMigration() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
@@ -387,7 +493,7 @@ namespace ams::kern {
this->original_ideal_core_id = this->ideal_core_id;
this->original_affinity_mask = this->affinity_mask;
-/* Bind outselves to this core. */
+/* Bind ourselves to this core. */
const s32 active_core = this->GetActiveCore();
this->ideal_core_id = active_core;
this->affinity_mask.SetAffinityMask(1ul << active_core);


@@ -112,6 +112,21 @@ namespace ams::kern::svc {
return ResultSuccess();
}
void SynchronizePreemptionState() {
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* If the current thread is pinned, unpin it. */
KProcess *cur_process = GetCurrentProcessPointer();
if (cur_process->GetPinnedThread(GetCurrentCoreId()) == GetCurrentThreadPointer()) {
/* Clear the current thread's interrupt flag. */
GetCurrentThread().ClearInterruptFlag();
/* Unpin the current thread. */
KScheduler::UnpinCurrentThread(cur_process);
}
}
}
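The SVC's contract, then: if the caller is the thread currently pinned on this core, clear its interrupt flag and unpin it (restoring migration, suspension, and the full SVC permission set); otherwise it is a no-op. On aarch64 Horizon this is SVC 0x36, so a userland exception handler would acknowledge preemption state with an invocation like the following (a hedged sketch; the wrapper and handler names are hypothetical, and real SDK bindings may differ):

/* Raw aarch64 invocation of SVC 0x36 (SynchronizePreemptionState).
   No inputs, no outputs. */
static inline void SynchronizePreemptionState() {
    __asm__ __volatile__("svc #0x36" ::: "memory");
}

/* Hypothetical user exception handler epilogue: once the handler has dealt
   with the interruption, acknowledge it so the kernel unpins the thread. */
void OnExceptionHandled() {
    SynchronizePreemptionState();
    /* Full SVC permissions are restored once this returns. */
}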
/* ============================= 64 ABI ============================= */
@@ -133,7 +148,7 @@ namespace ams::kern::svc {
}
void SynchronizePreemptionState64() {
MESOSPHERE_PANIC("Stubbed SvcSynchronizePreemptionState64 was called.");
return SynchronizePreemptionState();
}
/* ============================= 64From32 ABI ============================= */
@@ -155,7 +170,7 @@ namespace ams::kern::svc {
}
void SynchronizePreemptionState64From32() {
MESOSPHERE_PANIC("Stubbed SvcSynchronizePreemptionState64From32 was called.");
return SynchronizePreemptionState();
}
}