From 28f9b534b60b2da1a852c9c22669ad1a259f641a Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Tue, 1 Dec 2020 15:54:31 -0800
Subject: [PATCH] kern: implement 64-virtual-core interface

---
 .../board/nintendo/nx/kern_cpu_map.hpp        |  33 ++++
 .../include/mesosphere/kern_k_thread.hpp      |  32 ++--
 .../include/mesosphere/kern_select_cpu.hpp    |  21 +++
 .../libmesosphere/source/kern_k_process.cpp   |   2 +-
 .../libmesosphere/source/kern_k_thread.cpp    | 157 ++++++++++--------
 .../source/svc/kern_svc_debug.cpp             |  12 +-
 .../source/svc/kern_svc_info.cpp              |  37 +++--
 .../source/svc/kern_svc_processor.cpp         |  26 ++-
 8 files changed, 224 insertions(+), 96 deletions(-)
 create mode 100644 libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_cpu_map.hpp

diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_cpu_map.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_cpu_map.hpp
new file mode 100644
index 000000000..8b4ee7598
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_cpu_map.hpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <mesosphere/kern_common.hpp>
+
+namespace ams::kern::board::nintendo::nx::impl::cpu {
+
+    /* Virtual to Physical core map. */
+    constexpr inline const s32 VirtualToPhysicalCoreMap[BITSIZEOF(u64)] = {
+        0, 1, 2, 3, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0, 3,
+    };
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
index db75c317b..71a01dba7 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
@@ -152,7 +152,8 @@ namespace ams::kern {
             ConditionVariableThreadTree *condvar_tree{};
             uintptr_t condvar_key{};
-            KAffinityMask affinity_mask{};
+            u64 virtual_affinity_mask{};
+            KAffinityMask physical_affinity_mask{};
             u64 thread_id{};
             std::atomic<s64> cpu_time{};
             KSynchronizationObject *synced_object{};
@@ -181,12 +182,13 @@ namespace ams::kern {
             Result wait_result;
             Result debug_exception_result;
             s32 base_priority{};
-            s32 ideal_core_id{};
+            s32 physical_ideal_core_id{};
+            s32 virtual_ideal_core_id{};
             s32 num_kernel_waiters{};
             s32 current_core_id{};
             s32 core_id{};
-            KAffinityMask original_affinity_mask{};
-            s32 original_ideal_core_id{};
+            KAffinityMask original_physical_affinity_mask{};
+            s32 original_physical_ideal_core_id{};
             s32 num_core_migration_disables{};
             ThreadState thread_state{};
             std::atomic<bool> termination_requested{};
@@ -202,21 +204,21 @@ namespace ams::kern {
 
             virtual ~KThread() { /* ... */ }
 
-            Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type);
+            Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type);
         private:
-            static Result InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type);
+            static Result InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type);
         public:
-            static Result InitializeKernelThread(KThread *thread, KThreadFunction func, uintptr_t arg, s32 prio, s32 core) {
-                return InitializeThread(thread, func, arg, Null<KProcessAddress>, prio, core, nullptr, ThreadType_Kernel);
+            static Result InitializeKernelThread(KThread *thread, KThreadFunction func, uintptr_t arg, s32 prio, s32 virt_core) {
+                return InitializeThread(thread, func, arg, Null<KProcessAddress>, prio, virt_core, nullptr, ThreadType_Kernel);
             }
 
             static Result InitializeHighPriorityThread(KThread *thread, KThreadFunction func, uintptr_t arg) {
                 return InitializeThread(thread, func, arg, Null<KProcessAddress>, 0, GetCurrentCoreId(), nullptr, ThreadType_HighPriority);
             }
 
-            static Result InitializeUserThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner) {
-                return InitializeThread(thread, func, arg, user_stack_top, prio, core, owner, ThreadType_User);
+            static Result InitializeUserThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner) {
+                return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, ThreadType_User);
             }
 
             static void ResumeThreadsSuspendedForInit();
@@ -323,10 +325,14 @@ namespace ams::kern {
             constexpr KThreadContext &GetContext() { return this->thread_context; }
             constexpr const KThreadContext &GetContext() const { return this->thread_context; }
 
-            constexpr const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; }
+            constexpr const u64 GetVirtualAffinityMask() const { return this->virtual_affinity_mask; }
+            constexpr const KAffinityMask &GetAffinityMask() const { return this->physical_affinity_mask; }
+
+            Result GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
             Result SetCoreMask(int32_t ideal_core, u64 affinity_mask);
 
+            Result GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
+
             constexpr ThreadState GetState() const { return static_cast<ThreadState>(this->thread_state & ThreadState_Mask); }
             constexpr ThreadState GetRawState() const { return this->thread_state; }
             NOINLINE void SetState(ThreadState state);
@@ -374,7 +380,9 @@ namespace ams::kern {
                 return this->condvar_tree != nullptr;
             }
 
-            constexpr s32 GetIdealCore() const { return this->ideal_core_id; }
+            constexpr s32 GetIdealVirtualCore() const { return this->virtual_ideal_core_id; }
+            constexpr s32 GetIdealPhysicalCore() const { return this->physical_ideal_core_id; }
+
             constexpr s32 GetActiveCore() const { return this->core_id; }
             constexpr void SetActiveCore(s32 core) { this->core_id = core; }
diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp
index 276ed1439..3bd1b054c 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp
@@ -28,3 +28,24 @@
 #else
     #error "Unknown architecture for CPU"
 #endif
+
+#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
+
+    #include <mesosphere/board/nintendo/nx/kern_cpu_map.hpp>
+
+    namespace ams::kern::cpu {
+
+        using namespace ams::kern::board::nintendo::nx::impl::cpu;
+
+    }
+
+#else
+    #error "Unknown board for CPU Map"
+#endif
+
+namespace ams::kern {
+
+    static_assert(cpu::NumCores <= static_cast<s32>(BITSIZEOF(u64)));
+    static_assert(util::size(cpu::VirtualToPhysicalCoreMap) == BITSIZEOF(u64));
+
+}
diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp
index 7a7a7ff58..7a01aad9b 100644
--- a/libraries/libmesosphere/source/kern_k_process.cpp
+++ b/libraries/libmesosphere/source/kern_k_process.cpp
@@ -922,7 +922,7 @@ namespace ams::kern {
         mem_reservation.Commit();
 
         /* Note for debug that we're running a new process. */
-        MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", this->process_id, this->name, main_thread->GetId(), main_thread->GetAffinityMask().GetAffinityMask(), main_thread->GetIdealCore(), main_thread->GetActiveCore());
+        MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", this->process_id, this->name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore());
 
         return ResultSuccess();
     }
diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp
index 2a484c7a2..1e2f7e581 100644
--- a/libraries/libmesosphere/source/kern_k_thread.cpp
+++ b/libraries/libmesosphere/source/kern_k_thread.cpp
@@ -38,13 +38,17 @@ namespace ams::kern {
 
     }
 
-    Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
+    Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type) {
         /* Assert parameters are valid. */
         MESOSPHERE_ASSERT_THIS();
         MESOSPHERE_ASSERT(kern_stack_top != nullptr);
         MESOSPHERE_ASSERT((type == ThreadType_Main) || (ams::svc::HighestThreadPriority <= prio && prio <= ams::svc::LowestThreadPriority));
         MESOSPHERE_ASSERT((owner != nullptr) || (type != ThreadType_User));
-        MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
+        MESOSPHERE_ASSERT(0 <= virt_core && virt_core < static_cast<s32>(BITSIZEOF(u64)));
+
+        /* Convert the virtual core to a physical core. */
+        const s32 phys_core = cpu::VirtualToPhysicalCoreMap[virt_core];
+        MESOSPHERE_ASSERT(0 <= phys_core && phys_core < static_cast<s32>(cpu::NumCores));
 
         /* First, clear the TLS address. */
         this->tls_address = Null<KProcessAddress>;
@@ -60,7 +64,7 @@ namespace ams::kern {
                 [[fallthrough]];
             case ThreadType_HighPriority:
                 {
-                    MESOSPHERE_ASSERT(core == GetCurrentCoreId());
+                    MESOSPHERE_ASSERT(phys_core == GetCurrentCoreId());
                 }
                 [[fallthrough]];
             case ThreadType_Kernel:
@@ -71,8 +75,8 @@ namespace ams::kern {
                 [[fallthrough]];
             case ThreadType_User:
                 {
-                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetCoreMask() | (1ul << core)) == owner->GetCoreMask()));
-                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetPriorityMask() | (1ul << prio)) == owner->GetPriorityMask()));
+                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetCoreMask() | (1ul << virt_core)) == owner->GetCoreMask()));
+                    MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetPriorityMask() | (1ul << prio)) == owner->GetPriorityMask()));
                 }
                 break;
             default:
@@ -81,8 +85,10 @@ namespace ams::kern {
         }
 
         /* Set the ideal core ID and affinity mask. */
-        this->ideal_core_id = core;
-        this->affinity_mask.SetAffinity(core, true);
+        this->virtual_ideal_core_id = virt_core;
+        this->physical_ideal_core_id = phys_core;
+        this->virtual_affinity_mask = (static_cast<u64>(1) << virt_core);
+        this->physical_affinity_mask.SetAffinity(phys_core, true);
 
         /* Set the thread state. */
         this->thread_state = (type == ThreadType_Main) ? ThreadState_Runnable : ThreadState_Initialized;
@@ -103,7 +109,7 @@ namespace ams::kern {
         this->cancellable = false;
 
         /* Set core ID and wait result. */
-        this->core_id = this->ideal_core_id;
+        this->core_id = phys_core;
         this->wait_result = svc::ResultNoSynchronizationObject();
 
         /* Set the stack top. */
@@ -141,7 +147,7 @@ namespace ams::kern {
         this->num_kernel_waiters = 0;
 
         /* Set our current core id. */
-        this->current_core_id = core;
+        this->current_core_id = phys_core;
 
         /* We haven't released our resource limit hint, and we've spent no time on the cpu. */
         this->resource_limit_release_hint = 0;
@@ -390,20 +396,19 @@ namespace ams::kern {
         ++this->num_core_migration_disables;
 
         /* Save our ideal state to restore when we're unpinned. */
-        this->original_ideal_core_id = this->ideal_core_id;
-        this->original_affinity_mask = this->affinity_mask;
+        this->original_physical_ideal_core_id = this->physical_ideal_core_id;
+        this->original_physical_affinity_mask = this->physical_affinity_mask;
 
         /* Bind ourselves to this core. */
         const s32 active_core  = this->GetActiveCore();
         const s32 current_core = GetCurrentCoreId();
 
         this->SetActiveCore(current_core);
-        this->ideal_core_id = current_core;
+        this->physical_ideal_core_id = current_core;
+        this->physical_affinity_mask.SetAffinityMask(1ul << current_core);
 
-        this->affinity_mask.SetAffinityMask(1ul << current_core);
-
-        if (active_core != current_core || this->affinity_mask.GetAffinityMask() != this->original_affinity_mask.GetAffinityMask()) {
-            KScheduler::OnThreadAffinityMaskChanged(this, this->original_affinity_mask, active_core);
+        if (active_core != current_core || this->physical_affinity_mask.GetAffinityMask() != this->original_physical_affinity_mask.GetAffinityMask()) {
+            KScheduler::OnThreadAffinityMaskChanged(this, this->original_physical_affinity_mask, active_core);
         }
     }
@@ -438,19 +443,19 @@ namespace ams::kern {
         --this->num_core_migration_disables;
 
         /* Restore our original state. */
-        const KAffinityMask old_mask = this->affinity_mask;
+        const KAffinityMask old_mask = this->physical_affinity_mask;
 
-        this->ideal_core_id = this->original_ideal_core_id;
-        this->affinity_mask = this->original_affinity_mask;
+        this->physical_ideal_core_id = this->original_physical_ideal_core_id;
+        this->physical_affinity_mask = this->original_physical_affinity_mask;
 
-        if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+        if (this->physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
             const s32 active_core = this->GetActiveCore();
 
-            if (!this->affinity_mask.GetAffinity(active_core)) {
-                if (this->ideal_core_id >= 0) {
-                    this->SetActiveCore(this->ideal_core_id);
+            if (!this->physical_affinity_mask.GetAffinity(active_core)) {
+                if (this->physical_ideal_core_id >= 0) {
+                    this->SetActiveCore(this->physical_ideal_core_id);
                 } else {
-                    this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask()));
+                    this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->physical_affinity_mask.GetAffinityMask()));
                 }
             }
             KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
@@ -492,16 +497,16 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0);
         if ((this->num_core_migration_disables++) == 0) {
             /* Save our ideal state to restore when we can migrate again. */
-            this->original_ideal_core_id = this->ideal_core_id;
-            this->original_affinity_mask = this->affinity_mask;
+            this->original_physical_ideal_core_id = this->physical_ideal_core_id;
+            this->original_physical_affinity_mask = this->physical_affinity_mask;
 
             /* Bind ourselves to this core. */
             const s32 active_core = this->GetActiveCore();
-            this->ideal_core_id = active_core;
-            this->affinity_mask.SetAffinityMask(1ul << active_core);
+            this->physical_ideal_core_id = active_core;
+            this->physical_affinity_mask.SetAffinityMask(1ul << active_core);
 
-            if (this->affinity_mask.GetAffinityMask() != this->original_affinity_mask.GetAffinityMask()) {
-                KScheduler::OnThreadAffinityMaskChanged(this, this->original_affinity_mask, active_core);
+            if (this->physical_affinity_mask.GetAffinityMask() != this->original_physical_affinity_mask.GetAffinityMask()) {
+                KScheduler::OnThreadAffinityMaskChanged(this, this->original_physical_affinity_mask, active_core);
             }
         }
     }
@@ -513,20 +518,20 @@ namespace ams::kern {
         KScopedSchedulerLock sl;
         MESOSPHERE_ASSERT(this->num_core_migration_disables > 0);
         if ((--this->num_core_migration_disables) == 0) {
-            const KAffinityMask old_mask = this->affinity_mask;
+            const KAffinityMask old_mask = this->physical_affinity_mask;
 
             /* Restore our ideals. */
-            this->ideal_core_id = this->original_ideal_core_id;
-            this->affinity_mask = this->original_affinity_mask;
+            this->physical_ideal_core_id = this->original_physical_ideal_core_id;
+            this->physical_affinity_mask = this->original_physical_affinity_mask;
 
-            if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+            if (this->physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                 const s32 active_core = this->GetActiveCore();
 
-                if (!this->affinity_mask.GetAffinity(active_core)) {
-                    if (this->ideal_core_id >= 0) {
-                        this->SetActiveCore(this->ideal_core_id);
+                if (!this->physical_affinity_mask.GetAffinity(active_core)) {
+                    if (this->physical_ideal_core_id >= 0) {
+                        this->SetActiveCore(this->physical_ideal_core_id);
                     } else {
-                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask()));
+                        this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->physical_affinity_mask.GetAffinityMask()));
                     }
                 }
                 KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
@@ -535,6 +540,19 @@ namespace ams::kern {
         }
     }
 
     Result KThread::GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
+        MESOSPHERE_ASSERT_THIS();
+        {
+            KScopedSchedulerLock sl;
+
+            /* Get the virtual mask. */
+            *out_ideal_core    = this->virtual_ideal_core_id;
+            *out_affinity_mask = this->virtual_affinity_mask;
+        }
+
+        return ResultSuccess();
+    }
+
+    Result KThread::GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
         MESOSPHERE_ASSERT_THIS();
         {
             KScopedSchedulerLock sl;
@@ -542,63 +560,72 @@ namespace ams::kern {
 
             /* Select between core mask and original core mask. */
             if (this->num_core_migration_disables == 0) {
-                *out_ideal_core    = this->ideal_core_id;
-                *out_affinity_mask = this->affinity_mask.GetAffinityMask();
+                *out_ideal_core    = this->physical_ideal_core_id;
+                *out_affinity_mask = this->physical_affinity_mask.GetAffinityMask();
             } else {
-                *out_ideal_core    = this->original_ideal_core_id;
-                *out_affinity_mask = this->original_affinity_mask.GetAffinityMask();
+                *out_ideal_core    = this->original_physical_ideal_core_id;
+                *out_affinity_mask = this->original_physical_affinity_mask.GetAffinityMask();
             }
         }
 
         return ResultSuccess();
     }
 
-    Result KThread::SetCoreMask(int32_t ideal_core, u64 affinity_mask) {
+    Result KThread::SetCoreMask(int32_t core_id, u64 v_affinity_mask) {
         MESOSPHERE_ASSERT_THIS();
         MESOSPHERE_ASSERT(this->parent != nullptr);
-        MESOSPHERE_ASSERT(affinity_mask != 0);
+        MESOSPHERE_ASSERT(v_affinity_mask != 0);
         KScopedLightLock lk(this->activity_pause_lock);
 
         /* Set the core mask. */
+        u64 p_affinity_mask = 0;
         {
             KScopedSchedulerLock sl;
             MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0);
 
             /* If the core id is no-update magic, preserve the ideal core id. */
-            if (ideal_core == ams::svc::IdealCoreNoUpdate) {
-                if (this->num_core_migration_disables == 0) {
-                    ideal_core = this->ideal_core_id;
-                } else {
-                    ideal_core = this->original_ideal_core_id;
-                }
+            if (core_id == ams::svc::IdealCoreNoUpdate) {
+                core_id = this->virtual_ideal_core_id;
+                R_UNLESS(((1ul << core_id) & v_affinity_mask) != 0, svc::ResultInvalidCombination());
+            }
 
-                R_UNLESS(((1ul << ideal_core) & affinity_mask) != 0, svc::ResultInvalidCombination());
+            /* Set the virtual core/affinity mask. */
+            this->virtual_ideal_core_id = core_id;
+            this->virtual_affinity_mask = v_affinity_mask;
+
+            /* Translate the virtual core to a physical core. */
+            if (core_id >= 0) {
+                core_id = cpu::VirtualToPhysicalCoreMap[core_id];
+            }
+
+            /* Translate the virtual affinity mask to a physical one. */
+            while (v_affinity_mask != 0) {
+                const u64 next = __builtin_ctzll(v_affinity_mask);
+                v_affinity_mask &= ~(1ul << next);
+                p_affinity_mask |= (1ul << cpu::VirtualToPhysicalCoreMap[next]);
             }
 
             /* If we haven't disabled migration, perform an affinity change. */
             if (this->num_core_migration_disables == 0) {
-                const KAffinityMask old_mask = this->affinity_mask;
+                const KAffinityMask old_mask = this->physical_affinity_mask;
 
                 /* Set our new ideals. */
-                this->ideal_core_id = ideal_core;
-                this->affinity_mask.SetAffinityMask(affinity_mask);
+                this->physical_ideal_core_id = core_id;
+                this->physical_affinity_mask.SetAffinityMask(p_affinity_mask);
 
-                if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+                if (this->physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                     const s32 active_core = this->GetActiveCore();
 
-                    if (active_core >= 0) {
-                        if (!this->affinity_mask.GetAffinity(active_core)) {
-                            this->SetActiveCore(this->ideal_core_id);
-                        } else {
-                            this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask()));
-                        }
+                    if (active_core >= 0 && !this->physical_affinity_mask.GetAffinity(active_core)) {
+                        const s32 new_core = this->physical_ideal_core_id >= 0 ? this->physical_ideal_core_id : BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->physical_affinity_mask.GetAffinityMask());
+                        this->SetActiveCore(new_core);
                     }
                     KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
                 }
             } else {
                 /* Otherwise, we edit the original affinity for restoration later. */
-                this->original_ideal_core_id = ideal_core;
-                this->original_affinity_mask.SetAffinityMask(affinity_mask);
+                this->original_physical_ideal_core_id = core_id;
+                this->original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
             }
         }
@@ -627,7 +654,7 @@ namespace ams::kern {
             }
 
             /* If the thread is currently running, check whether it's no longer allowed under the new mask. */
-            if (thread_is_current && ((1ul << thread_core) & affinity_mask) == 0) {
+            if (thread_is_current && ((1ul << thread_core) & p_affinity_mask) == 0) {
                 /* If the thread is pinned, we want to wait until it's not pinned. */
                 if (this->GetStackParameters().is_pinned) {
                     /* Verify that the current thread isn't terminating. */
@@ -1127,7 +1154,7 @@ namespace ams::kern {
 
         /* If the thread is runnable, send a termination interrupt to other cores. */
         if (this->GetState() == ThreadState_Runnable) {
-            if (const u64 core_mask = this->affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
+            if (const u64 core_mask = this->physical_affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
                 cpu::DataSynchronizationBarrier();
                 Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_ThreadTerminate, core_mask);
             }
diff --git a/libraries/libmesosphere/source/svc/kern_svc_debug.cpp b/libraries/libmesosphere/source/svc/kern_svc_debug.cpp
index 2377117e0..b2156d386 100644
--- a/libraries/libmesosphere/source/svc/kern_svc_debug.cpp
+++ b/libraries/libmesosphere/source/svc/kern_svc_debug.cpp
@@ -363,7 +363,11 @@ namespace ams::kern::svc {
                 case ams::svc::DebugThreadParam_IdealCore:
                     {
                         /* Get the ideal core. */
-                        *out_32 = thread->GetIdealCore();
+                        s32 core_id;
+                        u64 affinity_mask;
+                        thread->GetPhysicalCoreMask(std::addressof(core_id), std::addressof(affinity_mask));
+
+                        *out_32 = core_id;
                     }
                     break;
                 case ams::svc::DebugThreadParam_CurrentCore:
@@ -375,7 +379,11 @@ namespace ams::kern::svc {
                 case ams::svc::DebugThreadParam_AffinityMask:
                     {
                         /* Get the affinity mask. */
-                        *out_32 = thread->GetAffinityMask().GetAffinityMask();
+                        s32 core_id;
+                        u64 affinity_mask;
+                        thread->GetPhysicalCoreMask(std::addressof(core_id), std::addressof(affinity_mask));
+
+                        *out_32 = affinity_mask;
                     }
                     break;
                 default:
diff --git a/libraries/libmesosphere/source/svc/kern_svc_info.cpp b/libraries/libmesosphere/source/svc/kern_svc_info.cpp
index 42a8b4cd3..adab13b27 100644
--- a/libraries/libmesosphere/source/svc/kern_svc_info.cpp
+++ b/libraries/libmesosphere/source/svc/kern_svc_info.cpp
@@ -206,28 +206,35 @@ namespace ams::kern::svc {
             case ams::svc::InfoType_ThreadTickCount:
                 {
                     /* Verify the requested core is valid. */
-                    const bool core_valid = (info_subtype == static_cast<u64>(-1ul)) || (info_subtype < cpu::NumCores);
+                    const bool core_valid = (info_subtype == static_cast<u64>(-1ul)) || (info_subtype < util::size(cpu::VirtualToPhysicalCoreMap));
                     R_UNLESS(core_valid, svc::ResultInvalidCombination());
 
                     /* Get the thread from its handle. */
                     KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject<KThread>(handle);
                     R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
 
-                    /* Get the tick count. */
+                    /* Disable interrupts while we get the tick count. */
                     s64 tick_count;
-                    if (info_subtype == static_cast<u64>(-1ul)) {
-                        tick_count = thread->GetCpuTime();
-                        if (GetCurrentThreadPointer() == thread.GetPointerUnsafe()) {
-                            const s64 cur_tick = KHardwareTimer::GetTick();
-                            const s64 prev_switch = Kernel::GetScheduler().GetLastContextSwitchTime();
-                            tick_count += (cur_tick - prev_switch);
-                        }
-                    } else {
-                        tick_count = thread->GetCpuTime(static_cast<s32>(info_subtype));
-                        if (GetCurrentThreadPointer() == thread.GetPointerUnsafe() && static_cast<s32>(info_subtype) == GetCurrentCoreId()) {
-                            const s64 cur_tick = KHardwareTimer::GetTick();
-                            const s64 prev_switch = Kernel::GetScheduler().GetLastContextSwitchTime();
-                            tick_count += (cur_tick - prev_switch);
+                    {
+                        KScopedInterruptDisable di;
+
+                        if (info_subtype == static_cast<u64>(-1ul)) {
+                            tick_count = thread->GetCpuTime();
+                            if (GetCurrentThreadPointer() == thread.GetPointerUnsafe()) {
+                                const s64 cur_tick = KHardwareTimer::GetTick();
+                                const s64 prev_switch = Kernel::GetScheduler().GetLastContextSwitchTime();
+                                tick_count += (cur_tick - prev_switch);
+                            }
+                        } else {
+                            const s32 phys_core = cpu::VirtualToPhysicalCoreMap[info_subtype];
+                            MESOSPHERE_ABORT_UNLESS(phys_core < static_cast<s32>(cpu::NumCores));
+
+                            tick_count = thread->GetCpuTime(phys_core);
+                            if (GetCurrentThreadPointer() == thread.GetPointerUnsafe() && phys_core == GetCurrentCoreId()) {
+                                const s64 cur_tick = KHardwareTimer::GetTick();
+                                const s64 prev_switch = Kernel::GetScheduler().GetLastContextSwitchTime();
+                                tick_count += (cur_tick - prev_switch);
+                            }
                         }
                     }
diff --git a/libraries/libmesosphere/source/svc/kern_svc_processor.cpp b/libraries/libmesosphere/source/svc/kern_svc_processor.cpp
index 61e25b72b..f590a56c9 100644
--- a/libraries/libmesosphere/source/svc/kern_svc_processor.cpp
+++ b/libraries/libmesosphere/source/svc/kern_svc_processor.cpp
@@ -22,7 +22,31 @@ namespace ams::kern::svc {
     namespace {
 
         int32_t GetCurrentProcessorNumber() {
-            return GetCurrentCoreId();
+            /* Setup variables to track affinity information. */
+            s32 current_phys_core;
+            u64 v_affinity_mask = 0;
+
+            /* Forever try to get the affinity. */
+            while (true) {
+                /* Update affinity information if we've run out. */
+                while (v_affinity_mask == 0) {
+                    current_phys_core = GetCurrentCoreId();
+                    v_affinity_mask   = GetCurrentThread().GetVirtualAffinityMask();
+                    if ((v_affinity_mask & (1ul << current_phys_core)) != 0) {
+                        return current_phys_core;
+                    }
+                }
+
+                /* Check the next virtual bit. */
+                do {
+                    const s32 next_virt_core = static_cast<s32>(__builtin_ctzll(v_affinity_mask));
+                    if (current_phys_core == cpu::VirtualToPhysicalCoreMap[next_virt_core]) {
+                        return next_virt_core;
+                    }
+
+                    v_affinity_mask &= ~(1ul << next_virt_core);
+                } while (v_affinity_mask != 0);
+            }
         }
 
     }
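
A note on the scheme, with a minimal sketch. User-visible core IDs and affinity masks are now virtual: a thread stores a 64-bit virtual mask, and the kernel translates it through cpu::VirtualToPhysicalCoreMap before anything reaches the scheduler, exactly as in the KThread::SetCoreMask hunk above. The standalone program below re-implements that translation loop; VirtualToPhysicalCore and the example mask are stand-ins for illustration, not kernel code, and __builtin_ctzll assumes GCC/Clang.

```cpp
#include <cstdint>
#include <cstdio>

/* Stand-in for cpu::VirtualToPhysicalCoreMap on NX: virtual cores 0-3 map to
 * physical cores 0-3, virtual core 63 aliases physical core 3, and all other
 * virtual cores fall back to physical core 0. */
constexpr int VirtualToPhysicalCore(int virt_core) {
    if (0 <= virt_core && virt_core <= 3) { return virt_core; }
    if (virt_core == 63) { return 3; }
    return 0;
}

/* Translate a 64-bit virtual affinity mask into a physical one the same way
 * KThread::SetCoreMask does: pop each set virtual bit and OR in the bit of
 * the physical core it maps to. */
uint64_t VirtualToPhysicalAffinityMask(uint64_t v_mask) {
    uint64_t p_mask = 0;
    while (v_mask != 0) {
        const int next = __builtin_ctzll(v_mask); /* lowest set virtual core */
        v_mask &= ~(UINT64_C(1) << next);
        p_mask |= (UINT64_C(1) << VirtualToPhysicalCore(next));
    }
    return p_mask;
}

int main() {
    /* Virtual cores 2, 3, and 63 collapse onto physical cores 2 and 3 (0xc). */
    const uint64_t v_mask = (UINT64_C(1) << 2) | (UINT64_C(1) << 3) | (UINT64_C(1) << 63);
    std::printf("virtual 0x%016llx -> physical 0x%llx\n",
                static_cast<unsigned long long>(v_mask),
                static_cast<unsigned long long>(VirtualToPhysicalAffinityMask(v_mask)));
    return 0;
}
```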
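Likewise, svcGetCurrentProcessorNumber can no longer simply return the physical core id: a thread whose mask contains only virtual core 63 runs on physical core 3 but should observe core 63. The sketch below restates the patched lookup under a simplifying assumption, reusing VirtualToPhysicalCore from the previous sketch: it takes a consistent snapshot of the core id and mask as inputs, whereas the kernel version re-reads both in a loop because the thread can migrate between the two reads.

```cpp
#include <cstdint>

/* Given the physical core a thread is currently running on and its virtual
 * affinity mask, report a core number the way the patched
 * GetCurrentProcessorNumber() does: the physical id itself if the mask
 * contains that id as a virtual core, otherwise the lowest virtual core in
 * the mask that maps to the current physical core. */
int CurrentProcessorNumber(int current_phys_core, uint64_t v_affinity_mask) {
    /* If the mask directly contains the physical core's id, report it as-is. */
    if ((v_affinity_mask & (UINT64_C(1) << current_phys_core)) != 0) {
        return current_phys_core;
    }

    /* Otherwise, scan set bits for a virtual core mapping to this physical core. */
    while (v_affinity_mask != 0) {
        const int next_virt_core = __builtin_ctzll(v_affinity_mask);
        if (VirtualToPhysicalCore(next_virt_core) == current_phys_core) {
            return next_virt_core;
        }
        v_affinity_mask &= ~(UINT64_C(1) << next_virt_core);
    }

    /* Unreachable for a thread actually scheduled on one of its cores. */
    return current_phys_core;
}
```

For example, CurrentProcessorNumber(3, UINT64_C(1) << 63) returns 63: the thread is on physical core 3, and the only virtual core in its mask that maps there is 63.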