From b5f2698bf0f5904a11518228f95446c3d88dcbfb Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Mon, 3 Aug 2020 18:39:32 -0700
Subject: [PATCH] kern: fix multicore instruction cache invalidation

---
 .../mesosphere/arch/arm64/kern_cpu.hpp        |  8 +-
 .../source/arch/arm64/kern_cpu.cpp            | 97 +++++++++++++------
 .../kernel_ldr/source/kern_init_loader.cpp    |  2 +-
 3 files changed, 72 insertions(+), 35 deletions(-)
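Annotation (placed between the diffstat and the first diff, where it is not
applied with the patch): "ic iallu" invalidates only the instruction cache of
the core that executes it, so the removed header-inline
InvalidateEntireInstructionCache() left other cores free to keep fetching
stale instructions. The patch therefore splits the operation in two:
InvalidateEntireInstructionCacheForInit() keeps the local "ic iallu" for
single-core early boot, while the rewritten InvalidateEntireInstructionCache()
issues "ic ialluis" (broadcast to the Inner Shareable domain) and then sends
an inter-processor interrupt, because each core must still perform its own
context synchronization before its pipeline discards instructions it has
already fetched. A minimal sketch of the underlying AArch64 recipe follows;
the helper names are illustrative and do not appear in the patch:

    /* Sketch, not mesosphere code: broadcast I-cache invalidation. */
    static inline void InvalidateAllInstructionCachesInnerShareable(void) {
        __asm__ __volatile__("ic ialluis" ::: "memory"); /* Invalidate all I-caches, Inner Shareable. */
        __asm__ __volatile__("dsb ish"    ::: "memory"); /* Wait for the invalidation to complete.    */
        __asm__ __volatile__("isb"        ::: "memory"); /* Resynchronize this core's pipeline.       */
    }

    /* Sketch: the architectural minimum each remote core must do on the   */
    /* cache-op IPI. The invalidation itself was already broadcast, so only */
    /* a context synchronization event remains.                             */
    static inline void OnCacheOperationInterrupt(void) {
        __asm__ __volatile__("dsb ish" ::: "memory");
        __asm__ __volatile__("isb"     ::: "memory");
    }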
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp
index e3e519020..4182c5197 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp
@@ -59,11 +59,6 @@ namespace ams::kern::arch::arm64::cpu {
         InstructionMemoryBarrier();
     }
 
-    ALWAYS_INLINE void InvalidateEntireInstructionCache() {
-        __asm__ __volatile__("ic iallu" ::: "memory");
-        EnsureInstructionConsistency();
-    }
-
     ALWAYS_INLINE void Yield() {
         __asm__ __volatile__("yield" ::: "memory");
     }
@@ -179,6 +174,7 @@ namespace ams::kern::arch::arm64::cpu {
     void ClearPageToZeroImpl(void *);
     void FlushEntireDataCacheSharedForInit();
     void FlushEntireDataCacheLocalForInit();
+    void InvalidateEntireInstructionCacheForInit();
     void StoreEntireCacheForInit();
 
     void FlushEntireDataCache();
@@ -188,6 +184,8 @@ namespace ams::kern::arch::arm64::cpu {
     Result FlushDataCache(const void *addr, size_t size);
     Result InvalidateInstructionCache(void *addr, size_t size);
 
+    void InvalidateEntireInstructionCache();
+
     ALWAYS_INLINE void ClearPageToZero(void *page) {
         MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(page), PageSize));
         MESOSPHERE_ASSERT(page != nullptr);
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp
index 8f571d374..8b53899c0 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp
@@ -155,43 +155,56 @@ namespace ams::kern::arch::arm64::cpu {
 
             void RequestOperation(Operation op) {
                 KScopedLightLock lk(this->lock);
-                MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle);
 
-                /* Send and wait for acknowledgement of request. */
-                {
-                    KScopedLightLock cv_lk(this->cv_lock);
+                /* Create core masks for us to use. */
+                constexpr u64 AllCoresMask = (1ul << cpu::NumCores) - 1ul;
+                const u64 other_cores_mask = AllCoresMask & ~(1ul << GetCurrentCoreId());
+
+                if ((op == Operation::InvalidateInstructionCache) || (Kernel::GetState() == Kernel::State::Initializing)) {
+                    /* Check that there's no on-going operation. */
+                    MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle);
                     MESOSPHERE_ABORT_UNLESS(this->target_cores == 0);
 
                     /* Set operation. */
                     this->operation = op;
 
-                    /* Create core masks for us to use. */
-                    constexpr u64 AllCoresMask = (1ul << cpu::NumCores) - 1ul;
-                    const u64 other_cores_mask = AllCoresMask & ~(1ul << GetCurrentCoreId());
+                    /* For certain operations, we want to send an interrupt. */
+                    this->target_cores = other_cores_mask;
 
-                    if ((op == Operation::InvalidateInstructionCache) || (Kernel::GetState() == Kernel::State::Initializing)) {
-                        /* For certain operations, we want to send an interrupt. */
-                        this->target_cores = other_cores_mask;
-                        DataSynchronizationBarrier();
-                        const u64 target_mask = this->target_cores;
-                        DataSynchronizationBarrier();
-                        Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);
-                        this->ProcessOperation();
-                        while (this->target_cores != 0) {
-                            cpu::Yield();
-                        }
-                    } else {
-                        /* Request all cores. */
-                        this->target_cores = AllCoresMask;
+                    const u64 target_mask = this->target_cores;
+
+                    DataSynchronizationBarrier();
+                    Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);
 
-                        /* Use the condvar. */
-                        this->cv.Broadcast();
-                        while (this->target_cores != 0) {
-                            this->cv.Wait(std::addressof(this->cv_lock));
-                        }
+                    this->ProcessOperation();
+                    while (this->target_cores != 0) {
+                        cpu::Yield();
                     }
+
+                    /* Go idle again. */
+                    this->operation = Operation::Idle;
+                } else {
+                    /* Lock condvar so that we can send and wait for acknowledgement of request. */
+                    KScopedLightLock cv_lk(this->cv_lock);
+
+                    /* Check that there's no on-going operation. */
+                    MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle);
+                    MESOSPHERE_ABORT_UNLESS(this->target_cores == 0);
+
+                    /* Set operation. */
+                    this->operation = op;
+
+                    /* Request all cores. */
+                    this->target_cores = AllCoresMask;
+
+                    /* Use the condvar. */
+                    this->cv.Broadcast();
+                    while (this->target_cores != 0) {
+                        this->cv.Wait(std::addressof(this->cv_lock));
+                    }
+
+                    /* Go idle again. */
+                    this->operation = Operation::Idle;
                 }
-                /* Go idle again. */
-                this->operation = Operation::Idle;
             }
         };
@@ -285,6 +298,8 @@ namespace ams::kern::arch::arm64::cpu {
                         DataSynchronizationBarrier();
                         break;
                 }
+
+                this->target_cores &= ~(1ul << GetCurrentCoreId());
             }
 
             ALWAYS_INLINE void SetEventLocally() {
@@ -327,6 +342,14 @@ namespace ams::kern::arch::arm64::cpu {
            return ResultSuccess();
        }
 
+        ALWAYS_INLINE void InvalidateEntireInstructionCacheLocalImpl() {
+            __asm__ __volatile__("ic iallu" ::: "memory");
+        }
+
+        ALWAYS_INLINE void InvalidateEntireInstructionCacheGlobalImpl() {
+            __asm__ __volatile__("ic ialluis" ::: "memory");
+        }
+
     }
 
     void FlushEntireDataCacheSharedForInit() {
@@ -337,11 +360,16 @@ namespace ams::kern::arch::arm64::cpu {
         return PerformCacheOperationBySetWayLocal(FlushDataCacheLineBySetWayImpl);
     }
 
+    void InvalidateEntireInstructionCacheForInit() {
+        InvalidateEntireInstructionCacheLocalImpl();
+        EnsureInstructionConsistency();
+    }
+
     void StoreEntireCacheForInit() {
         PerformCacheOperationBySetWayLocal(StoreDataCacheLineBySetWayImpl);
         PerformCacheOperationBySetWayShared(StoreDataCacheLineBySetWayImpl);
         DataSynchronizationBarrierInnerShareable();
-        InvalidateEntireInstructionCache();
+        InvalidateEntireInstructionCacheForInit();
     }
 
     void FlushEntireDataCache() {
@@ -401,6 +429,17 @@ namespace ams::kern::arch::arm64::cpu {
         return ResultSuccess();
     }
 
+    void InvalidateEntireInstructionCache() {
+        KScopedCoreMigrationDisable dm;
+
+        /* Invalidate the instruction cache on all cores. */
+        InvalidateEntireInstructionCacheGlobalImpl();
+        EnsureInstructionConsistency();
+
+        /* Request the interrupt helper to invalidate, too. */
+        g_cache_operation_handler.RequestOperation(KCacheHelperInterruptHandler::Operation::InvalidateInstructionCache);
+    }
+
     void InitializeInterruptThreads(s32 core_id) {
         /* Initialize the cache operation handler. */
         g_cache_operation_handler.Initialize(core_id);
diff --git a/mesosphere/kernel_ldr/source/kern_init_loader.cpp b/mesosphere/kernel_ldr/source/kern_init_loader.cpp
index cafe8a0b7..658b2a39e 100644
--- a/mesosphere/kernel_ldr/source/kern_init_loader.cpp
+++ b/mesosphere/kernel_ldr/source/kern_init_loader.cpp
@@ -81,7 +81,7 @@ namespace ams::kern::init::loader {
         cpu::DataSynchronizationBarrier();
 
         /* Invalidate entire instruction cache. */
-        cpu::InvalidateEntireInstructionCache();
+        cpu::InvalidateEntireInstructionCacheForInit();
 
         /* Invalidate entire TLB. */
         cpu::InvalidateEntireTlb();
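Closing annotation (after the final hunk, not applied with the patch): within
RequestOperation(), the InvalidateInstructionCache/Initializing path
deliberately avoids the condition variable, presumably because the IPI is
serviced in interrupt context and, during Kernel::State::Initializing, the
per-core interrupt threads that drive the condvar path are not running yet.
The requester instead invalidates locally via this->ProcessOperation() and
busy-waits with cpu::Yield() until every targeted core has cleared its bit
from target_cores. A hedged sketch of caller-side usage after this change;
ExamplePublishCode is a hypothetical function, not part of mesosphere:

    /* Sketch: publishing freshly written code on a live SMP system. */
    void ExamplePublishCode(void *code, size_t size) {
        /* Make the new instruction bytes visible to instruction fetch.  */
        /* (FlushDataCache returns a Result; error handling elided.)     */
        cpu::FlushDataCache(code, size);

        /* Drop stale instructions on every core: "ic ialluis" plus an   */
        /* IPI so each remote core also resynchronizes its pipeline.     */
        cpu::InvalidateEntireInstructionCache();
    }

    /* In kernel_ldr, which runs on a single core before SMP bring-up,   */
    /* the cheaper local form is the right tool:                         */
    /*     cpu::InvalidateEntireInstructionCacheForInit();               */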