Mirror of https://github.com/Atmosphere-NX/Atmosphere.git
kern: fix multicore instruction cache invalidation
Commit b5f2698bf0 (parent f058536b59)
3 changed files with 72 additions and 35 deletions
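In short: the function removed below invalidated the instruction cache with `ic iallu`, which only affects the core that executes it, so on a multicore system the other cores could keep fetching stale instructions. The commit therefore splits the operation in two: InvalidateEntireInstructionCacheForInit() keeps the cheap local-only behavior for single-core early boot, while a new out-of-line InvalidateEntireInstructionCache() issues the broadcast `ic ialluis` and additionally interrupts every other core so that each one resynchronizes itself.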
@@ -59,11 +59,6 @@ namespace ams::kern::arch::arm64::cpu {
         InstructionMemoryBarrier();
     }
 
-    ALWAYS_INLINE void InvalidateEntireInstructionCache() {
-        __asm__ __volatile__("ic iallu" ::: "memory");
-        EnsureInstructionConsistency();
-    }
-
     ALWAYS_INLINE void Yield() {
         __asm__ __volatile__("yield" ::: "memory");
     }
@@ -179,6 +174,7 @@ namespace ams::kern::arch::arm64::cpu {
     void ClearPageToZeroImpl(void *);
     void FlushEntireDataCacheSharedForInit();
     void FlushEntireDataCacheLocalForInit();
+    void InvalidateEntireInstructionCacheForInit();
     void StoreEntireCacheForInit();
 
     void FlushEntireDataCache();
@@ -188,6 +184,8 @@ namespace ams::kern::arch::arm64::cpu {
     Result FlushDataCache(const void *addr, size_t size);
     Result InvalidateInstructionCache(void *addr, size_t size);
 
+    void InvalidateEntireInstructionCache();
+
     ALWAYS_INLINE void ClearPageToZero(void *page) {
         MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(page), PageSize));
         MESOSPHERE_ASSERT(page != nullptr);
@@ -155,43 +155,56 @@ namespace ams::kern::arch::arm64::cpu {
 
             void RequestOperation(Operation op) {
                 KScopedLightLock lk(this->lock);
-                MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle);
 
-                /* Send and wait for acknowledgement of request. */
-                {
-                    KScopedLightLock cv_lk(this->cv_lock);
-                    MESOSPHERE_ABORT_UNLESS(this->target_cores == 0);
+                /* Create core masks for us to use. */
+                constexpr u64 AllCoresMask = (1ul << cpu::NumCores) - 1ul;
+                const u64 other_cores_mask = AllCoresMask & ~(1ul << GetCurrentCoreId());
 
-                    /* Set operation. */
-                    this->operation = op;
+                if ((op == Operation::InvalidateInstructionCache) || (Kernel::GetState() == Kernel::State::Initializing)) {
+                    /* Check that there's no on-going operation. */
+                    MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle);
+                    MESOSPHERE_ABORT_UNLESS(this->target_cores == 0);
 
-                    /* Create core masks for us to use. */
-                    constexpr u64 AllCoresMask = (1ul << cpu::NumCores) - 1ul;
-                    const u64 other_cores_mask = AllCoresMask & ~(1ul << GetCurrentCoreId());
+                    /* Set operation. */
+                    this->operation = op;
 
-                    if ((op == Operation::InvalidateInstructionCache) || (Kernel::GetState() == Kernel::State::Initializing)) {
-                        /* For certain operations, we want to send an interrupt. */
-                        this->target_cores = other_cores_mask;
-                        DataSynchronizationBarrier();
-                        const u64 target_mask = this->target_cores;
-                        DataSynchronizationBarrier();
-                        Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);
-                        this->ProcessOperation();
-                        while (this->target_cores != 0) {
-                            cpu::Yield();
-                        }
-                    } else {
-                        /* Request all cores. */
-                        this->target_cores = AllCoresMask;
+                    /* For certain operations, we want to send an interrupt. */
+                    this->target_cores = other_cores_mask;
 
-                        /* Use the condvar. */
-                        this->cv.Broadcast();
-                        while (this->target_cores != 0) {
-                            this->cv.Wait(std::addressof(this->cv_lock));
-                        }
+                    const u64 target_mask = this->target_cores;
+                    DataSynchronizationBarrier();
+                    Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);
+
+                    this->ProcessOperation();
+                    while (this->target_cores != 0) {
+                        cpu::Yield();
                     }
-                }
 
-                /* Go idle again. */
-                this->operation = Operation::Idle;
+                    /* Go idle again. */
+                    this->operation = Operation::Idle;
+                } else {
+                    /* Lock condvar so that we can send and wait for acknowledgement of request. */
+                    KScopedLightLock cv_lk(this->cv_lock);
+
+                    /* Check that there's no on-going operation. */
+                    MESOSPHERE_ABORT_UNLESS(this->operation == Operation::Idle);
+                    MESOSPHERE_ABORT_UNLESS(this->target_cores == 0);
+
+                    /* Set operation. */
+                    this->operation = op;
+
+                    /* Request all cores. */
+                    this->target_cores = AllCoresMask;
+
+                    /* Use the condvar. */
+                    this->cv.Broadcast();
+                    while (this->target_cores != 0) {
+                        this->cv.Wait(std::addressof(this->cv_lock));
+                    }
+
+                    /* Go idle again. */
+                    this->operation = Operation::Idle;
+                }
             }
         };
 
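Worth spelling out what changed above: instruction-cache requests now always take the interrupt path (which is also the only path usable while the kernel is still initializing). The requester spins on target_cores with cpu::Yield() instead of sleeping on the condition variable, and each core acknowledges by clearing its own bit, as the next hunk (in the helper's ProcessOperation) shows. A minimal userland analogue of that acknowledgment pattern, with std::atomic and std::thread standing in for the kernel's barriers and IPIs (all names here are illustrative, not Atmosphere APIs):

    #include <atomic>
    #include <cstdint>
    #include <thread>

    std::atomic<uint64_t> g_target_cores{0};

    // Per-core handler: do the local cache operation, then acknowledge by
    // clearing this core's bit -- mirroring the new line in ProcessOperation.
    void AcknowledgeOnCore(int core_id) {
        /* ...local cache maintenance would run here... */
        g_target_cores.fetch_and(~(1ull << core_id), std::memory_order_release);
    }

    // Requester: publish the mask of pending cores, kick them, then spin-yield
    // until every bit is cleared. The kernel spins with cpu::Yield() because
    // this path must also work before the condvar machinery is usable.
    void RequestOperation(uint64_t other_cores_mask) {
        g_target_cores.store(other_cores_mask, std::memory_order_release);
        std::thread t(AcknowledgeOnCore, 1); /* stands in for the IPI to core 1 */
        while (g_target_cores.load(std::memory_order_acquire) != 0) {
            std::this_thread::yield();
        }
        t.join();
    }

    int main() {
        RequestOperation(1ull << 1); /* "request" core 1 only */
    }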
@@ -285,6 +298,8 @@ namespace ams::kern::arch::arm64::cpu {
                         DataSynchronizationBarrier();
                         break;
                 }
+
+                this->target_cores &= ~(1ul << GetCurrentCoreId());
             }
 
             ALWAYS_INLINE void SetEventLocally() {
@@ -327,6 +342,14 @@ namespace ams::kern::arch::arm64::cpu {
             return ResultSuccess();
         }
 
+        ALWAYS_INLINE void InvalidateEntireInstructionCacheLocalImpl() {
+            __asm__ __volatile__("ic iallu" ::: "memory");
+        }
+
+        ALWAYS_INLINE void InvalidateEntireInstructionCacheGlobalImpl() {
+            __asm__ __volatile__("ic ialluis" ::: "memory");
+        }
+
     }
 
     void FlushEntireDataCacheSharedForInit() {
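The two new Impl helpers capture the scope distinction at the heart of the fix: `ic iallu` invalidates the entire instruction cache to the Point of Unification on the executing core only, while `ic ialluis` does the same for every core in the Inner Shareable domain. In either case the invalidate is only guaranteed complete after a DSB, and a core only discards instructions it has already fetched after its own ISB, which is what EnsureInstructionConsistency supplies. A freestanding sketch of the broadcast sequence (assuming AArch64 with GCC-style inline assembly; the function name is illustrative):

    // Broadcast I-cache invalidate in the order ARMv8 requires:
    // invalidate, complete with DSB, then resynchronize this core's
    // instruction fetch with ISB. Remote cores still need their own ISB.
    static inline void InvalidateICacheInnerShareable() {
        __asm__ __volatile__("ic ialluis" ::: "memory"); /* all cores in the IS domain */
        __asm__ __volatile__("dsb ish"    ::: "memory"); /* wait for the invalidate to complete */
        __asm__ __volatile__("isb"        ::: "memory"); /* resync local instruction fetch */
    }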
@@ -337,11 +360,16 @@ namespace ams::kern::arch::arm64::cpu {
         return PerformCacheOperationBySetWayLocal<true>(FlushDataCacheLineBySetWayImpl);
     }
 
+    void InvalidateEntireInstructionCacheForInit() {
+        InvalidateEntireInstructionCacheLocalImpl();
+        EnsureInstructionConsistency();
+    }
+
     void StoreEntireCacheForInit() {
         PerformCacheOperationBySetWayLocal<true>(StoreDataCacheLineBySetWayImpl);
         PerformCacheOperationBySetWayShared<true>(StoreDataCacheLineBySetWayImpl);
         DataSynchronizationBarrierInnerShareable();
-        InvalidateEntireInstructionCache();
+        InvalidateEntireInstructionCacheForInit();
     }
 
     void FlushEntireDataCache() {
@@ -401,6 +429,17 @@ namespace ams::kern::arch::arm64::cpu {
         return ResultSuccess();
     }
 
+    void InvalidateEntireInstructionCache() {
+        KScopedCoreMigrationDisable dm;
+
+        /* Invalidate the instruction cache on all cores. */
+        InvalidateEntireInstructionCacheGlobalImpl();
+        EnsureInstructionConsistency();
+
+        /* Request the interrupt helper to invalidate, too. */
+        g_cache_operation_handler.RequestOperation(KCacheHelperInterruptHandler::Operation::InvalidateInstructionCache);
+    }
+
     void InitializeInterruptThreads(s32 core_id) {
         /* Initialize the cache operation handler. */
         g_cache_operation_handler.Initialize(core_id);
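Two details in the new runtime entry point above are worth noting. First, even though `ic ialluis` broadcasts the invalidate, a remote core only stops executing already-fetched stale instructions once it performs its own context synchronization, hence the follow-up RequestOperation(...InvalidateInstructionCache) that makes every other core run the operation (and its ISB) locally. Second, KScopedCoreMigrationDisable presumably pins the calling thread to its current core, so the other_cores_mask computed inside RequestOperation stays valid until all acknowledgements arrive.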
@@ -81,7 +81,7 @@ namespace ams::kern::init::loader {
         cpu::DataSynchronizationBarrier();
 
         /* Invalidate entire instruction cache. */
-        cpu::InvalidateEntireInstructionCache();
+        cpu::InvalidateEntireInstructionCacheForInit();
 
         /* Invalidate entire TLB. */
         cpu::InvalidateEntireTlb();
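The loader change is consistent with the split above: the kernel loader runs on a single core, presumably before InitializeInterruptThreads() has set up the cache-operation handler, so it cannot use the IPI-based InvalidateEntireInstructionCache() and takes the local ForInit variant instead.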