Mirror of https://github.com/Atmosphere-NX/Atmosphere.git

commit 9d89835ff8 (parent 6e17317d5d)

    kern: update for new hw maintenance semantics

19 changed files with 386 additions and 244 deletions
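In broad terms, the diff below replaces full-system barriers (dsb sy) with inner-shareable ones on multi-core notification paths, introduces store-only barrier helpers (dsb ishst / dmb ishst) for publishing page-table updates, batches the init-time page-table barriers into a single barrier at the end of a mapping operation, and renames the full-system instruction-consistency helper. A minimal sketch of the publish-then-invalidate pattern the kernel converges on; the helper names are the ones introduced in the hunks below, while the wrapper function itself is illustrative only and not code from the commit:

    /* Illustrative sketch, not code from the commit. */
    void PublishPteAndInvalidate(volatile u64 *pte, u64 value) {
        *pte = value;                                          /* write the translation table entry           */
        cpu::DataSynchronizationBarrierInnerShareableStore();  /* complete/order the store before maintenance */
        cpu::InvalidateEntireTlbDataOnly();                    /* tlbi vmalle1is + dsb ish                     */
    }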
@@ -279,20 +279,21 @@ namespace ams::kern::arch::arm64::init {

                 /* Invalidate the entire tlb. */
                 cpu::DataSynchronizationBarrierInnerShareable();
-                cpu::InvalidateEntireTlbInnerShareable();
+                cpu::InvalidateEntireTlb();

                 /* Copy data, if we should. */
                 const u64 negative_block_size_for_mask = static_cast<u64>(-static_cast<s64>(block_size));
                 const u64 offset_mask = negative_block_size_for_mask & ((1ul << 48) - 1);
                 const KVirtualAddress copy_src_addr = KVirtualAddress(src_saved.GetRawAttributesUnsafeForSwap() & offset_mask);
                 const KVirtualAddress copy_dst_addr = KVirtualAddress(dst_saved.GetRawAttributesUnsafeForSwap() & offset_mask);
-                if (block_size && do_copy) {
+                if (do_copy) {
                     u8 tmp[0x100];
                     for (size_t ofs = 0; ofs < block_size; ofs += sizeof(tmp)) {
                         std::memcpy(tmp, GetVoidPointer(copy_src_addr + ofs), sizeof(tmp));
                         std::memcpy(GetVoidPointer(copy_src_addr + ofs), GetVoidPointer(copy_dst_addr + ofs), sizeof(tmp));
                         std::memcpy(GetVoidPointer(copy_dst_addr + ofs), tmp, sizeof(tmp));
                     }
+                    cpu::DataSynchronizationBarrierInnerShareable();
                 }

                 /* Swap the mappings. */
@@ -339,7 +340,6 @@ namespace ams::kern::arch::arm64::init {
             /* Can we make an L1 block? */
             if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
                 *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, false);
-                cpu::DataSynchronizationBarrierInnerShareable();

                 virt_addr += L1BlockSize;
                 phys_addr += L1BlockSize;
@@ -350,8 +350,8 @@ namespace ams::kern::arch::arm64::init {
             /* If we don't already have an L2 table, we need to make a new one. */
             if (!l1_entry->IsTable()) {
                 KPhysicalAddress new_table = AllocateNewPageTable(allocator);
-                *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
                 cpu::DataSynchronizationBarrierInnerShareable();
+                *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
             }

             L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
@@ -365,14 +365,12 @@ namespace ams::kern::arch::arm64::init {
                     phys_addr += L2BlockSize;
                     size -= L2BlockSize;
                 }
-                cpu::DataSynchronizationBarrierInnerShareable();
                 continue;
             }

             /* Can we make an L2 block? */
             if (util::IsAligned(GetInteger(virt_addr), L2BlockSize) && util::IsAligned(GetInteger(phys_addr), L2BlockSize) && size >= L2BlockSize) {
                 *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, false);
-                cpu::DataSynchronizationBarrierInnerShareable();

                 virt_addr += L2BlockSize;
                 phys_addr += L2BlockSize;
@@ -383,8 +381,8 @@ namespace ams::kern::arch::arm64::init {
             /* If we don't already have an L3 table, we need to make a new one. */
             if (!l2_entry->IsTable()) {
                 KPhysicalAddress new_table = AllocateNewPageTable(allocator);
-                *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
                 cpu::DataSynchronizationBarrierInnerShareable();
+                *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
             }

             L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
@@ -398,17 +396,18 @@ namespace ams::kern::arch::arm64::init {
                     phys_addr += L3BlockSize;
                     size -= L3BlockSize;
                 }
-                cpu::DataSynchronizationBarrierInnerShareable();
                 continue;
             }

             /* Make an L3 block. */
             *l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, false);
-            cpu::DataSynchronizationBarrierInnerShareable();
             virt_addr += L3BlockSize;
             phys_addr += L3BlockSize;
             size -= L3BlockSize;
         }

+        /* Ensure data consistency after our mapping is added. */
+        cpu::DataSynchronizationBarrierInnerShareable();
     }

     KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
@@ -556,9 +555,6 @@ namespace ams::kern::arch::arm64::init {
         }

         void Reprotect(KVirtualAddress virt_addr, size_t size, const PageTableEntry &attr_before, const PageTableEntry &attr_after) {
-            /* Ensure data consistency before we begin reprotection. */
-            cpu::DataSynchronizationBarrierInnerShareable();
-
             /* Ensure that addresses and sizes are page aligned. */
             MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
             MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
@@ -699,7 +695,7 @@ namespace ams::kern::arch::arm64::init {
                 this->PhysicallyRandomize(virt_addr, size, L2BlockSize, do_copy);
                 this->PhysicallyRandomize(virt_addr, size, L3ContiguousBlockSize, do_copy);
                 this->PhysicallyRandomize(virt_addr, size, L3BlockSize, do_copy);
-                cpu::StoreEntireCacheForInit();
+                cpu::StoreCacheForInit(GetVoidPointer(virt_addr), size);
             }
     };
@@ -48,6 +48,10 @@ namespace ams::kern::arch::arm64::cpu {
         __asm__ __volatile__("dsb ish" ::: "memory");
     }

+    ALWAYS_INLINE void DataSynchronizationBarrierInnerShareableStore() {
+        __asm__ __volatile__("dsb ishst" ::: "memory");
+    }
+
     ALWAYS_INLINE void DataMemoryBarrier() {
         __asm__ __volatile__("dmb sy" ::: "memory");
     }
@@ -56,16 +60,20 @@ namespace ams::kern::arch::arm64::cpu {
         __asm__ __volatile__("dmb ish" ::: "memory");
     }

+    ALWAYS_INLINE void DataMemoryBarrierInnerShareableStore() {
+        __asm__ __volatile__("dmb ishst" ::: "memory");
+    }
+
     ALWAYS_INLINE void InstructionMemoryBarrier() {
         __asm__ __volatile__("isb" ::: "memory");
     }

-    ALWAYS_INLINE void EnsureInstructionConsistencyInnerShareable() {
+    ALWAYS_INLINE void EnsureInstructionConsistency() {
         DataSynchronizationBarrierInnerShareable();
         InstructionMemoryBarrier();
     }

-    ALWAYS_INLINE void EnsureInstructionConsistency() {
+    ALWAYS_INLINE void EnsureInstructionConsistencyFullSystem() {
         DataSynchronizationBarrier();
         InstructionMemoryBarrier();
     }
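For reference, after these hunks the barrier helpers in this header correspond to the following instructions (the inner-shareable and store-only qualifiers are standard ARMv8 barrier options; the dsb variants also wait for completion, while the dmb variants only enforce ordering):

    DataSynchronizationBarrier()                     ->  dsb sy     (full system)
    DataSynchronizationBarrierInnerShareable()       ->  dsb ish
    DataSynchronizationBarrierInnerShareableStore()  ->  dsb ishst  (waits only for prior stores)
    DataMemoryBarrier()                              ->  dmb sy
    DataMemoryBarrierInnerShareable()                ->  dmb ish
    DataMemoryBarrierInnerShareableStore()           ->  dmb ishst  (orders prior stores against later stores)
    EnsureInstructionConsistency()                   ->  dsb ish; isb
    EnsureInstructionConsistencyFullSystem()         ->  dsb sy; isb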
@@ -182,28 +190,23 @@ namespace ams::kern::arch::arm64::cpu {
     NOINLINE void SynchronizeAllCores();

     /* Cache management helpers. */
-    void StoreEntireCacheForInit();
-    void FlushEntireCacheForInit();
+    void StoreCacheForInit(void *addr, size_t size);

     void FlushEntireDataCache();

     Result InvalidateDataCache(void *addr, size_t size);
     Result StoreDataCache(const void *addr, size_t size);
     Result FlushDataCache(const void *addr, size_t size);
-    Result InvalidateInstructionCache(void *addr, size_t size);

     void InvalidateEntireInstructionCache();

+    void ClearPageToZeroImpl(void *);
+
     ALWAYS_INLINE void ClearPageToZero(void * const page) {
         MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(page), PageSize));
         MESOSPHERE_ASSERT(page != nullptr);

-        uintptr_t cur = reinterpret_cast<uintptr_t>(__builtin_assume_aligned(page, PageSize));
-        const uintptr_t last = cur + PageSize - DataCacheLineSize;
-
-        for (/* ... */; cur <= last; cur += DataCacheLineSize) {
-            __asm__ __volatile__("dc zva, %[cur]" :: [cur]"r"(cur) : "memory");
-        }
+        ClearPageToZeroImpl(page);
     }

     ALWAYS_INLINE void InvalidateTlbByAsid(u32 asid) {
@@ -223,20 +226,15 @@ namespace ams::kern::arch::arm64::cpu {
         EnsureInstructionConsistency();
     }

-    ALWAYS_INLINE void InvalidateEntireTlbInnerShareable() {
-        __asm__ __volatile__("tlbi vmalle1is" ::: "memory");
-        EnsureInstructionConsistencyInnerShareable();
-    }
-
     ALWAYS_INLINE void InvalidateEntireTlbDataOnly() {
         __asm__ __volatile__("tlbi vmalle1is" ::: "memory");
-        DataSynchronizationBarrier();
+        DataSynchronizationBarrierInnerShareable();
     }

     ALWAYS_INLINE void InvalidateTlbByVaDataOnly(KProcessAddress virt_addr) {
         const u64 value = ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul);
         __asm__ __volatile__("tlbi vaae1is, %[value]" :: [value]"r"(value) : "memory");
-        DataSynchronizationBarrier();
+        DataSynchronizationBarrierInnerShareable();
     }

     ALWAYS_INLINE uintptr_t GetCurrentThreadPointerValue() {
@@ -96,8 +96,6 @@ namespace ams::kern::arch::arm64 {
         }

         static void HandleInterrupt(bool user_mode);
-
-        /* Implement more KInterruptManager functionality. */
     private:
         Result BindGlobal(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
         Result BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear);
@@ -174,7 +174,6 @@ namespace ams::kern::arch::arm64 {
         static NOINLINE void Initialize(s32 core_id);

         ALWAYS_INLINE void Activate(u32 proc_id) {
-            cpu::DataSynchronizationBarrier();
             cpu::SwitchProcess(m_ttbr, proc_id);
         }

@@ -219,12 +218,13 @@ namespace ams::kern::arch::arm64 {

         Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll);

-        static ALWAYS_INLINE void PteDataSynchronizationBarrier() {
-            cpu::DataSynchronizationBarrierInnerShareable();
+        static ALWAYS_INLINE void PteDataMemoryBarrier() {
+            cpu::DataMemoryBarrierInnerShareableStore();
         }

         static ALWAYS_INLINE void ClearPageTable(KVirtualAddress table) {
             cpu::ClearPageToZero(GetVoidPointer(table));
+            cpu::DataSynchronizationBarrierInnerShareable();
         }

         ALWAYS_INLINE void OnTableUpdated() const {
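A plausible reading of the two changes above, taken together: when a freshly allocated table page is linked into a parent entry, ClearPageTable now ends in a dsb ish so its zero-fill is complete, and PteDataMemoryBarrier (a dmb ishst) only has to order the remaining stores ahead of the store that makes the table reachable to the walker. A sketch of the publish sequence as it appears in the allocation paths later in this commit:

    l2_virt = AllocatePageTable(page_list, reuse_ll);   /* ClearPageTable(): dc zva ... + dsb ish            */
    l2_phys = GetPageTablePhysicalAddress(l2_virt);
    PteDataMemoryBarrier();                             /* dmb ishst: earlier stores before the link store   */
    *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);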
@@ -239,22 +239,8 @@ namespace ams::kern::arch::arm64 {
             cpu::InvalidateTlbByVaDataOnly(virt_addr);
         }

-        ALWAYS_INLINE void NoteUpdated() const {
-            cpu::DataSynchronizationBarrier();
-
-            if (this->IsKernel()) {
-                this->OnKernelTableUpdated();
-            } else {
-                this->OnTableUpdated();
-            }
-        }
-
-        ALWAYS_INLINE void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
-            MESOSPHERE_ASSERT(this->IsKernel());
-
-            cpu::DataSynchronizationBarrier();
-            this->OnKernelTableSinglePageUpdated(virt_addr);
-        }
+        void NoteUpdated() const;
+        void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const;

         KVirtualAddress AllocatePageTable(PageLinkedList *page_list, bool reuse_ll) const {
             KVirtualAddress table = this->GetPageTableManager().Allocate();
@@ -46,7 +46,6 @@ namespace ams::kern::arch::arm64 {
             static bool StoreDataCache(uintptr_t start, uintptr_t end);
             static bool FlushDataCache(uintptr_t start, uintptr_t end);
             static bool InvalidateDataCache(uintptr_t start, uintptr_t end);
-            static bool InvalidateInstructionCache(uintptr_t start, uintptr_t end);

             static bool ReadIoMemory32Bit(void *dst, const void *src, size_t size);
             static bool ReadIoMemory16Bit(void *dst, const void *src, size_t size);
@@ -35,7 +35,7 @@ namespace ams::kern {

     ALWAYS_INLINE void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
         if (const u64 core_mask = cores_needing_scheduling & ~(1ul << m_core_id); core_mask != 0) {
-            cpu::DataSynchronizationBarrier();
+            cpu::DataSynchronizationBarrierInnerShareable();
             Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_Scheduler, core_mask);
         }
     }
@@ -176,7 +176,7 @@ namespace ams::kern::arch::arm64::cpu {

             const u64 target_mask = m_target_cores.Load();

-            DataSynchronizationBarrier();
+            DataSynchronizationBarrierInnerShareable();
             Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);

             this->ProcessOperation();
@@ -213,32 +213,37 @@ namespace ams::kern::arch::arm64::cpu {
     };

     /* Instances of the interrupt handlers. */
-    KThreadTerminationInterruptHandler g_thread_termination_handler;
-    KCacheHelperInterruptHandler g_cache_operation_handler;
-    KPerformanceCounterInterruptHandler g_performance_counter_handler[cpu::NumCores];
+    constinit KThreadTerminationInterruptHandler g_thread_termination_handler;
+    constinit KCacheHelperInterruptHandler g_cache_operation_handler;
+    constinit KPerformanceCounterInterruptHandler g_performance_counter_handler[cpu::NumCores];

     /* Expose this as a global, for asm to use. */
-    s32 g_all_core_sync_count;
+    constinit s32 g_all_core_sync_count;

-    template<bool Init, typename F>
+    template<typename F>
     ALWAYS_INLINE void PerformCacheOperationBySetWayImpl(int level, F f) {
         /* Used in multiple locations. */
         const u64 level_sel_value = static_cast<u64>(level << 1);

+        /* Get the cache size id register value with interrupts disabled. */
         u64 ccsidr_value;
-        if constexpr (Init) {
-            /* During init, we can just set the selection register directly. */
-            cpu::SetCsselrEl1(level_sel_value);
-            cpu::InstructionMemoryBarrier();
-            ccsidr_value = cpu::GetCcsidrEl1();
-        } else {
-            /* After init, we need to care about interrupts. */
+        {
+            /* Disable interrupts. */
             KScopedInterruptDisable di;

+            /* Configure the cache select register for our level. */
             cpu::SetCsselrEl1(level_sel_value);

+            /* Ensure our configuration takes before reading the cache size id register. */
             cpu::InstructionMemoryBarrier();

+            /* Get the cache size id register. */
             ccsidr_value = cpu::GetCcsidrEl1();
         }

+        /* Ensure that no memory inconsistencies occur between cache management invocations. */
+        cpu::DataSynchronizationBarrier();
+
         /* Get cache size id info. */
         CacheSizeIdRegisterAccessor ccsidr_el1(ccsidr_value);
         const int num_sets = ccsidr_el1.GetNumberOfSets();
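Two details worth noting in the rewritten helper: CSSELR_EL1 encodes the cache level in bits [3:1] and the instruction/data selector in bit 0, which is why the data/unified cache at a given level is selected with level << 1, and the selection register is per-core state shared with any interrupting context, so the write, the isb, and the CCSIDR_EL1 read are presumably kept under KScopedInterruptDisable so nothing can change the selection in between:

    const u64 level_sel_value = static_cast<u64>(level << 1);   /* Level in CSSELR_EL1[3:1], InD = 0 in bit 0 */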
@@ -266,13 +271,11 @@ namespace ams::kern::arch::arm64::cpu {
     }

     void StoreDataCacheBySetWay(int level) {
-        PerformCacheOperationBySetWayImpl<false>(level, StoreDataCacheLineBySetWayImpl);
-        cpu::DataSynchronizationBarrier();
+        PerformCacheOperationBySetWayImpl(level, StoreDataCacheLineBySetWayImpl);
     }

     void FlushDataCacheBySetWay(int level) {
-        PerformCacheOperationBySetWayImpl<false>(level, FlushDataCacheLineBySetWayImpl);
-        cpu::DataSynchronizationBarrier();
+        PerformCacheOperationBySetWayImpl(level, FlushDataCacheLineBySetWayImpl);
     }

     void KCacheHelperInterruptHandler::ProcessOperation() {
@@ -284,9 +287,11 @@ namespace ams::kern::arch::arm64::cpu {
                 break;
             case Operation::StoreDataCache:
                 StoreDataCacheBySetWay(0);
+                cpu::DataSynchronizationBarrier();
                 break;
             case Operation::FlushDataCache:
                 FlushDataCacheBySetWay(0);
+                cpu::DataSynchronizationBarrier();
                 break;
         }
     }
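The dsb that used to sit inside Store/FlushDataCacheBySetWay is now the caller's responsibility: ProcessOperation adds it back for the single-level requests above, while loops over several levels (see FlushEntireDataCache further down in this file) can issue one barrier after the whole loop. A sketch of that pattern, assuming the completion barrier is only required once the full clean or flush is done:

    for (int level = levels_of_coherency; level > 1; --level) {
        FlushDataCacheBySetWay(level - 1);
    }
    DataSynchronizationBarrier();   /* one completion barrier for all set/way operations */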
@@ -323,14 +328,6 @@ namespace ams::kern::arch::arm64::cpu {
         R_SUCCEED();
     }

-    ALWAYS_INLINE Result InvalidateInstructionCacheRange(uintptr_t start, uintptr_t end) {
-        MESOSPHERE_ASSERT(util::IsAligned(start, InstructionCacheLineSize));
-        MESOSPHERE_ASSERT(util::IsAligned(end, InstructionCacheLineSize));
-        R_UNLESS(UserspaceAccess::InvalidateInstructionCache(start, end), svc::ResultInvalidCurrentMemory());
-        EnsureInstructionConsistency();
-        R_SUCCEED();
-    }
-
     ALWAYS_INLINE void InvalidateEntireInstructionCacheLocalImpl() {
         __asm__ __volatile__("ic iallu" ::: "memory");
     }
@@ -341,26 +338,12 @@ namespace ams::kern::arch::arm64::cpu {

     }

-    void StoreEntireCacheForInit() {
-        /* Store local. */
-        {
-            CacheLineIdRegisterAccessor clidr_el1;
-            const int levels_of_unification = clidr_el1.GetLevelsOfUnification();
-
-            for (int level = 0; level != levels_of_unification; ++level) {
-                PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
-            }
-        }
-
-        /* Store shared. */
-        {
-            CacheLineIdRegisterAccessor clidr_el1;
-            const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();
-            const int levels_of_unification = clidr_el1.GetLevelsOfUnification();
-
-            for (int level = levels_of_unification; level <= levels_of_coherency; ++level) {
-                PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
-            }
+    void StoreCacheForInit(void *addr, size_t size) {
+        /* Store the data cache for the specified range. */
+        const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
+        const uintptr_t end = start + size;
+        for (uintptr_t cur = start; cur < end; cur += DataCacheLineSize) {
+            __asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(cur) : "memory");
         }

         /* Data synchronization barrier. */
@@ -370,36 +353,7 @@ namespace ams::kern::arch::arm64::cpu {
         InvalidateEntireInstructionCacheLocalImpl();

         /* Ensure local instruction consistency. */
-        DataSynchronizationBarrierInnerShareable();
-        InstructionMemoryBarrier();
-    }
-
-    void FlushEntireCacheForInit() {
-        /* Flush data cache. */
-        {
-            /* Get levels of coherence/unificaiton. */
-            CacheLineIdRegisterAccessor clidr_el1;
-            const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();
-
-            /* Store cache from L1 up to (level of coherence - 1). */
-            for (int level = 0; level < levels_of_coherency - 1; ++level) {
-                PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
-                DataSynchronizationBarrier();
-            }
-
-            /* Flush cache from (level of coherence - 1) down to L0. */
-            for (int level = levels_of_coherency; level > 0; --level) {
-                PerformCacheOperationBySetWayImpl<true>(level - 1, FlushDataCacheLineBySetWayImpl);
-                DataSynchronizationBarrier();
-            }
-        }
-
-        /* Invalidate instruction cache. */
-        InvalidateEntireInstructionCacheLocalImpl();
         EnsureInstructionConsistency();
-
-        /* Invalidate entire TLB. */
-        InvalidateEntireTlb();
     }

     void FlushEntireDataCache() {
@@ -417,10 +371,17 @@ namespace ams::kern::arch::arm64::cpu {
         for (int level = levels_of_coherency; level > 1; --level) {
             FlushDataCacheBySetWay(level - 1);
         }
+
+        /* Data synchronization barrier for full system. */
+        DataSynchronizationBarrier();
     }

     Result InvalidateDataCache(void *addr, size_t size) {
-        KScopedCoreMigrationDisable dm;
+        /* Mark ourselves as in a cache maintenance operation, and prevent re-ordering. */
+        __asm__ __volatile__("" ::: "memory");
+        GetCurrentThread().SetInCacheMaintenanceOperation();
+        ON_SCOPE_EXIT { GetCurrentThread().ClearInCacheMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

         const uintptr_t start = reinterpret_cast<uintptr_t>(addr);
         const uintptr_t end = start + size;
         uintptr_t aligned_start = util::AlignDown(start, DataCacheLineSize);
@@ -444,7 +405,11 @@ namespace ams::kern::arch::arm64::cpu {
     }

     Result StoreDataCache(const void *addr, size_t size) {
-        KScopedCoreMigrationDisable dm;
+        /* Mark ourselves as in a cache maintenance operation, and prevent re-ordering. */
+        __asm__ __volatile__("" ::: "memory");
+        GetCurrentThread().SetInCacheMaintenanceOperation();
+        ON_SCOPE_EXIT { GetCurrentThread().ClearInCacheMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

         const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
         const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);

@@ -452,26 +417,17 @@ namespace ams::kern::arch::arm64::cpu {
     }

     Result FlushDataCache(const void *addr, size_t size) {
-        KScopedCoreMigrationDisable dm;
+        /* Mark ourselves as in a cache maintenance operation, and prevent re-ordering. */
+        __asm__ __volatile__("" ::: "memory");
+        GetCurrentThread().SetInCacheMaintenanceOperation();
+        ON_SCOPE_EXIT { GetCurrentThread().ClearInCacheMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };

         const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
         const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);

         R_RETURN(FlushDataCacheRange(start, end));
     }

-    Result InvalidateInstructionCache(void *addr, size_t size) {
-        KScopedCoreMigrationDisable dm;
-        const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), InstructionCacheLineSize);
-        const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, InstructionCacheLineSize);
-
-        R_TRY(InvalidateInstructionCacheRange(start, end));
-
-        /* Request the interrupt helper to perform an instruction memory barrier. */
-        g_cache_operation_handler.RequestOperation(KCacheHelperInterruptHandler::Operation::InstructionMemoryBarrier);
-
-        R_SUCCEED();
-    }
-
     void InvalidateEntireInstructionCache() {
         KScopedCoreMigrationDisable dm;
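The Result-returning cache operations above no longer pin the thread with KScopedCoreMigrationDisable; instead they raise a per-thread "in cache maintenance operation" flag around the by-VA loop, with empty asm statements acting as compiler barriers so the flag updates are not reordered around the maintenance itself. Presumably this lets the scheduler or exception paths recognize an interrupted maintenance loop, although the commit does not spell the policy out. The pattern could be wrapped as a scope guard (illustrative only, not part of the commit):

    class ScopedCacheMaintenanceFlag {
        public:
            ScopedCacheMaintenanceFlag() {
                __asm__ __volatile__("" ::: "memory");
                GetCurrentThread().SetInCacheMaintenanceOperation();
            }
            ~ScopedCacheMaintenanceFlag() {
                GetCurrentThread().ClearInCacheMaintenanceOperation();
                __asm__ __volatile__("" ::: "memory");
            }
    };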
@@ -61,3 +61,138 @@ _ZN3ams4kern4arch5arm643cpu23SynchronizeAllCoresImplEPii:
 5:
     stlr wzr, [x0]
     ret
+
+/* ams::kern::arch::arm64::cpu::ClearPageToZeroImpl(void *) */
+.section .text._ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv, "ax", %progbits
+.global _ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv
+.type _ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv, %function
+_ZN3ams4kern4arch5arm643cpu19ClearPageToZeroImplEPv:
+    /* Efficiently clear the page using dc zva. */
+    dc zva, x0
+    add x8, x0, #0x040
+    dc zva, x8
+    add x8, x0, #0x080
+    dc zva, x8
+    add x8, x0, #0x0c0
+    dc zva, x8
+    add x8, x0, #0x100
+    dc zva, x8
+    add x8, x0, #0x140
+    dc zva, x8
+    add x8, x0, #0x180
+    dc zva, x8
+    add x8, x0, #0x1c0
+    dc zva, x8
+    add x8, x0, #0x200
+    dc zva, x8
+    add x8, x0, #0x240
+    dc zva, x8
+    add x8, x0, #0x280
+    dc zva, x8
+    add x8, x0, #0x2c0
+    dc zva, x8
+    add x8, x0, #0x300
+    dc zva, x8
+    add x8, x0, #0x340
+    dc zva, x8
+    add x8, x0, #0x380
+    dc zva, x8
+    add x8, x0, #0x3c0
+    dc zva, x8
+    add x8, x0, #0x400
+    dc zva, x8
+    add x8, x0, #0x440
+    dc zva, x8
+    add x8, x0, #0x480
+    dc zva, x8
+    add x8, x0, #0x4c0
+    dc zva, x8
+    add x8, x0, #0x500
+    dc zva, x8
+    add x8, x0, #0x540
+    dc zva, x8
+    add x8, x0, #0x580
+    dc zva, x8
+    add x8, x0, #0x5c0
+    dc zva, x8
+    add x8, x0, #0x600
+    dc zva, x8
+    add x8, x0, #0x640
+    dc zva, x8
+    add x8, x0, #0x680
+    dc zva, x8
+    add x8, x0, #0x6c0
+    dc zva, x8
+    add x8, x0, #0x700
+    dc zva, x8
+    add x8, x0, #0x740
+    dc zva, x8
+    add x8, x0, #0x780
+    dc zva, x8
+    add x8, x0, #0x7c0
+    dc zva, x8
+    add x8, x0, #0x800
+    dc zva, x8
+    add x8, x0, #0x840
+    dc zva, x8
+    add x8, x0, #0x880
+    dc zva, x8
+    add x8, x0, #0x8c0
+    dc zva, x8
+    add x8, x0, #0x900
+    dc zva, x8
+    add x8, x0, #0x940
+    dc zva, x8
+    add x8, x0, #0x980
+    dc zva, x8
+    add x8, x0, #0x9c0
+    dc zva, x8
+    add x8, x0, #0xa00
+    dc zva, x8
+    add x8, x0, #0xa40
+    dc zva, x8
+    add x8, x0, #0xa80
+    dc zva, x8
+    add x8, x0, #0xac0
+    dc zva, x8
+    add x8, x0, #0xb00
+    dc zva, x8
+    add x8, x0, #0xb40
+    dc zva, x8
+    add x8, x0, #0xb80
+    dc zva, x8
+    add x8, x0, #0xbc0
+    dc zva, x8
+    add x8, x0, #0xc00
+    dc zva, x8
+    add x8, x0, #0xc40
+    dc zva, x8
+    add x8, x0, #0xc80
+    dc zva, x8
+    add x8, x0, #0xcc0
+    dc zva, x8
+    add x8, x0, #0xd00
+    dc zva, x8
+    add x8, x0, #0xd40
+    dc zva, x8
+    add x8, x0, #0xd80
+    dc zva, x8
+    add x8, x0, #0xdc0
+    dc zva, x8
+    add x8, x0, #0xe00
+    dc zva, x8
+    add x8, x0, #0xe40
+    dc zva, x8
+    add x8, x0, #0xe80
+    dc zva, x8
+    add x8, x0, #0xec0
+    dc zva, x8
+    add x8, x0, #0xf00
+    dc zva, x8
+    add x8, x0, #0xf40
+    dc zva, x8
+    add x8, x0, #0xf80
+    dc zva, x8
+    add x8, x0, #0xfc0
+    dc zva, x8
+    ret
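The unrolled routine above covers a full 0x1000-byte page in 0x40-byte strides, which matches the loop that the inline ClearPageToZero previously performed (see the removal in the cpu header hunk earlier). An equivalent C++ form, assuming DataCacheLineSize is 0x40 as the strides imply; the function name is for reference only:

    void ClearPageToZeroReference(void *page) {
        uintptr_t cur = reinterpret_cast<uintptr_t>(__builtin_assume_aligned(page, PageSize));
        const uintptr_t last = cur + PageSize - DataCacheLineSize;
        for (/* ... */; cur <= last; cur += DataCacheLineSize) {
            __asm__ __volatile__("dc zva, %[cur]" :: [cur]"r"(cur) : "memory");
        }
    }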
@@ -257,21 +257,21 @@ namespace ams::kern::arch::arm64 {
     #define MESOSPHERE_SET_HW_BREAK_POINT(ID, FLAGS, VALUE) \
         ({ \
             cpu::SetDbgBcr##ID##El1(0); \
-            cpu::EnsureInstructionConsistency(); \
+            cpu::EnsureInstructionConsistencyFullSystem(); \
             cpu::SetDbgBvr##ID##El1(VALUE); \
-            cpu::EnsureInstructionConsistency(); \
+            cpu::EnsureInstructionConsistencyFullSystem(); \
             cpu::SetDbgBcr##ID##El1(FLAGS); \
-            cpu::EnsureInstructionConsistency(); \
+            cpu::EnsureInstructionConsistencyFullSystem(); \
         })

     #define MESOSPHERE_SET_HW_WATCH_POINT(ID, FLAGS, VALUE) \
         ({ \
             cpu::SetDbgWcr##ID##El1(0); \
-            cpu::EnsureInstructionConsistency(); \
+            cpu::EnsureInstructionConsistencyFullSystem(); \
             cpu::SetDbgWvr##ID##El1(VALUE); \
-            cpu::EnsureInstructionConsistency(); \
+            cpu::EnsureInstructionConsistencyFullSystem(); \
             cpu::SetDbgWcr##ID##El1(FLAGS); \
-            cpu::EnsureInstructionConsistency(); \
+            cpu::EnsureInstructionConsistencyFullSystem(); \
         })

     Result KDebug::SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, u64 flags, u64 value) {
@@ -158,6 +158,32 @@ namespace ams::kern::arch::arm64 {

     }

+    ALWAYS_INLINE void KPageTable::NoteUpdated() const {
+        cpu::DataSynchronizationBarrierInnerShareableStore();
+
+        /* Mark ourselves as in a tlb maintenance operation. */
+        GetCurrentThread().SetInTlbMaintenanceOperation();
+        ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };
+
+        if (this->IsKernel()) {
+            this->OnKernelTableUpdated();
+        } else {
+            this->OnTableUpdated();
+        }
+    }
+
+    ALWAYS_INLINE void KPageTable::NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
+        MESOSPHERE_ASSERT(this->IsKernel());
+
+        cpu::DataSynchronizationBarrierInnerShareableStore();
+
+        /* Mark ourselves as in a tlb maintenance operation. */
+        GetCurrentThread().SetInTlbMaintenanceOperation();
+        ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };
+
+        this->OnKernelTableSinglePageUpdated(virt_addr);
+    }
+
     void KPageTable::Initialize(s32 core_id) {
         /* Nothing actually needed here. */
         MESOSPHERE_UNUSED(core_id);
@@ -412,9 +438,8 @@ namespace ams::kern::arch::arm64 {

                 /* Set the entry. */
                 l2_phys = GetPageTablePhysicalAddress(l2_virt);
-                PteDataSynchronizationBarrier();
+                PteDataMemoryBarrier();
                 *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
-                PteDataSynchronizationBarrier();
             } else {
                 l2_virt = GetPageTableVirtualAddress(l2_phys);
             }
@@ -477,9 +502,8 @@ namespace ams::kern::arch::arm64 {

                 /* Set the entry. */
                 l2_phys = GetPageTablePhysicalAddress(l2_virt);
-                PteDataSynchronizationBarrier();
+                PteDataMemoryBarrier();
                 *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
-                PteDataSynchronizationBarrier();
                 l2_allocated = true;
             } else {
                 l2_virt = GetPageTableVirtualAddress(l2_phys);
@@ -505,9 +529,8 @@ namespace ams::kern::arch::arm64 {

                 /* Set the entry. */
                 l3_phys = GetPageTablePhysicalAddress(l3_virt);
-                PteDataSynchronizationBarrier();
+                PteDataMemoryBarrier();
                 *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
-                PteDataSynchronizationBarrier();
                 l2_open_count++;
             } else {
                 l3_virt = GetPageTableVirtualAddress(l3_phys);
@@ -631,7 +654,7 @@ namespace ams::kern::arch::arm64 {
                 for (size_t i = 0; i < num_l2_blocks; i++) {
                     *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry;
                 }
-                PteDataSynchronizationBarrier();
+                PteDataMemoryBarrier();

                 /* Close references to the L2 table. */
                 if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
@@ -665,7 +688,7 @@ namespace ams::kern::arch::arm64 {
                 for (size_t i = 0; i < num_l3_blocks; i++) {
                     *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry;
                 }
-                PteDataSynchronizationBarrier();
+                PteDataMemoryBarrier();

                 /* Close references to the L3 table. */
                 if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
@@ -783,6 +806,9 @@ namespace ams::kern::arch::arm64 {
             this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
         }

+        /* Wait for pending stores to complete. */
+        cpu::DataSynchronizationBarrierInnerShareableStore();
+
         /* Open references to the pages, if we should. */
         if (IsHeapPhysicalAddress(orig_phys_addr)) {
             Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
@@ -878,6 +904,9 @@ namespace ams::kern::arch::arm64 {
             this->MergePages(orig_virt_addr + (num_pages - 1) * PageSize, page_list);
         }

+        /* Wait for pending stores to complete. */
+        cpu::DataSynchronizationBarrierInnerShareableStore();
+
         /* We succeeded! We want to persist the reference to the pages. */
         spg.CancelClose();
         R_SUCCEED();
@@ -967,7 +996,6 @@ namespace ams::kern::arch::arm64 {
             auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());

             /* Merge! */
-            PteDataSynchronizationBarrier();
             *l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);

             /* Note that we updated. */
@@ -1049,7 +1077,6 @@ namespace ams::kern::arch::arm64 {
             auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_entry->IsHeadMergeDisabled(), head_entry->IsHeadAndBodyMergeDisabled(), tail_entry->IsTailMergeDisabled());

             /* Merge! */
-            /* NOTE: As of 13.1.0, Nintendo does not do: PteDataSynchronizationBarrier(); */
             *l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false);

             /* Note that we updated. */
@@ -1097,7 +1124,7 @@ namespace ams::kern::arch::arm64 {
             this->GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize);

             /* Replace the L1 entry with one to the new table. */
-            PteDataSynchronizationBarrier();
+            PteDataMemoryBarrier();
             *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true);
             this->NoteUpdated();
         }
@@ -1147,7 +1174,7 @@ namespace ams::kern::arch::arm64 {
             this->GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize);

             /* Replace the L2 entry with one to the new table. */
-            PteDataSynchronizationBarrier();
+            PteDataMemoryBarrier();
             *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, l3_phys, this->IsKernel(), true);
             this->NoteUpdated();
         }
@@ -577,26 +577,6 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess19InvalidateDataCacheEmm:
     mov x0, #1
     ret

-/* ams::kern::arch::arm64::UserspaceAccess::InvalidateInstructionCache(uintptr_t start, uintptr_t end) */
-.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm, "ax", %progbits
-.global _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm
-.type _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm, %function
-.balign 0x10
-_ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm:
-    /* Check if we have any work to do. */
-    cmp x1, x0
-    b.eq 2f
-
-1:  /* Loop, invalidating each cache line. */
-    ic ivau, x0
-    add x0, x0, #0x40
-    cmp x1, x0
-    b.ne 1b
-
-2:  /* We're done! */
-    mov x0, #1
-    ret
-
 /* ams::kern::arch::arm64::UserspaceAccess::ReadIoMemory32Bit(void *dst, const void *src, size_t size) */
 .section .text._ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm, "ax", %progbits
 .global _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm
@@ -278,6 +278,9 @@ _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm:
 .global _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm
 .type _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm, %function
 _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm:
+    /* cpu::DataSynchronizationBarrier(); */
+    dsb sy
+
     /* const u64 level_sel_value = level << 1; */
     lsl x8, x0, #1
@@ -179,13 +179,7 @@ namespace ams::kern {
             }
         }

-        /* Flush caches. */
-        /* NOTE: This seems incorrect according to arm spec, which says not to flush via set/way after boot. */
-        /* However, Nintendo flushes the entire cache here and not doing so has caused reports of abort with ESR_EL1 */
-        /* as 0x02000000 (unknown abort) to occur. */
         MESOSPHERE_UNUSED(params);
-        cpu::FlushEntireDataCache();
-        cpu::InvalidateEntireInstructionCache();

         R_SUCCEED();
     }
@@ -1315,7 +1315,7 @@ namespace ams::kern {
         /* If the thread is runnable, send a termination interrupt to other cores. */
         if (this->GetState() == ThreadState_Runnable) {
             if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
-                cpu::DataSynchronizationBarrier();
+                cpu::DataSynchronizationBarrierInnerShareable();
                 Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_ThreadTerminate, core_mask);
             }
         }
@@ -32,6 +32,13 @@ namespace ams::os::impl {
         /* Calculate cache line size. */
         cache_line_size = 4 << ((cache_type_register >> 16) & 0xF);

+        /* Get the thread local region. */
+        auto * const tlr = svc::GetThreadLocalRegion();
+
+        /* Note to the kernel that we're performing cache maintenance, in case we get interrupted while touching cache lines. */
+        tlr->cache_maintenance_flag = 1;
+        ON_SCOPE_EXIT { tlr->cache_maintenance_flag = 0; };
+
         /* Iterate, flushing cache lines. */
         for (uintptr_t cur = reinterpret_cast<uintptr_t>(addr) & ~(cache_line_size - 1); cur < end_addr; cur += cache_line_size) {
             __asm__ __volatile__ ("dc civac, %[cur]" :: [cur]"r"(cur));
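Userspace now mirrors the kernel's convention: before the dc civac loop it sets cache_maintenance_flag in its thread local region and clears it on scope exit, so the kernel can tell the thread was mid-maintenance if it gets interrupted while touching cache lines. A compact sketch of the guarded loop; the wrapper function name is illustrative only:

    void FlushDataCacheGuarded(const void *addr, size_t size, uintptr_t cache_line_size) {
        auto * const tlr = svc::GetThreadLocalRegion();
        tlr->cache_maintenance_flag = 1;
        ON_SCOPE_EXIT { tlr->cache_maintenance_flag = 0; };

        const uintptr_t end_addr = reinterpret_cast<uintptr_t>(addr) + size;
        for (uintptr_t cur = reinterpret_cast<uintptr_t>(addr) & ~(cache_line_size - 1); cur < end_addr; cur += cache_line_size) {
            __asm__ __volatile__("dc civac, %[cur]" :: [cur]"r"(cur) : "memory");
        }
    }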
@@ -31,6 +31,15 @@ namespace ams::dd::impl {
         __asm__ __volatile__("mrs %[ctr_el0], ctr_el0" : [ctr_el0]"=r"(ctr_el0));
         const uintptr_t cache_line_size = 4 << ((ctr_el0 >> 16) & 0xF);

+#if defined(ATMOSPHERE_IS_STRATOSPHERE)
+        /* Get the thread local region. */
+        auto * const tlr = svc::GetThreadLocalRegion();
+
+        /* Note to the kernel that we're performing cache maintenance, in case we get interrupted while touching cache lines. */
+        tlr->cache_maintenance_flag = 1;
+        ON_SCOPE_EXIT { tlr->cache_maintenance_flag = 0; };
+#endif
+
         /* Invalidate the cache. */
         const uintptr_t start_addr = reinterpret_cast<uintptr_t>(addr) & ~(cache_line_size - 1);
         const uintptr_t end_addr = reinterpret_cast<uintptr_t>(addr) + size;
@@ -62,6 +71,15 @@ namespace ams::dd::impl {
         __asm__ __volatile__("mrs %[ctr_el0], ctr_el0" : [ctr_el0]"=r"(ctr_el0));
         const uintptr_t cache_line_size = 4 << ((ctr_el0 >> 16) & 0xF);

+#if defined(ATMOSPHERE_IS_STRATOSPHERE)
+        /* Get the thread local region. */
+        auto * const tlr = svc::GetThreadLocalRegion();
+
+        /* Note to the kernel that we're performing cache maintenance, in case we get interrupted while touching cache lines. */
+        tlr->cache_maintenance_flag = 1;
+        ON_SCOPE_EXIT { tlr->cache_maintenance_flag = 0; };
+#endif
+
         /* Invalidate the cache. */
         const uintptr_t start_addr = reinterpret_cast<uintptr_t>(addr) & ~(cache_line_size - 1);
         const uintptr_t end_addr = reinterpret_cast<uintptr_t>(addr) + size;
@@ -570,13 +570,13 @@ namespace ams::kern::init {
cpu::DebugFeatureRegisterAccessor aa64dfr0;
const auto num_watchpoints = aa64dfr0.GetNumWatchpoints();
const auto num_breakpoints = aa64dfr0.GetNumBreakpoints();
cpu::EnsureInstructionConsistency();
cpu::EnsureInstructionConsistencyFullSystem();

/* Clear the debug monitor register and the os lock access register. */
cpu::MonitorDebugSystemControlRegisterAccessor(0).Store();
cpu::EnsureInstructionConsistency();
cpu::EnsureInstructionConsistencyFullSystem();
cpu::OsLockAccessRegisterAccessor(0).Store();
cpu::EnsureInstructionConsistency();
cpu::EnsureInstructionConsistencyFullSystem();

/* Clear all debug watchpoints/breakpoints. */
#define FOR_I_IN_15_TO_1(HANDLER, ...) \

@@ -620,22 +620,22 @@ namespace ams::kern::init {
#undef MESOSPHERE_INITIALIZE_BREAKPOINT_CASE
#undef FOR_I_IN_15_TO_1

cpu::EnsureInstructionConsistency();
cpu::EnsureInstructionConsistencyFullSystem();

/* Initialize the context id register to all 1s. */
cpu::ContextIdRegisterAccessor(0).SetProcId(std::numeric_limits<u32>::max()).Store();
cpu::EnsureInstructionConsistency();
cpu::EnsureInstructionConsistencyFullSystem();

/* Configure the debug monitor register. */
cpu::MonitorDebugSystemControlRegisterAccessor(0).SetMde(true).SetTdcc(true).Store();
cpu::EnsureInstructionConsistency();
cpu::EnsureInstructionConsistencyFullSystem();
}

void InitializeExceptionVectors() {
cpu::SetVbarEl1(reinterpret_cast<uintptr_t>(::ams::kern::ExceptionVectors));
cpu::SetTpidrEl1(0);
cpu::SetExceptionThreadStackTop(0);
cpu::EnsureInstructionConsistency();
cpu::EnsureInstructionConsistencyFullSystem();
}

size_t GetMiscUnknownDebugRegionSize() {
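The hunk above swaps cpu::EnsureInstructionConsistency() for cpu::EnsureInstructionConsistencyFullSystem() around the debug-register writes. The names suggest the distinction is the shareability domain of the data synchronization barrier that precedes the ISB; below is a sketch under that assumption, with illustrative wrappers that are not mesosphere's actual implementation:

#include <cstdint>

/* Sketch only: hypothetical wrappers illustrating the likely difference between the two
 * consistency helpers, namely the shareability domain of the DSB that precedes the ISB. */
namespace sketch::cpu {

    inline void EnsureInstructionConsistencyInnerShareable() {
        __asm__ __volatile__("dsb ish" ::: "memory"); /* complete prior accesses/maintenance within the inner-shareable domain */
        __asm__ __volatile__("isb"     ::: "memory"); /* then resynchronize the instruction stream */
    }

    inline void EnsureInstructionConsistencyFullSystem() {
        __asm__ __volatile__("dsb sy" ::: "memory");  /* complete prior accesses/maintenance across the full system */
        __asm__ __volatile__("isb"    ::: "memory");
    }

}

The full-system variant is the conservative choice for writes that affect system-wide debug state, at the cost of a heavier barrier.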
@@ -84,17 +84,31 @@ _ZN3ams4kern4init10StartCore0Emm:
mov x20, x1

/* Check our current EL. We want to be executing out of EL1. */
/* If we're in EL2, we'll need to deprivilege ourselves. */
mrs x1, currentel

/* Check if we're EL1. */
cmp x1, #0x4
b.eq core0_el1
b.eq 2f

/* Check if we're EL2. */
cmp x1, #0x8
b.eq core0_el2
b.eq 1f
core0_el3:
b core0_el3
0: /* We're EL3. This is a panic condition. */
core0_el2:
b 0b

1: /* We're EL2. */
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
/* On NX board, this is a panic condition. */
b 1b
#else
/* Otherwise, deprivilege to EL2. */
/* TODO: Does N still have this? We need it for qemu emulation/unit testing, we should come up with a better solution maybe. */
bl _ZN3ams4kern4init16JumpFromEL2ToEL1Ev
core0_el1:
#endif

2: /* We're EL1. */
/* Disable the MMU/Caches. */
bl _ZN3ams4kern4init19DisableMmuAndCachesEv

#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
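For reference on the EL checks above: CurrentEL stores the exception level in bits [3:2], so the immediate 0x4 corresponds to EL1 and 0x8 to EL2. A small sketch of the encoding:

#include <cstdint>

/* CurrentEL.EL occupies bits [3:2], so the register reads back as EL << 2. */
static constexpr uint64_t EncodeCurrentEl(unsigned el) {
    return static_cast<uint64_t>(el) << 2;
}

static_assert(EncodeCurrentEl(1) == 0x4, "EL1 reads back as 0x4");
static_assert(EncodeCurrentEl(2) == 0x8, "EL2 reads back as 0x8");
static_assert(EncodeCurrentEl(3) == 0xC, "EL3 reads back as 0xC");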
@@ -103,14 +117,18 @@ core0_el1:
mov w1, #65000
smc #1
cmp x0, #0
0:
3:
b.ne 0b
b.ne 3b

/* Store the target firmware. */
adr x0, __metadata_target_firmware
str w1, [x0]
#endif

/* Get the unknown debug region. */
/* TODO: This is always zero in release kernels -- what is this? Is it the device tree buffer? */
mov x21, #0

/* We want to invoke kernel loader. */
adr x0, _start
adr x1, __metadata_kernel_layout

@@ -126,7 +144,7 @@ core0_el1:
/* Next thing to do is to set up our memory management and slabheaps -- all the other core initialization. */
/* Call ams::kern::init::InitializeCore(uintptr_t, void **) */
mov x1, x0 /* Kernelldr returns a state object for the kernel to re-use. */
mov x0, xzr /* Official kernel always passes zero, when this is non-zero the address is mapped. */
mov x0, x21 /* Use the address we determined earlier. */
bl _ZN3ams4kern4init14InitializeCoreEmPPv

/* Get the init arguments for core 0. */
@@ -144,17 +162,31 @@ _ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE:
mov x20, x0

/* Check our current EL. We want to be executing out of EL1. */
/* If we're in EL2, we'll need to deprivilege ourselves. */
mrs x1, currentel

/* Check if we're EL1. */
cmp x1, #0x4
b.eq othercore_el1
b.eq 2f

/* Check if we're EL2. */
cmp x1, #0x8
b.eq othercore_el2
b.eq 1f
othercore_el3:
b othercore_el3
0: /* We're EL3. This is a panic condition. */
othercore_el2:
b 0b

1: /* We're EL2. */
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
/* On NX board, this is a panic condition. */
b 1b
#else
/* Otherwise, deprivilege to EL2. */
/* TODO: Does N still have this? We need it for qemu emulation/unit testing, we should come up with a better solution maybe. */
bl _ZN3ams4kern4init16JumpFromEL2ToEL1Ev
othercore_el1:
#endif

2: /* We're EL1. */
/* Disable the MMU/Caches. */
bl _ZN3ams4kern4init19DisableMmuAndCachesEv

/* Setup system registers using values from our KInitArguments. */

@@ -171,21 +203,20 @@ othercore_el1:
mrs x1, midr_el1
ubfx x2, x1, #0x18, #0x8 /* Extract implementer bits. */
cmp x2, #0x41 /* Implementer::ArmLimited */
b.ne othercore_cpu_specific_setup_end
b.ne 4f
ubfx x2, x1, #0x4, #0xC /* Extract primary part number. */
cmp x2, #0xD07 /* PrimaryPartNumber::CortexA57 */
b.eq othercore_cpu_specific_setup_cortex_a57
b.eq 3f
cmp x2, #0xD03 /* PrimaryPartNumber::CortexA53 */
b.eq othercore_cpu_specific_setup_cortex_a53
b.eq 3f
b othercore_cpu_specific_setup_end
b 4f
othercore_cpu_specific_setup_cortex_a57:
3: /* We're running on a Cortex-A53/Cortex-A57. */
othercore_cpu_specific_setup_cortex_a53:
ldr x1, [x20, #(INIT_ARGUMENTS_CPUACTLR)]
msr cpuactlr_el1, x1
ldr x1, [x20, #(INIT_ARGUMENTS_CPUECTLR)]
msr cpuectlr_el1, x1

othercore_cpu_specific_setup_end:
4:
/* Ensure instruction consistency. */
dsb sy
isb
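The MIDR_EL1 probe above extracts the implementer (bits [31:24]) and the primary part number (bits [15:4]) to detect Arm Limited Cortex-A57/A53 parts before applying the CPUACTLR/CPUECTLR values from the init arguments. A sketch of that decode; the sample MIDR value is for illustration only:

#include <cstdint>

/* MIDR_EL1 layout used here: implementer in bits [31:24], primary part number in bits [15:4]. */
struct MidrFields {
    uint8_t  implementer;
    uint16_t primary_part_number;
};

static constexpr MidrFields DecodeMidr(uint64_t midr_el1) {
    return MidrFields{
        static_cast<uint8_t>((midr_el1 >> 24) & 0xFF),
        static_cast<uint16_t>((midr_el1 >> 4) & 0xFFF),
    };
}

/* Example: a Cortex-A57 r1p1 part. */
static constexpr uint64_t SampleMidr = 0x411FD071;
static_assert(DecodeMidr(SampleMidr).implementer == 0x41, "Implementer::ArmLimited");
static_assert(DecodeMidr(SampleMidr).primary_part_number == 0xD07, "PrimaryPartNumber::CortexA57");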
@@ -237,7 +268,8 @@ _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE:
ldr x0, [x20, #(INIT_ARGUMENTS_ARGUMENT)]
br x1

/* TODO: Can we remove this while retaining QEMU support? */
#ifndef ATMOSPHERE_BOARD_NINTENDO_NX
/* ams::kern::init::JumpFromEL2ToEL1() */
.section .crt0.text._ZN3ams4kern4init16JumpFromEL2ToEL1Ev, "ax", %progbits
.global _ZN3ams4kern4init16JumpFromEL2ToEL1Ev

@@ -314,6 +346,7 @@ _ZN3ams4kern4init16JumpFromEL2ToEL1Ev:
msr spsr_el2, x0

eret

#endif

/* ams::kern::init::DisableMmuAndCaches() */
.section .crt0.text._ZN3ams4kern4init19DisableMmuAndCachesEv, "ax", %progbits

@@ -341,6 +374,10 @@ _ZN3ams4kern4init19DisableMmuAndCachesEv:
and x0, x0, x1
msr sctlr_el1, x0

/* Ensure instruction consistency. */
dsb sy
isb

mov x30, x22
ret
@@ -354,13 +391,10 @@ _ZN3ams4kern4arch5arm643cpu32FlushEntireDataCacheWithoutStackEv:

/* Ensure that the cache is coherent. */
bl _ZN3ams4kern4arch5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv
dsb sy

bl _ZN3ams4kern4arch5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv
dsb sy

bl _ZN3ams4kern4arch5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv
dsb sy

/* Invalidate the entire TLB, and ensure instruction consistency. */
tlbi vmalle1is

@@ -387,10 +421,10 @@ _ZN3ams4kern4arch5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv:
mov x9, xzr

/* while (level <= levels_of_unification) { */
begin_flush_cache_local_loop:
cmp x9, x10
b.eq done_flush_cache_local_loop
b.eq 1f

0:
/* FlushEntireDataCacheImplWithoutStack(level); */
mov w0, w9
bl _ZN3ams4kern4arch5arm643cpu36FlushEntireDataCacheImplWithoutStackEv

@@ -399,9 +433,13 @@ begin_flush_cache_local_loop:
add w9, w9, #1

/* } */
b begin_flush_cache_local_loop
cmp x9, x10
b.ne 0b

done_flush_cache_local_loop:
/* cpu::DataSynchronizationBarrier(); */
dsb sy

1:
mov x30, x24
ret
@@ -423,21 +461,25 @@ _ZN3ams4kern4arch5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv:
/* int level = levels_of_unification */

/* while (level <= levels_of_coherency) { */
begin_flush_cache_shared_loop:
cmp w9, w10
b.hi done_flush_cache_shared_loop
b.hi 1f

0:
/* FlushEntireDataCacheImplWithoutStack(level); */
mov w0, w9
bl _ZN3ams4kern4arch5arm643cpu36FlushEntireDataCacheImplWithoutStackEv

/* level++; */
cmp w9, w10
add w9, w9, #1

/* } */
b begin_flush_cache_shared_loop
b.cc 0b

done_flush_cache_shared_loop:
/* cpu::DataSynchronizationBarrier(); */
dsb sy

1:
mov x30, x24
ret
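The two routines above iterate cache levels derived from CLIDR_EL1: the local pass covers levels below the level of unification (LoUIS, bits [23:21]) and the shared pass covers levels from there up to the level of coherency (LoC, bits [26:24]); the dsb sy that previously followed each call in FlushEntireDataCacheWithoutStack now sits at the end of each helper's loop. A C++ sketch of the intended traversal, assuming a raw CLIDR_EL1 value and a hypothetical per-level flush callback:

#include <cstdint>

/* Sketch of the traversal the two assembly routines implement; FlushLevelBySetWay is hypothetical. */
static constexpr unsigned GetLevelsOfUnification(uint64_t clidr) { return (clidr >> 21) & 0x7; } /* LoUIS */
static constexpr unsigned GetLevelsOfCoherency(uint64_t clidr)   { return (clidr >> 24) & 0x7; } /* LoC   */

template<typename F>
void FlushEntireDataCacheSketch(uint64_t clidr, F FlushLevelBySetWay) {
    const unsigned louis = GetLevelsOfUnification(clidr);
    const unsigned loc   = GetLevelsOfCoherency(clidr);

    /* "Local" pass: levels [0, louis). */
    for (unsigned level = 0; level < louis; ++level) {
        FlushLevelBySetWay(level);
    }

    /* "Shared" pass: levels [louis, loc]. */
    for (unsigned level = louis; level <= loc; ++level) {
        FlushLevelBySetWay(level);
    }
}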
@@ -450,6 +492,9 @@ _ZN3ams4kern4arch5arm643cpu36FlushEntireDataCacheImplWithoutStackEv:
lsl w6, w0, #1
sxtw x6, w6

/* cpu::DataSynchronizationBarrier(); */
dsb sy

/* cpu::SetCsselrEl1(level_sel_value); */
msr csselr_el1, x6

@@ -479,17 +524,17 @@ _ZN3ams4kern4arch5arm643cpu36FlushEntireDataCacheImplWithoutStackEv:
mov x5, #0

/* while (way <= num_ways) { */
begin_flush_cache_impl_way_loop:
0:
cmp w8, w5
b.lt done_flush_cache_impl_way_loop
b.lt 3f

/* int set = 0; */
mov x0, #0

/* while (set <= num_sets) { */
begin_flush_cache_impl_set_loop:
1:
cmp w3, w0
b.lt done_flush_cache_impl_set_loop
b.lt 2f

/* const u64 cisw_value = (static_cast<u64>(way) << way_shift) | (static_cast<u64>(set) << set_shift) | level_sel_value; */
lsl x2, x5, x7

@@ -504,13 +549,13 @@ begin_flush_cache_impl_set_loop:
add x0, x0, #1

/* } */
b begin_flush_cache_impl_set_loop
b 1b
done_flush_cache_impl_set_loop:
2:

/* way++; */
add x5, x5, 1

/* } */
b begin_flush_cache_impl_way_loop
b 0b
done_flush_cache_impl_way_loop:
3:
ret
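The set/way loop above builds the operand for dc cisw as (way << way_shift) | (set << set_shift) | level_sel_value, where the way occupies the top bits (a shift equal to the count of leading zeros of the associativity-minus-one value) and the set is shifted by the log2 of the line size. A sketch of that packing with illustrative cache geometry; the helper and its parameters are assumptions, not code from this repository:

#include <bit>
#include <cstdint>

/* Sketch of the DC CISW operand:
 *   value = (way << way_shift) | (set << set_shift) | (level << 1),
 * with way_shift = clz32(num_ways) where num_ways is associativity - 1 (ways live in the top bits),
 * and set_shift = log2 of the line size in bytes. */
static constexpr uint64_t MakeCiswValue(uint32_t way, uint32_t set, uint32_t level,
                                        uint32_t num_ways /* associativity - 1 */,
                                        uint32_t line_size_log2) {
    const uint32_t way_shift = static_cast<uint32_t>(std::countl_zero(num_ways));
    const uint32_t set_shift = line_size_log2;
    return (static_cast<uint64_t>(way) << way_shift)
         | (static_cast<uint64_t>(set) << set_shift)
         | (static_cast<uint64_t>(level) << 1);
}

/* Example: a 16-way cache (num_ways = 15) with 64-byte lines, selecting level 1 (the L2). */
static_assert(MakeCiswValue(0, 0, 1, 15, 6) == 0x2);
static_assert(MakeCiswValue(1, 0, 1, 15, 6) == ((UINT64_C(1) << 28) | 0x2));

Writing each such value to dc cisw cleans and invalidates one line, identified by set and way, at the selected cache level.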
@@ -88,17 +88,17 @@ namespace ams::kern::init::loader {
cpu::MemoryAccessIndirectionRegisterAccessor(MairValue).Store();
cpu::TranslationControlRegisterAccessor(TcrValue).Store();

/* Ensure that our configuration takes before proceeding. */
cpu::EnsureInstructionConsistency();

/* Perform board-specific setup. */
PerformBoardSpecificSetup();

/* Ensure that the entire cache is flushed. */
cpu::FlushEntireCacheForInit();

/* Setup SCTLR_EL1. */
/* TODO: Define these bits properly elsewhere, document exactly what each bit set is doing .*/
constexpr u64 SctlrValue = 0x0000000034D5D925ul;
cpu::SetSctlrEl1(SctlrValue);
cpu::EnsureInstructionConsistency();
cpu::InstructionMemoryBarrier();
}

KVirtualAddress GetRandomKernelBaseAddress(KInitialPageTable &page_table, KPhysicalAddress phys_base_address, size_t kernel_size) {