From 6f423fcfab656e303e07aed0fd133ce317dc3433 Mon Sep 17 00:00:00 2001
From: TuxSH <1922548+TuxSH@users.noreply.github.com>
Date: Mon, 2 Mar 2020 01:49:27 +0000
Subject: [PATCH] thermosphere: rewrite (host) memory map (wip, need to update
 start.s anyway)

---
 .../src/cpu/hvisor_cpu_instructions.hpp     |   2 +
 thermosphere/src/hvisor_irq_manager.hpp     |   9 +-
 thermosphere/src/hvisor_memory_map.cpp      | 199 ++++++++++++++++++
 thermosphere/src/hvisor_memory_map.hpp      |  94 +++++++++
 thermosphere/src/hvisor_virtual_gic.hpp     |   1 -
 thermosphere/src/memory_map.c               | 166 ---------------
 thermosphere/src/traps/hvisor_traps_smc.cpp |   6 +-
 7 files changed, 302 insertions(+), 175 deletions(-)
 create mode 100644 thermosphere/src/hvisor_memory_map.cpp
 create mode 100644 thermosphere/src/hvisor_memory_map.hpp
 delete mode 100644 thermosphere/src/memory_map.c

diff --git a/thermosphere/src/cpu/hvisor_cpu_instructions.hpp b/thermosphere/src/cpu/hvisor_cpu_instructions.hpp
index 9d636460c..cd9801a2a 100644
--- a/thermosphere/src/cpu/hvisor_cpu_instructions.hpp
+++ b/thermosphere/src/cpu/hvisor_cpu_instructions.hpp
@@ -51,9 +51,11 @@ namespace ams::hvisor::cpu {
     DECLARE_SINGLE_ASM_INSN2(dsbSy, "dsb sy")
     DECLARE_SINGLE_ASM_INSN(isb)
 
+    DECLARE_SINGLE_ASM_INSN2(TlbInvalidateEl2Local, "tlbi alle2")
     DECLARE_SINGLE_ASM_INSN2(TlbInvalidateEl2, "tlbi alle2is")
     DECLARE_SINGLE_ASM_INSN2(TlbInvalidateEl1, "tlbi vmalle1is")
     DECLARE_SINGLE_ASM_INSN2(TlbInvalidateEl1Stage12, "tlbi alle1is")
+    DECLARE_SINGLE_ASM_INSN2(TlbInvalidateEl1Stage12Local, "tlbi alle1")
 
     ALWAYS_INLINE void TlbInvalidateEl2Page(uintptr_t addr)
     {
diff --git a/thermosphere/src/hvisor_irq_manager.hpp b/thermosphere/src/hvisor_irq_manager.hpp
index 5c5cbfe75..b2f23c15e 100644
--- a/thermosphere/src/hvisor_irq_manager.hpp
+++ b/thermosphere/src/hvisor_irq_manager.hpp
@@ -20,10 +20,9 @@
 #include "hvisor_synchronization.hpp"
 #include "hvisor_i_interrupt_task.hpp"
 #include "hvisor_exception_stack_frame.hpp"
+#include "hvisor_memory_map.hpp"
 #include "cpu/hvisor_cpu_sysreg_general.hpp"
 
-#include "memory_map.h"
-
 namespace ams::hvisor {
 
     class IrqManager final {
@@ -33,9 +32,9 @@ namespace ams::hvisor {
        static constexpr u8 hostPriority = 0;
        static constexpr u8 guestPriority = 1;
 
-       static inline volatile auto *const gicd = (volatile GicV2Distributor *)MEMORY_MAP_VA_GICD;
-       static inline volatile auto *const gicc = (volatile GicV2Controller *)MEMORY_MAP_VA_GICC;
-       static inline volatile auto *const gich = (volatile GicV2VirtualInterfaceController *)MEMORY_MAP_VA_GICH;
+       static inline volatile auto *const gicd = reinterpret_cast<volatile GicV2Distributor *>(MemoryMap::gicdVa);
+       static inline volatile auto *const gicc = reinterpret_cast<volatile GicV2Controller *>(MemoryMap::giccVa);
+       static inline volatile auto *const gich = reinterpret_cast<volatile GicV2VirtualInterfaceController *>(MemoryMap::gichVa);
 
        static bool IsGuestInterrupt(u32 id);
 
diff --git a/thermosphere/src/hvisor_memory_map.cpp b/thermosphere/src/hvisor_memory_map.cpp
new file mode 100644
index 000000000..f2e661dc1
--- /dev/null
+++ b/thermosphere/src/hvisor_memory_map.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2019-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hvisor_memory_map.hpp"
+#include "hvisor_core_context.hpp"
+
+#include "cpu/hvisor_cpu_mmu.hpp"
+#include "cpu/hvisor_cpu_instructions.hpp"
+
+#include "platform/interrupt_config.h" // TODO remove
+
+namespace ams::hvisor {
+
+    MemoryMap::LoadImageLayout MemoryMap::imageLayout = {};
+    uintptr_t MemoryMap::currentPlatformMmioPage = MemoryMap::mmioPlatBaseVa;
+
+    void MemoryMap::SetupMmu(const MemoryMap::LoadImageLayout *layout)
+    {
+        using namespace cpu;
+
+        constexpr u64 normalAttribs = MMU_INNER_SHAREABLE | MMU_ATTRINDX(Memtype_Normal);
+        constexpr u64 deviceAttribs = MMU_XN | MMU_INNER_SHAREABLE | MMU_ATTRINDX(Memtype_Device_nGnRE);
+
+        /*
+            Layout in physmem:
+                Location1
+                    Image (code and data incl. BSS), whose size is page-aligned
+                Location2
+                    tempbss
+                    MMU table (taken from temp physmem)
+
+            Layout in vmem:
+                Location1
+                    Image
+                    padding
+                    tempbss
+                Location2
+                    Crash stacks
+                    {guard page, stack} * numCores
+                Location3 (all L1, L2, L3 bits set):
+                    MMU table
+
+            We map the table into itself at the entry whose index has all bits set.
+            This is called "recursive page tables" and means (assuming a 39-bit address space) that:
+                - the table will reuse itself as the L2 table for the 0x7FC0000000+ range
+                - the table will reuse itself as the L3 table for the 0x7FFFE00000+ range
+                - the table itself will be accessible at 0x7FFFFFF000
+        */
+
+        using Builder = MmuTableBuilder<3, addressSpaceSize>;
+        uintptr_t mmuTablePa = layout->tempPa + layout->maxTempSize;
+
+        uintptr_t tempVa = imageVa + layout->imageSize;
+        uintptr_t crashStacksPa = layout->tempPa + layout->tempSize;
+        uintptr_t stacksPa = crashStacksPa + crashStacksSize;
+
+        Builder{reinterpret_cast<u64 *>(mmuTablePa)}
+            .InitializeTable()
+            // Image & tempbss & crash stacks
+            .MapBlockRange(imageVa, layout->startPa, layout->imageSize, normalAttribs)
+            .MapBlockRange(tempVa, layout->tempPa, layout->tempSize, normalAttribs)
+            .MapBlockRange(crashStacksBottomVa, crashStacksPa, crashStacksSize, normalAttribs)
+            // Stacks, each with a guard page
+            .MapBlockRange(stacksBottomVa, stacksPa, 0x1000ul * MAX_CORE, normalAttribs, 0x1000)
+            // GICD, GICC, GICH
+            .MapBlock(gicdVa, MEMORY_MAP_PA_GICD, deviceAttribs)
+            .MapBlockRange(giccVa, MEMORY_MAP_PA_GICC, 0x2000, deviceAttribs)
+            .MapBlock(gichVa, MEMORY_MAP_PA_GICH, deviceAttribs)
+            // Recursive page mapping
+            .MapBlock(ttblVa, mmuTablePa, normalAttribs)
+        ;
+    }
+
+    std::array<uintptr_t, 2> MemoryMap::EnableMmuGetStacks(const MemoryMap::LoadImageLayout *layout, u32 coreId)
+    {
+        using namespace cpu;
+        uintptr_t mmuTablePa = layout->tempPa + layout->maxTempSize;
+
+        u32 ps = THERMOSPHERE_GET_SYSREG(id_aa64mmfr0_el1) & 0xF;
+        /*
+            - PA size: from ID_AA64MMFR0_EL1
+            - Granule size: 4KB
+            - Shareability attribute for memory associated with translation table walks using TTBR0_EL2:
+              Inner Shareable
+            - Outer cacheability attribute for memory associated with translation table walks using TTBR0_EL2:
+              Normal memory, Outer Write-Back Read-Allocate Write-Allocate Cacheable
+            - Inner cacheability attribute for memory associated with translation table walks using TTBR0_EL2:
+              Normal memory, Inner Write-Back Read-Allocate Write-Allocate Cacheable
+            - T0SZ: 39-bit virtual address space (T0SZ field = 64 - 39 = 25)
+        */
+        u64 tcr = TCR_EL2_RSVD | TCR_PS(ps) | TCR_TG0(TranslationGranule_4K) | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA | TCR_T0SZ(addressSpaceSize);
+
+
+        /*
+            - Attribute 0: Device-nGnRnE memory
+            - Attribute 1: Normal memory, Inner and Outer Write-Back Read-Allocate Write-Allocate Non-transient
+            - Attribute 2: Device-nGnRE memory
+            - Attribute 3: Normal memory, Inner and Outer Noncacheable
+            - Other attributes: Device-nGnRnE memory
+        */
+        constexpr u64 mair = 0x4404FF00; // Attr3..Attr0 = 0x44, 0x04, 0xFF, 0x00, matching the MemType AttrIndx assignments
+
+        // Set VBAR because we *will* crash (instruction abort because of the value of pc) when enabling the MMU
+        THERMOSPHERE_SET_SYSREG(vbar_el2, layout->vbar);
+
+        // MMU regs config
+        THERMOSPHERE_SET_SYSREG(ttbr0_el2, mmuTablePa);
+        THERMOSPHERE_SET_SYSREG(tcr_el2, tcr);
+        THERMOSPHERE_SET_SYSREG(mair_el2, mair);
+        dsb();
+        isb();
+
+        // TLB invalidation
+        // Whether this does anything before MMU is enabled is impldef, apparently
+        TlbInvalidateEl2Local();
+        dsb();
+        isb();
+
+        // Enable MMU & enable caching. We will crash.
+        u64 sctlr = THERMOSPHERE_GET_SYSREG(sctlr_el2);
+        sctlr |= SCTLR_ELx_I | SCTLR_ELx_C | SCTLR_ELx_M;
+        THERMOSPHERE_SET_SYSREG(sctlr_el2, sctlr);
+        dsb();
+        isb();
+
+        // crashStackTop is fragile; check that crashStacksSize is suitable for MAX_CORE
+        uintptr_t stackTop = stacksBottomVa + 0x2000 * coreId + 0x1000;
+        uintptr_t crashStackTop = crashStacksBottomVa + (crashStacksSize / MAX_CORE) * (1 + coreId);
+        return std::array{stackTop, crashStackTop};
+    }
+
+    uintptr_t MemoryMap::MapPlatformMmio(uintptr_t pa, size_t size)
+    {
+        using namespace cpu;
+        using Builder = MmuTableBuilder<3, addressSpaceSize, true>;
+        constexpr u64 deviceAttribs = MMU_XN | MMU_INNER_SHAREABLE | MMU_ATTRINDX(Memtype_Device_nGnRE);
+
+        uintptr_t va = currentPlatformMmioPage;
+        size = (size + 0xFFF) & ~0xFFFul;
+        Builder{reinterpret_cast<u64 *>(ttblVa)}.MapBlockRange(va, pa, size, deviceAttribs);
+
+        currentPlatformMmioPage += size;
+        return va;
+    }
+
+    uintptr_t MemoryMap::MapGuestPage(uintptr_t pa, u64 memAttribs, u64 shareability)
+    {
+        using namespace cpu;
+        using Builder = MmuTableBuilder<3, addressSpaceSize, true>;
+
+        u64 attribs = MMU_XN | MMU_SH(shareability) | MMU_ATTRINDX(Memtype_Guest_Slot);
+        uintptr_t va = guestMemVa + 0x2000 * currentCoreCtx->GetCoreId(); // one guard page
+
+        // Update mair_el2
+        u64 mair = THERMOSPHERE_GET_SYSREG(mair_el2);
+        mair |= memAttribs << (8 * Memtype_Guest_Slot);
+        THERMOSPHERE_SET_SYSREG(mair_el2, mair);
+        isb();
+
+        Builder{reinterpret_cast<u64 *>(ttblVa)}.MapBlock(va, pa, attribs);
+        TlbInvalidateEl2Page(va);
+        dsb();
+        isb();
+
+        return va;
+    }
+
+    uintptr_t MemoryMap::UnmapGuestPage()
+    {
+        using namespace cpu;
+        using Builder = MmuTableBuilder<3, addressSpaceSize, true>;
+        uintptr_t va = guestMemVa + 0x2000 * currentCoreCtx->GetCoreId();
+
+        dsb();
+        isb();
+
+        Builder{reinterpret_cast<u64 *>(ttblVa)}.Unmap(va);
+        TlbInvalidateEl2Page(va);
+        dsb();
+        isb();
+
+        // Update mair_el2
+        u64 mair = THERMOSPHERE_GET_SYSREG(mair_el2);
+        mair &= ~(0xFFul << (8 * Memtype_Guest_Slot));
+        THERMOSPHERE_SET_SYSREG(mair_el2, mair);
+        isb();
+
+        return va;
+    }
+}
diff --git a/thermosphere/src/hvisor_memory_map.hpp b/thermosphere/src/hvisor_memory_map.hpp
new file mode 100644
index 000000000..9676e6979
--- /dev/null
+++ b/thermosphere/src/hvisor_memory_map.hpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2019-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "defines.hpp"
+
+namespace ams::hvisor {
+
+    class MemoryMap final {
+        NON_COPYABLE(MemoryMap);
+        NON_MOVEABLE(MemoryMap);
+        private:
+            // Maps to AttrIndx[2:0]
+            enum MemType {
+                Memtype_Device_nGnRnE = 0,
+                Memtype_Normal = 1,
+                Memtype_Device_nGnRE = 2,
+                Memtype_Normal_Uncacheable = 3,
+                Memtype_Guest_Slot = 4,
+            };
+
+            struct LoadImageLayout {
+                uintptr_t startPa;
+                size_t imageSize; // "image" includes "real" BSS but not tempbss
+
+                uintptr_t tempPa;
+                size_t maxTempSize;
+                size_t tempSize;
+
+                uintptr_t vbar;
+            };
+            static_assert(std::is_standard_layout_v<LoadImageLayout>);
+            static_assert(std::is_trivial_v<LoadImageLayout>);
+        private:
+            static LoadImageLayout imageLayout;
+            static uintptr_t currentPlatformMmioPage;
+
+        public:
+            static constexpr u32 addressSpaceSize = 39;
+
+            // The following come from the fact we're using a recursive page table:
+            static constexpr uintptr_t selfL2VaRange = 0x7FC0000000ul; // = 511 << 30
+            static constexpr uintptr_t selfL3VaRange = 0x7FFFE00000ul; // = 511 << 30 | 511 << 21
+            static constexpr uintptr_t ttblVa        = 0x7FFFFFF000ul; // = 511 << 30 | 511 << 21 | 511 << 12
+            static constexpr uintptr_t maxVa         = 0x7FFFFFFFFFul; // = all 39 bits set
+
+            static constexpr size_t crashStacksSize = 0x1000ul;
+
+            // Do not use the first 0x10000 to allow for L1/L2 mappings...
+            static constexpr uintptr_t imageVa             = selfL3VaRange + 0x10000;
+            static constexpr uintptr_t crashStacksBottomVa = selfL3VaRange + 0x40000;
+            static constexpr uintptr_t crashStacksTopVa    = crashStacksBottomVa + crashStacksSize;
+            static constexpr uintptr_t guestMemVa          = selfL3VaRange + 0x50000;
+            static constexpr uintptr_t stacksBottomVa      = selfL3VaRange + 0x60000;
+
+            static constexpr uintptr_t mmioBaseVa = selfL3VaRange + 0x80000;
+            static constexpr uintptr_t gicdVa     = mmioBaseVa + 0x0000;
+            static constexpr uintptr_t giccVa     = mmioBaseVa + 0x1000;
+            static constexpr uintptr_t gichVa     = mmioBaseVa + 0x3000;
+
+            static constexpr uintptr_t mmioPlatBaseVa = selfL3VaRange + 0x90000;
+
+            static uintptr_t GetStartPa() { return imageLayout.startPa; }
+
+            // Called before the MMU is enabled. EnableMmuGetStacks must not use a stack frame
+            static void SetupMmu(const LoadImageLayout *layout);
+            static std::array<uintptr_t, 2> EnableMmuGetStacks(const LoadImageLayout *layout, u32 coreId);
+
+            // Caller is expected to invalidate TLB + barrier at some point
+            static uintptr_t MapPlatformMmio(uintptr_t pa, size_t size);
+
+            // Caller is expected to disable interrupts, etc.
+            static uintptr_t MapGuestPage(uintptr_t pa, u64 memAttribs, u64 shareability);
+            static uintptr_t UnmapGuestPage();
+
+        public:
+            constexpr MemoryMap() = delete;
+    };
+
+}
diff --git a/thermosphere/src/hvisor_virtual_gic.hpp b/thermosphere/src/hvisor_virtual_gic.hpp
index ca321c681..021fab512 100644
--- a/thermosphere/src/hvisor_virtual_gic.hpp
+++ b/thermosphere/src/hvisor_virtual_gic.hpp
@@ -20,7 +20,6 @@
 #include "hvisor_core_context.hpp"
 #include "cpu/hvisor_cpu_exception_sysregs.hpp"
 #include "hvisor_irq_manager.hpp"
-#include "memory_map.h"
 
 namespace ams::hvisor {
 
diff --git a/thermosphere/src/memory_map.c b/thermosphere/src/memory_map.c
deleted file mode 100644
index f7dc818c8..000000000
--- a/thermosphere/src/memory_map.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (c) 2019 Atmosphère-NX
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "memory_map.h"
-#include "mmu.h"
-#include "sysreg.h"
-#include "platform/interrupt_config.h"
-
-#define ATTRIB_MEMTYPE_NORMAL MMU_PTE_BLOCK_MEMTYPE(MEMORY_MAP_MEMTYPE_NORMAL)
-#define ATTRIB_MEMTYPE_DEVICE MMU_PTE_BLOCK_MEMTYPE(MEMORY_MAP_MEMTYPE_DEVICE_NGNRE)
-
-static uintptr_t g_currentPlatformMmioPage = MEMORY_MAP_VA_MMIO_PLAT_BASE;
-
-void memoryMapSetupMmu(const LoadImageLayout *layout, u64 *mmuTable)
-{
-    static const u64 normalAttribs = MMU_PTE_BLOCK_INNER_SHAREBLE | ATTRIB_MEMTYPE_NORMAL;
-    static const u64 deviceAttribs = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | ATTRIB_MEMTYPE_DEVICE;
-
-    // mmuTable is currently a PA
-    mmu_init_table(mmuTable, 0x200);
-
-    /*
-        Map the table into itself at the entry which index has all bits set.
-        This is called "recursive page tables" and means (assuming 39-bit addr space) that:
-            - the table will reuse itself as L2 table for the 0x7FC0000000+ range
-            - the table will reuse itself as L3 table for the 0x7FFFE00000+ range
-            - the table itself will be accessible at 0x7FFFFFF000
-    */
-    mmuTable[0x1FF] = (uintptr_t)mmuTable | MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_AF | MMU_PTE_TYPE_TABLE;
-
-    /*
-        Layout in physmem:
-            Location1
-                Image (code and data incl. BSS)
-            Location2
-                tempbss
-                MMU table (taken from temp physmem)
-
-        Layout in vmem:
-            Location1
-                Image
-                padding
-                tempbss
-            Location2
-                Crash stacks
-                {guard page, stack} * numCores
-            Location3 (all L1, L2, L3 bits set):
-                MMU table
-    */
-
-    // Map our code & data (.text/other code, .rodata, .data, .bss) at the bottom of our L3 range, all RWX
-    // Note that the end of "image" is page-aligned
-    // See LD script for more details
-    uintptr_t curVa = MEMORY_MAP_VA_IMAGE;
-    uintptr_t curPa = layout->startPa;
-
-    // Do not map the MMU table in that mapping:
-    mmu_map_page_range(mmuTable, curVa, curPa, layout->imageSize, normalAttribs);
-
-    curVa += layout->imageSize;
-    curPa = layout->tempPa;
-    mmu_map_page_range(mmuTable, curVa, curPa, layout->tempSize, normalAttribs);
-    curPa += layout->tempSize;
-
-    // Map the remaining temporary data as stacks, aligned 0x1000
-
-    // Crash stacks, total size is fixed:
-    curVa = MEMORY_MAP_VA_CRASH_STACKS_BOTTOM;
-    mmu_map_page_range(mmuTable, curVa, curPa, MEMORY_MAP_VA_CRASH_STACKS_SIZE, normalAttribs);
-    curPa += MEMORY_MAP_VA_CRASH_STACKS_SIZE;
-
-    // Regular stacks
-    size_t sizePerStack = 0x1000;
-    curVa = MEMORY_MAP_VA_STACKS_TOP - sizePerStack;
-    for (u32 i = 0; i < 4; i++) {
-        mmu_map_page_range(mmuTable, curVa, curPa, sizePerStack, normalAttribs);
-        curVa -= 2 * sizePerStack;
-        curPa += sizePerStack;
-    }
-
-    // MMIO
-    mmu_map_page(mmuTable, MEMORY_MAP_VA_GICD, MEMORY_MAP_PA_GICD, deviceAttribs);
-    mmu_map_page_range(mmuTable, MEMORY_MAP_VA_GICC, MEMORY_MAP_PA_GICC, 0x2000, deviceAttribs);
-    mmu_map_page(mmuTable, MEMORY_MAP_VA_GICH, MEMORY_MAP_PA_GICH, deviceAttribs);
-}
-
-void memoryMapEnableMmu(const LoadImageLayout *layout)
-{
-    uintptr_t mmuTable = layout->tempPa + layout->maxTempSize;
-
-    u32 ps = GET_SYSREG(id_aa64mmfr0_el1) & 0xF;
-    /*
-        - PA size: from ID_AA64MMFR0_EL1
-        - Granule size: 4KB
-        - Shareability attribute for memory associated with translation table walks using TTBR0_EL2: Inner Shareable
-        - Outer cacheability attribute for memory associated with translation table walks using TTBR0_EL2: Normal memory, Outer Write-Back Read-Allocate Write-Allocate Cacheable
-        - Inner cacheability attribute for memory associated with translation table walks using TTBR0_EL2: Normal memory, Inner Write-Back Read-Allocate Write-Allocate Cacheable
-        - T0SZ = MEMORY_MAP_VA_SPACE_SIZE = 39
-    */
-    u64 tcr = TCR_EL2_RSVD | TCR_PS(ps) | TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA | TCR_T0SZ(MEMORY_MAP_VA_SPACE_SIZE);
-
-
-    /*
-        - Attribute 0: Device-nGnRnE memory
-        - Attribute 1: Normal memory, Inner and Outer Write-Back Read-Allocate Write-Allocate Non-transient
-        - Attribute 2: Device-nGnRE memory
-        - Attribute 3: Normal memory, Inner and Outer Noncacheable
-        - Other attributes: Device-nGnRnE memory
-    */
-    u64 mair = 0x44FF0400;
-
-    // Set VBAR because we *will* crash (instruction abort because of the value of pc) when enabling the MMU
-    SET_SYSREG(vbar_el2, layout->vbar);
-
-    // MMU regs config
-    SET_SYSREG(ttbr0_el2, mmuTable);
-    SET_SYSREG(tcr_el2, tcr);
-    SET_SYSREG(mair_el2, mair);
-    __dsb_local();
-    __isb();
-
-    // TLB invalidation
-    // Whether this does anything before MMU is enabled is impldef, apparently
-    __tlb_invalidate_el2_local();
-    __dsb_local();
-    __isb();
-
-    // Enable MMU & enable caching. We will crash.
-    u64 sctlr = GET_SYSREG(sctlr_el2);
-    sctlr |= SCTLR_ELx_I | SCTLR_ELx_C | SCTLR_ELx_M;
-    SET_SYSREG(sctlr_el2, sctlr);
-    __dsb_local();
-    __isb();
-}
-
-uintptr_t memoryMapGetStackTop(u32 coreId)
-{
-    return MEMORY_MAP_VA_STACKS_TOP - 0x2000 * coreId;
-}
-
-uintptr_t memoryMapPlatformMmio(uintptr_t pa, size_t size)
-{
-    uintptr_t va = g_currentPlatformMmioPage;
-    static const u64 deviceAttribs = MMU_PTE_BLOCK_XN | MMU_PTE_BLOCK_INNER_SHAREBLE | ATTRIB_MEMTYPE_DEVICE;
-    u64 *mmuTable = (u64 *)MEMORY_MAP_VA_TTBL;
-
-    size = (size + 0xFFF) & ~0xFFFul;
-    mmu_map_page_range(mmuTable, va, pa, size, deviceAttribs);
-
-    g_currentPlatformMmioPage += size;
-
-    return va;
-}
diff --git a/thermosphere/src/traps/hvisor_traps_smc.cpp b/thermosphere/src/traps/hvisor_traps_smc.cpp
index e04e4b815..5b6ee75cf 100644
--- a/thermosphere/src/traps/hvisor_traps_smc.cpp
+++ b/thermosphere/src/traps/hvisor_traps_smc.cpp
@@ -16,10 +16,10 @@
 
 #include "hvisor_traps_smc.hpp"
 #include "../hvisor_core_context.hpp"
+#include "../hvisor_memory_map.hpp"
 
 #include "../cpu/hvisor_cpu_caches.hpp"
 #include "../debug_manager.h"
-#include "../memory_map.h"
 
 namespace {
 
@@ -36,7 +36,7 @@ namespace {
         if (cpuId < MAX_CORE) {
             auto &ctx = ams::hvisor::CoreContext::GetInstanceFor(cpuId);
             ctx.SetKernelEntrypoint(ep);
-            frame->WriteRegister(2, g_loadImageLayout.startPa + 4); //FIXME
+            frame->WriteRegister(2, ams::hvisor::MemoryMap::GetStartPa() + 4); //FIXME
         }
         ams::hvisor::cpu::dmb();
     }
@@ -52,7 +52,7 @@ namespace {
         // We may trigger warmboot, depending on powerState (x1 or default value)
         uintptr_t ep = frame->ReadRegister(epIdx);
         ams::hvisor::currentCoreCtx->SetKernelEntrypoint(ep, true);
-        frame->WriteRegister(epIdx, g_loadImageLayout.startPa + 4); //FIXME
+        frame->WriteRegister(epIdx, ams::hvisor::MemoryMap::GetStartPa() + 4); //FIXME
         ams::hvisor::cpu::dmb();
     }
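
A note on the recursive page-table constants used by the new map: with a 39-bit
address space and a 4 KB granule, the L1/L2/L3 index fields are each 9 bits wide,
so pointing entry 511 of the L1 table back at itself yields exactly the
selfL2VaRange/selfL3VaRange/ttblVa values declared in hvisor_memory_map.hpp. The
following standalone sketch (not part of the patch) checks that arithmetic at
compile time:

    // Standalone compile-time check of the recursive page-table VAs used in
    // hvisor_memory_map.hpp (39-bit VA space, 4KB granule).
    #include <cstdint>

    namespace {
        constexpr std::uint64_t recursiveIdx = 511; // all 9 index bits set

        // L1 entries cover 1GB (bits 38:30), L2 2MB (bits 29:21), L3 4KB (bits 20:12).
        constexpr std::uint64_t selfL2VaRange = recursiveIdx << 30;
        constexpr std::uint64_t selfL3VaRange = selfL2VaRange | recursiveIdx << 21;
        constexpr std::uint64_t ttblVa        = selfL3VaRange | recursiveIdx << 12;

        static_assert(selfL2VaRange == 0x7FC0000000ul);
        static_assert(selfL3VaRange == 0x7FFFE00000ul);
        static_assert(ttblVa        == 0x7FFFFFF000ul);
    }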
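Similarly, the mair_el2 value in EnableMmuGetStacks can be derived from the
MemType enum instead of being hard-coded. The sketch below (again standalone,
with the per-attribute encodings taken from the ARMv8-A MAIR definitions) shows
why 0x4404FF00 is the value matching the AttrIndx assignments in the header:

    // Standalone sketch: derive MAIR_EL2 from the AttrIndx assignments in
    // hvisor_memory_map.hpp; Attr<n> occupies bits [8n+7:8n].
    #include <cstdint>

    namespace {
        enum MemType {
            Memtype_Device_nGnRnE      = 0,
            Memtype_Normal             = 1,
            Memtype_Device_nGnRE       = 2,
            Memtype_Normal_Uncacheable = 3,
        };

        constexpr std::uint64_t MairAttrib(MemType idx, std::uint64_t encoding)
        {
            return encoding << (8 * idx);
        }

        // ARMv8-A MAIR attribute encodings: 0x00 Device-nGnRnE, 0x04 Device-nGnRE,
        // 0xFF Normal WB RaWa non-transient, 0x44 Normal Inner+Outer Non-cacheable.
        constexpr std::uint64_t mair =
            MairAttrib(Memtype_Device_nGnRnE,      0x00) |
            MairAttrib(Memtype_Normal,             0xFF) |
            MairAttrib(Memtype_Device_nGnRE,       0x04) |
            MairAttrib(Memtype_Normal_Uncacheable, 0x44);

        static_assert(mair == 0x4404FF00ul);
    }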
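For illustration, a call site for MapPlatformMmio might look like the sketch
below. The address is assumed here for the example (0x70006000 is the Tegra210
UART-A base); this patch itself maps no platform MMIO:

    // Hypothetical usage sketch; the PA and its purpose are assumptions,
    // not something this patch establishes.
    uintptr_t uartVa = ams::hvisor::MemoryMap::MapPlatformMmio(0x70006000ul, 0x1000);
    // Per the header comment, the caller is then expected to invalidate the
    // TLB entry and issue barriers before accessing uartVa.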