diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
index 93925e3fa..8941928ed 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
@@ -170,9 +170,17 @@ namespace ams::kern::arch::arm64 {
             constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
+
+            constexpr ALWAYS_INLINE u64 GetTestTableMask() const { return (m_attributes & ExtensionFlag_TestTableMask); }
+            constexpr ALWAYS_INLINE bool IsBlock() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
+            constexpr ALWAYS_INLINE bool IsPage() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_TestTableMask; }
             constexpr ALWAYS_INLINE bool IsTable() const { return (m_attributes & ExtensionFlag_TestTableMask) == 2; }
             constexpr ALWAYS_INLINE bool IsEmpty() const { return (m_attributes & ExtensionFlag_TestTableMask) == 0; }
+
+            constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const { return this->SelectBits(12, 36); }
+
+            constexpr ALWAYS_INLINE bool IsMappedTable() const { return this->GetBits(0, 2) == 3; }
             constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }
 
             constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
@@ -196,10 +204,13 @@ namespace ams::kern::arch::arm64 {
                 return (m_attributes & BaseMaskForMerge) == attr;
             }
 
-            constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
+            constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafe() const {
                 return m_attributes;
             }
 
+            constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
+                return m_attributes;
+            }
         protected:
             constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
                 return m_attributes;
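[Annotation] The new base-class predicates classify a raw descriptor by masking it with ExtensionFlag_TestTableMask and comparing the result against 0 (IsEmpty), ExtensionFlag_Valid (IsBlock), 2 (IsTable), and the mask itself (IsPage); GetTestTableMask exposes the masked value directly. IsMappedTable() is the stricter check the new traversal relies on: it reads the two architectural low bits (GetBits(0, 2) == 3), i.e. "valid and table". A minimal sketch of the underlying VMSAv8-64 encoding these predicates build on (the enum and function below are illustrative, not mesosphere code):

    #include <cstdint>

    enum class DescKind { Invalid, Block, TableOrPage };

    /* Classify a raw VMSAv8-64 descriptor by its low two bits: 0b01 is a */
    /* block (L1/L2), 0b11 is a next-level table (L0-L2) or a page (L3),  */
    /* and a clear bit 0 means the entry is invalid.                      */
    constexpr DescKind Classify(uint64_t desc) {
        switch (desc & 0b11) {
            case 0b01: return DescKind::Block;
            case 0b11: return DescKind::TableOrPage;
            default:   return DescKind::Invalid;
        }
    }

    static_assert(Classify(0x0000000040000701) == DescKind::Block);
    static_assert(Classify(0x0000000040000003) == DescKind::TableOrPage);

Note that the same 0b11 encoding means "table" at L0-L2 but "page" at L3, which is why the traversal code below has to pick IsPage() or IsBlock() based on the level at which it stopped.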
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
index e5df6defb..8616ac0dd 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
@@ -37,10 +37,17 @@ namespace ams::kern::arch::arm64 {
                 constexpr bool IsTailMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadTail) != 0; }
             };
 
+            enum EntryLevel : u32 {
+                EntryLevel_L3    = 0,
+                EntryLevel_L2    = 1,
+                EntryLevel_L1    = 2,
+                EntryLevel_Count = 3,
+            };
+
             struct TraversalContext {
-                const L1PageTableEntry *l1_entry;
-                const L2PageTableEntry *l2_entry;
-                const L3PageTableEntry *l3_entry;
+                const PageTableEntry *level_entries[EntryLevel_Count];
+                EntryLevel level;
+                bool is_contiguous;
             };
         private:
             static constexpr size_t PageBits = util::CountTrailingZeros(PageSize);
@@ -53,16 +60,26 @@ namespace ams::kern::arch::arm64 {
                 return (value >> Offset) & ((1ul << Count) - 1);
             }
 
+            static constexpr ALWAYS_INLINE u64 GetBits(u64 value, size_t offset, size_t count) {
+                return (value >> offset) & ((1ul << count) - 1);
+            }
+
             template<size_t Offset, size_t Count>
-            constexpr ALWAYS_INLINE u64 SelectBits(u64 value) {
+            static constexpr ALWAYS_INLINE u64 SelectBits(u64 value) {
                 return value & (((1ul << Count) - 1) << Offset);
             }
 
+            static constexpr ALWAYS_INLINE u64 SelectBits(u64 value, size_t offset, size_t count) {
+                return value & (((1ul << count) - 1) << offset);
+            }
+
             static constexpr ALWAYS_INLINE uintptr_t GetL0Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 0), LevelBits>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetL1Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 1), LevelBits>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetL2Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 2), LevelBits>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetL3Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 3), LevelBits>(GetInteger(addr)); }
 
+            static constexpr ALWAYS_INLINE uintptr_t GetLevelIndex(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), PageBits + LevelBits * level, LevelBits); }
+
             static constexpr ALWAYS_INLINE uintptr_t GetL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1)>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2)>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3)>(GetInteger(addr)); }
@@ -70,13 +87,16 @@ namespace ams::kern::arch::arm64 {
             static constexpr ALWAYS_INLINE uintptr_t GetContiguousL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2) + 4>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetContiguousL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3) + 4>(GetInteger(addr)); }
 
+            static constexpr ALWAYS_INLINE uintptr_t GetBlock(const PageTableEntry *pte, EntryLevel level) { return SelectBits(pte->GetRawAttributesUnsafe(), PageBits + LevelBits * level, LevelBits * (NumLevels + 1 - level)); }
+            static constexpr ALWAYS_INLINE uintptr_t GetOffset(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), 0, PageBits + LevelBits * level); }
+
             static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
                 return KMemoryLayout::GetLinearVirtualAddress(addr);
             }
 
-            ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const;
-            ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
-            ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
+            //ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const;
+            //ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
+            //ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
         private:
             L1PageTableEntry *m_table;
             bool m_is_kernel;
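[Annotation] EntryLevel is ordered leaf-first (EntryLevel_L3 == 0), so an entry at level N covers PageBits + LevelBits * N address bits, level_entries[] can be indexed directly by the enum, and helpers like GetBlock()/GetOffset()/GetLevelIndex() work uniformly at every level. The contiguous hint adds four more bits (16 consecutive entries). A quick check of that block-size arithmetic for the 4KB granule used here (PageBits = 12, LevelBits = 9); the values match the existing L3/L2/L1 block-size constants:

    #include <cstddef>

    constexpr size_t PageBits  = 12;
    constexpr size_t LevelBits = 9;

    /* Mirrors: block_size = 1 << (PageBits + LevelBits * level + 4 * is_contiguous). */
    constexpr size_t BlockSize(unsigned level, bool contiguous) {
        return static_cast<size_t>(1) << (PageBits + LevelBits * level + 4 * contiguous);
    }

    static_assert(BlockSize(0, false) == 4096);          /* L3 page:       4KB  */
    static_assert(BlockSize(0, true)  == 65536);         /* contiguous L3: 64KB */
    static_assert(BlockSize(1, false) == 2097152);       /* L2 block:      2MB  */
    static_assert(BlockSize(1, true)  == 33554432);      /* contiguous L2: 32MB */
    static_assert(BlockSize(2, false) == 1073741824);    /* L1 block:      1GB  */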
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
index 280498f78..5bc775f66 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
@@ -33,103 +33,98 @@ namespace ams::kern::arch::arm64 {
         return m_table;
     }
 
-    bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const {
-        /* Set the L3 entry. */
-        out_context->l3_entry = l3_entry;
-
-        if (l3_entry->IsBlock()) {
-            /* Set the output entry. */
-            out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1));
-            if (l3_entry->IsContiguous()) {
-                out_entry->block_size = L3ContiguousBlockSize;
-            } else {
-                out_entry->block_size = L3BlockSize;
-            }
-            out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
-            out_entry->attr = 0;
-
-            return true;
-        } else {
-            out_entry->phys_addr = Null<KPhysicalAddress>;
-            out_entry->block_size = L3BlockSize;
-            out_entry->sw_reserved_bits = 0;
-            out_entry->attr = 0;
-            return false;
-        }
-    }
-
-    bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const {
-        /* Set the L2 entry. */
-        out_context->l2_entry = l2_entry;
-
-        if (l2_entry->IsBlock()) {
-            /* Set the output entry. */
-            out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1));
-            if (l2_entry->IsContiguous()) {
-                out_entry->block_size = L2ContiguousBlockSize;
-            } else {
-                out_entry->block_size = L2BlockSize;
-            }
-            out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
-            out_entry->attr = 0;
-
-            /* Set the output context. */
-            out_context->l3_entry = nullptr;
-            return true;
-        } else if (l2_entry->IsTable()) {
-            return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr);
-        } else {
-            out_entry->phys_addr = Null<KPhysicalAddress>;
-            out_entry->block_size = L2BlockSize;
-            out_entry->sw_reserved_bits = 0;
-            out_entry->attr = 0;
-
-            out_context->l3_entry = nullptr;
-            return false;
-        }
-    }
-
-    bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const {
-        /* Set the L1 entry. */
-        out_context->l1_entry = l1_entry;
-
-        if (l1_entry->IsBlock()) {
-            /* Set the output entry. */
-            out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1));
-            if (l1_entry->IsContiguous()) {
-                out_entry->block_size = L1ContiguousBlockSize;
-            } else {
-                out_entry->block_size = L1BlockSize;
-            }
-            out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits();
-
-            /* Set the output context. */
-            out_context->l2_entry = nullptr;
-            out_context->l3_entry = nullptr;
-            return true;
-        } else if (l1_entry->IsTable()) {
-            return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr);
-        } else {
-            out_entry->phys_addr = Null<KPhysicalAddress>;
-            out_entry->block_size = L1BlockSize;
-            out_entry->sw_reserved_bits = 0;
-            out_entry->attr = 0;
-
-            out_context->l2_entry = nullptr;
-            out_context->l3_entry = nullptr;
-            return false;
-        }
-    }
+    // bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const {
+    //     /* Set the L3 entry. */
+    //     out_context->l3_entry = l3_entry;
+    //
+    //     if (l3_entry->IsBlock()) {
+    //         /* Set the output entry. */
+    //         out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1));
+    //         if (l3_entry->IsContiguous()) {
+    //             out_entry->block_size = L3ContiguousBlockSize;
+    //         } else {
+    //             out_entry->block_size = L3BlockSize;
+    //         }
+    //         out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
+    //         out_entry->attr = 0;
+    //
+    //         return true;
+    //     } else {
+    //         out_entry->phys_addr = Null<KPhysicalAddress>;
+    //         out_entry->block_size = L3BlockSize;
+    //         out_entry->sw_reserved_bits = 0;
+    //         out_entry->attr = 0;
+    //         return false;
+    //     }
+    // }
+    //
+    // bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const {
+    //     /* Set the L2 entry. */
+    //     out_context->l2_entry = l2_entry;
+    //
+    //     if (l2_entry->IsBlock()) {
+    //         /* Set the output entry. */
+    //         out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1));
+    //         if (l2_entry->IsContiguous()) {
+    //             out_entry->block_size = L2ContiguousBlockSize;
+    //         } else {
+    //             out_entry->block_size = L2BlockSize;
+    //         }
+    //         out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
+    //         out_entry->attr = 0;
+    //
+    //         /* Set the output context. */
+    //         out_context->l3_entry = nullptr;
+    //         return true;
+    //     } else if (l2_entry->IsTable()) {
+    //         return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr);
+    //     } else {
+    //         out_entry->phys_addr = Null<KPhysicalAddress>;
+    //         out_entry->block_size = L2BlockSize;
+    //         out_entry->sw_reserved_bits = 0;
+    //         out_entry->attr = 0;
+    //
+    //         out_context->l3_entry = nullptr;
+    //         return false;
+    //     }
+    // }
+    //
+    // bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const {
+    //     /* Set the L1 entry. */
+    //     out_context->level_entries[EntryLevel_L1] = l1_entry;
+    //
+    //     if (l1_entry->IsBlock()) {
+    //         /* Set the output entry. */
+    //         out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1));
+    //         if (l1_entry->IsContiguous()) {
+    //             out_entry->block_size = L1ContiguousBlockSize;
+    //         } else {
+    //             out_entry->block_size = L1BlockSize;
+    //         }
+    //         out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits();
+    //
+    //         /* Set the output context. */
+    //         out_context->l2_entry = nullptr;
+    //         out_context->l3_entry = nullptr;
+    //         return true;
+    //     } else if (l1_entry->IsTable()) {
+    //         return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr);
+    //     } else {
+    //         out_entry->phys_addr = Null<KPhysicalAddress>;
+    //         out_entry->block_size = L1BlockSize;
+    //         out_entry->sw_reserved_bits = 0;
+    //         out_entry->attr = 0;
+    //
+    //         out_context->l2_entry = nullptr;
+    //         out_context->l3_entry = nullptr;
+    //         return false;
+    //     }
+    // }
 
     bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const {
         /* Setup invalid defaults. */
-        out_entry->phys_addr        = Null<KPhysicalAddress>;
-        out_entry->block_size       = L1BlockSize;
-        out_entry->sw_reserved_bits = 0;
-        out_entry->attr             = 0;
-        out_context->l1_entry = m_table + m_num_entries;
-        out_context->l2_entry = nullptr;
-        out_context->l3_entry = nullptr;
+        *out_entry   = {};
+        *out_context = {};
 
         /* Validate that we can read the actual entry. */
        const size_t l0_index = GetL0Index(address);
@@ -146,125 +141,79 @@ namespace ams::kern::arch::arm64 {
             }
         }
 
-        /* Extract the entry. */
-        const bool valid = this->ExtractL1Entry(out_entry, out_context, this->GetL1Entry(address), address);
+        /* Get the L1 entry, and check if it's a table. */
+        out_context->level_entries[EntryLevel_L1] = this->GetL1Entry(address);
+        if (out_context->level_entries[EntryLevel_L1]->IsMappedTable()) {
+            /* Get the L2 entry, and check if it's a table. */
+            out_context->level_entries[EntryLevel_L2] = this->GetL2EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L1]->GetTable()), address);
+            if (out_context->level_entries[EntryLevel_L2]->IsMappedTable()) {
+                /* Get the L3 entry. */
+                out_context->level_entries[EntryLevel_L3] = this->GetL3EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L2]->GetTable()), address);
 
-        /* Update the context for next traversal. */
-        switch (out_entry->block_size) {
-            case L1ContiguousBlockSize:
-                out_context->l1_entry += (L1ContiguousBlockSize / L1BlockSize) - GetContiguousL1Offset(address) / L1BlockSize;
-                break;
-            case L1BlockSize:
-                out_context->l1_entry += 1;
-                break;
-            case L2ContiguousBlockSize:
-                out_context->l1_entry += 1;
-                out_context->l2_entry += (L2ContiguousBlockSize / L2BlockSize) - GetContiguousL2Offset(address) / L2BlockSize;
-                break;
-            case L2BlockSize:
-                out_context->l1_entry += 1;
-                out_context->l2_entry += 1;
-                break;
-            case L3ContiguousBlockSize:
-                out_context->l1_entry += 1;
-                out_context->l2_entry += 1;
-                out_context->l3_entry += (L3ContiguousBlockSize / L3BlockSize) - GetContiguousL3Offset(address) / L3BlockSize;
-                break;
-            case L3BlockSize:
-                out_context->l1_entry += 1;
-                out_context->l2_entry += 1;
-                out_context->l3_entry += 1;
-                break;
-            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+                /* It's either a page or not. */
+                out_context->level = EntryLevel_L3;
+            } else {
+                /* Not a L2 table, so possibly an L2 block. */
+                out_context->level = EntryLevel_L2;
+            }
+        } else {
+            /* Not a L1 table, so possibly an L1 block. */
+            out_context->level = EntryLevel_L1;
         }
 
-        return valid;
+        /* Determine other fields. */
+        const auto *pte = out_context->level_entries[out_context->level];
+
+        out_context->is_contiguous = pte->IsContiguous();
+
+        out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits();
+        out_entry->attr             = 0;
+        out_entry->phys_addr        = this->GetBlock(pte, out_context->level) + this->GetOffset(address, out_context->level);
+        out_entry->block_size       = static_cast<size_t>(1) << (PageBits + LevelBits * out_context->level + 4 * out_context->is_contiguous);
+
+        return out_context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
     }
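[Annotation] BeginTraversal now performs a single downward walk, descending only while IsMappedTable() holds, recording each visited entry pointer in level_entries[], and deriving the output from whichever level it stopped at; unmapped holes still get a meaningful block_size, so callers can skip ahead. A rough sketch of how a caller drives this API (an illustrative fragment, not code from this patch, and not compilable outside the kernel):

    /* Assumes a KPageTableImpl &impl and a KProcessAddress addr to walk from. */
    KPageTableImpl::TraversalEntry   entry;
    KPageTableImpl::TraversalContext context;

    bool mapped = impl.BeginTraversal(std::addressof(entry), std::addressof(context), addr);
    while (mapped /* && more of the range remains to visit */) {
        /* entry.phys_addr and entry.block_size describe the current mapping. */
        mapped = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
    }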
 
     bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const {
-        bool valid = false;
+        /* Advance entry. */
 
-        /* Check if we're not at the end of an L3 table. */
-        if (!util::IsAligned(reinterpret_cast<uintptr_t>(context->l3_entry), PageSize)) {
-            valid = this->ExtractL3Entry(out_entry, context, context->l3_entry, Null<KProcessAddress>);
+        auto *cur_pte  = context->level_entries[context->level];
+        auto *next_pte = reinterpret_cast<const PageTableEntry *>(context->is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(cur_pte), 0x10 * sizeof(PageTableEntry)) + 0x10 * sizeof(PageTableEntry) : reinterpret_cast<uintptr_t>(cur_pte) + sizeof(PageTableEntry));
 
-            switch (out_entry->block_size) {
-                case L3ContiguousBlockSize:
-                    context->l3_entry += (L3ContiguousBlockSize / L3BlockSize);
-                    break;
-                case L3BlockSize:
-                    context->l3_entry += 1;
-                    break;
-                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
-            }
-        } else if (!util::IsAligned(reinterpret_cast<uintptr_t>(context->l2_entry), PageSize)) {
-            /* We're not at the end of an L2 table. */
-            valid = this->ExtractL2Entry(out_entry, context, context->l2_entry, Null<KProcessAddress>);
+        /* Set the pte. */
+        context->level_entries[context->level] = next_pte;
 
-            switch (out_entry->block_size) {
-                case L2ContiguousBlockSize:
-                    context->l2_entry += (L2ContiguousBlockSize / L2BlockSize);
-                    break;
-                case L2BlockSize:
-                    context->l2_entry += 1;
-                    break;
-                case L3ContiguousBlockSize:
-                    context->l2_entry += 1;
-                    context->l3_entry += (L3ContiguousBlockSize / L3BlockSize);
-                    break;
-                case L3BlockSize:
-                    context->l2_entry += 1;
-                    context->l3_entry += 1;
-                    break;
-                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
-            }
-        } else {
-            /* We need to update the l1 entry. */
-            const size_t l1_index = context->l1_entry - m_table;
-            if (l1_index < m_num_entries) {
-                valid = this->ExtractL1Entry(out_entry, context, context->l1_entry, Null<KProcessAddress>);
-            } else {
-                /* Invalid, end traversal. */
-                out_entry->phys_addr = Null<KPhysicalAddress>;
-                out_entry->block_size = L1BlockSize;
-                out_entry->sw_reserved_bits = 0;
-                out_entry->attr = 0;
-                context->l1_entry = m_table + m_num_entries;
-                context->l2_entry = nullptr;
-                context->l3_entry = nullptr;
+        /* Advance appropriately. */
+        while (context->level < EntryLevel_L1 && util::IsAligned(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), PageSize)) {
+            /* Advance the above table by one entry. */
+            context->level_entries[context->level + 1]++;
+            context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) + 1);
+        }
+
+        /* Check if we've hit the end of the L1 table. */
+        if (context->level == EntryLevel_L1) {
+            if (context->level_entries[EntryLevel_L1] - static_cast<const PageTableEntry *>(m_table) >= m_num_entries) {
+                *context   = {};
+                *out_entry = {};
                 return false;
             }
-
-            switch (out_entry->block_size) {
-                case L1ContiguousBlockSize:
-                    context->l1_entry += (L1ContiguousBlockSize / L1BlockSize);
-                    break;
-                case L1BlockSize:
-                    context->l1_entry += 1;
-                    break;
-                case L2ContiguousBlockSize:
-                    context->l1_entry += 1;
-                    context->l2_entry += (L2ContiguousBlockSize / L2BlockSize);
-                    break;
-                case L2BlockSize:
-                    context->l1_entry += 1;
-                    context->l2_entry += 1;
-                    break;
-                case L3ContiguousBlockSize:
-                    context->l1_entry += 1;
-                    context->l2_entry += 1;
-                    context->l3_entry += (L3ContiguousBlockSize / L3BlockSize);
-                    break;
-                case L3BlockSize:
-                    context->l1_entry += 1;
-                    context->l2_entry += 1;
-                    context->l3_entry += 1;
-                    break;
-                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
-            }
         }
 
-        return valid;
+        /* We may have advanced to a new table, and if we have we should descend. */
+        while (context->level > EntryLevel_L3 && context->level_entries[context->level]->IsMappedTable()) {
+            context->level_entries[context->level - 1] = GetPointer<const PageTableEntry>(GetPageTableVirtualAddress(context->level_entries[context->level]->GetTable()));
+            context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
+        }
+
+        const auto *pte = context->level_entries[context->level];
+
+        context->is_contiguous = pte->IsContiguous();
+
+        out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits();
+        out_entry->attr             = 0;
+        out_entry->phys_addr        = this->GetBlock(pte, context->level);
+        out_entry->block_size       = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);
+        return context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
     }
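[Annotation] The advance step is plain pointer arithmetic on the (linearly mapped) table pages: one 8-byte entry normally, or up to the next 16-entry boundary when the current mapping is contiguous. Because each table is exactly one 4KB page (512 entries), the advanced pointer becoming PageSize-aligned means we walked off the end of the table, so the loop carries into the parent level; the descend loop then re-enters any newly reached table. A small self-contained check of the contiguous-group math (the addresses are made up):

    #include <cstdint>

    constexpr uint64_t AlignDown(uint64_t value, uint64_t align) {
        return value & ~(align - 1);
    }

    /* 8-byte entries in contiguous groups of 16 => 0x80-byte groups. */
    constexpr uint64_t NextContiguousEntry(uint64_t pte_addr) {
        return AlignDown(pte_addr, 16 * 8) + 16 * 8;
    }

    static_assert(NextContiguousEntry(0x1000) == 0x1080); /* group start -> next group    */
    static_assert(NextContiguousEntry(0x1078) == 0x1080); /* mid-group   -> same boundary */
    static_assert(NextContiguousEntry(0x10F8) == 0x1100); /* last entry  -> next group    */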
 
     bool KPageTableImpl::GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
@@ -283,32 +232,27 @@ namespace ams::kern::arch::arm64 {
             }
         }
 
-        /* Try to get from l1 table. */
-        const L1PageTableEntry *l1_entry = this->GetL1Entry(address);
-        if (l1_entry->IsBlock()) {
-            *out = l1_entry->GetBlock() + GetL1Offset(address);
-            return true;
-        } else if (!l1_entry->IsTable()) {
-            return false;
+        /* Get the L1 entry, and check if it's a table. */
+        const PageTableEntry *pte = this->GetL1Entry(address);
+        EntryLevel level          = EntryLevel_L1;
+        if (pte->IsMappedTable()) {
+            /* Get the L2 entry, and check if it's a table. */
+            pte   = this->GetL2EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address);
+            level = EntryLevel_L2;
+            if (pte->IsMappedTable()) {
+                pte   = this->GetL3EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address);
+                level = EntryLevel_L3;
+            }
         }
 
-        /* Try to get from l2 table. */
-        const L2PageTableEntry *l2_entry = this->GetL2Entry(l1_entry, address);
-        if (l2_entry->IsBlock()) {
-            *out = l2_entry->GetBlock() + GetL2Offset(address);
-            return true;
-        } else if (!l2_entry->IsTable()) {
-            return false;
+        const bool is_block = level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
+        if (is_block) {
+            *out = this->GetBlock(pte, level) + this->GetOffset(address, level);
+        } else {
+            *out = Null<KPhysicalAddress>;
         }
 
-        /* Try to get from l3 table. */
-        const L3PageTableEntry *l3_entry = this->GetL3Entry(l2_entry, address);
-        if (l3_entry->IsBlock()) {
-            *out = l3_entry->GetBlock() + GetL3Offset(address);
-            return true;
-        }
-
-        return false;
+        return is_block;
     }
 
     void KPageTableImpl::Dump(uintptr_t start, size_t size) const {
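[Annotation] GetPhysicalAddress is now the same walk minus the context bookkeeping: descend while IsMappedTable(), then treat the final entry as a leaf and compose GetBlock(pte, level) with GetOffset(address, level). A worked example of that composition for an L2 (2MB) block, using made-up addresses:

    #include <cstdint>

    constexpr uint64_t PageBits  = 12;
    constexpr uint64_t LevelBits = 9;

    /* Low PageBits + LevelBits * level bits of the virtual address. */
    constexpr uint64_t Offset(uint64_t addr, unsigned level) {
        return addr & ((uint64_t(1) << (PageBits + LevelBits * level)) - 1);
    }

    /* An L2 block (level 1) whose output base is 2MB-aligned. */
    constexpr uint64_t block_base = 0x0000000080200000;
    constexpr uint64_t virt       = 0x0000000012345678;

    static_assert(Offset(virt, 1) == 0x145678);
    static_assert(block_base + Offset(virt, 1) == 0x80345678);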