From 5c7122d0f36d2cbc1f952fbc7d558d3598a3e442 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Thu, 10 Oct 2024 18:04:54 -0700
Subject: [PATCH] kern: fix more page table refactor bugs

---
 .../arch/arm64/kern_k_page_table_impl.hpp     |  8 ++++++
 .../source/arch/arm64/kern_k_page_table.cpp   | 27 ++++++++++++++-----
 .../arch/arm64/kern_k_page_table_impl.cpp     |  4 +--
 3 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
index 57159c527..e8965c4d8 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
@@ -146,6 +146,14 @@ namespace ams::kern::arch::arm64 {
             static bool MergePages(KVirtualAddress *out, TraversalContext *context);
 
             void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const;
+
+            KProcessAddress GetAddressForContext(const TraversalContext *context) const {
+                KProcessAddress addr = m_is_kernel ? static_cast<uintptr_t>(-GetBlockSize(EntryLevel_L1)) * m_num_entries : 0;
+                for (u32 level = context->level; level <= EntryLevel_L1; ++level) {
+                    addr += ((reinterpret_cast<uintptr_t>(context->level_entries[level]) / sizeof(PageTableEntry)) & (BlocksPerTable - 1)) << (PageBits + LevelBits * level);
+                }
+                return addr;
+            }
     };
 
 }
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index d488a1152..39efc0b5f 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -226,6 +226,9 @@ namespace ams::kern::arch::arm64 {
                     /* If we cleared a table, we need to note that we updated and free the table. */
                     if (freeing_table) {
                         KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
+                        if (table == Null<KVirtualAddress>) {
+                            break;
+                        }
                         ClearPageTable(table);
                         this->GetPageTableManager().Free(table);
                     }
@@ -243,11 +246,14 @@ namespace ams::kern::arch::arm64 {
                         context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
                         freeing_table = true;
                     }
-
                 }
 
                 /* Continue the traversal. */
                 cur_valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
+
+                if (entry.block_size == 0) {
+                    break;
+                }
             }
 
             /* Free any remaining pages. */
@@ -266,7 +272,6 @@ namespace ams::kern::arch::arm64 {
 
                 KPageTableBase::Finalize();
             }
-
             R_SUCCEED();
         }
 
@@ -379,6 +384,7 @@ namespace ams::kern::arch::arm64 {
 
                 /* Unmap the block. */
                 bool freeing_table = false;
+                bool need_recalculate_virt_addr = false;
                 while (true) {
                     /* Clear the entries. */
                     const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1;
@@ -394,8 +400,14 @@ namespace ams::kern::arch::arm64 {
 
                     /* If we cleared a table, we need to note that we updated and free the table. */
                     if (freeing_table) {
+                        /* If there's no table, we also don't need to do a free. */
+                        const KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
+                        if (table == Null<KVirtualAddress>) {
+                            break;
+                        }
                         this->NoteUpdated();
-                        this->FreePageTable(page_list, KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize)));
+                        this->FreePageTable(page_list, table);
+                        need_recalculate_virt_addr = true;
                     }
 
                     /* Advance; we're no longer contiguous. */
@@ -424,9 +436,10 @@ namespace ams::kern::arch::arm64 {
 
                 /* Advance. */
                 size_t freed_size = next_entry.block_size;
-                if (freeing_table) {
+                if (need_recalculate_virt_addr) {
                     /* We advanced more than by the block, so we need to calculate the actual advanced size. */
-                    const KProcessAddress new_virt_addr = util::AlignUp(GetInteger(virt_addr), impl.GetBlockSize(context.level, context.is_contiguous));
+                    const size_t block_size = impl.GetBlockSize(context.level, context.is_contiguous);
+                    const KProcessAddress new_virt_addr = util::AlignDown(GetInteger(impl.GetAddressForContext(std::addressof(context))) + block_size, block_size);
                     MESOSPHERE_ABORT_UNLESS(new_virt_addr >= virt_addr + next_entry.block_size);
 
                     freed_size = std::min(new_virt_addr - virt_addr, remaining_pages * PageSize);
@@ -451,8 +464,8 @@ namespace ams::kern::arch::arm64 {
 
     Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
         MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
-        /* MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); */
-        /* MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); */
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
+        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
 
         auto &impl = this->GetImpl();
         u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
index 3a9c1275b..bddf5323f 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
@@ -176,7 +176,7 @@ namespace ams::kern::arch::arm64 {
 
             /* We want to upgrade a contiguous mapping in a table to a block. */
             PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerTable * sizeof(PageTableEntry)));
-            const KPhysicalAddress phys_addr = GetBlock(pte, context->level);
+            const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(static_cast<EntryLevel>(context->level + 1), false));
 
             /* First, check that all entries are valid for us to merge. */
             const u64 entry_template = pte->GetEntryTemplateForMerge();
@@ -208,7 +208,7 @@ namespace ams::kern::arch::arm64 {
         } else {
             /* We want to upgrade a non-contiguous mapping to a contiguous mapping. */
             PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
-            const KPhysicalAddress phys_addr = GetBlock(pte, context->level);
+            const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(context->level, true));
 
             /* First, check that all entries are valid for us to merge. */
             const u64 entry_template = pte->GetEntryTemplateForMerge();
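
Note on GetAddressForContext(): the helper recovers the virtual address a traversal
context currently points at. For each level from the current one up to L1, it takes
the entry pointer's index within its page-aligned table (the pointer's offset in units
of sizeof(PageTableEntry), masked to BlocksPerTable - 1) and shifts it into that
level's bit position, PageBits + LevelBits * level; kernel tables additionally start
from a negative base, since the kernel address space sits at the top of the range.
The patched unmap path uses this to recompute the current virtual address after a
table is freed, because the traversal then advances by more than next_entry.block_size
(hence the MESOSPHERE_ABORT_UNLESS above).

Below is a minimal, self-contained sketch of that index arithmetic, not part of the
patch: Entry, EntryIndex, AddressForContext, and the g_l* tables are hypothetical
stand-ins, the kernel base offset is omitted, and the constants are the standard
ARM64 4 KiB-granule values (PageBits = 12, LevelBits = 9, 512 entries per table).

    #include <cstdint>
    #include <cstdio>

    constexpr unsigned PageBits  = 12; /* 4 KiB translation granule. */
    constexpr unsigned LevelBits = 9;  /* 512 eight-byte entries per table level. */
    constexpr uint64_t BlocksPerTable = UINT64_C(1) << LevelBits;

    struct Entry { uint64_t raw; };

    /* Tables are page-aligned, so the low bits of an entry pointer (in units of */
    /* sizeof(Entry)) encode the entry's index within its table. */
    uint64_t EntryIndex(const Entry *entry) {
        return (reinterpret_cast<uintptr_t>(entry) / sizeof(Entry)) & (BlocksPerTable - 1);
    }

    /* Rebuild the address selected by one saved entry pointer per level; */
    /* level 0 = L3, level 1 = L2, level 2 = L1, matching the per-level shifts. */
    uint64_t AddressForContext(const Entry *const *entries, unsigned level) {
        uint64_t addr = 0;
        for (unsigned l = level; l <= 2; ++l) {
            addr += EntryIndex(entries[l]) << (PageBits + LevelBits * l);
        }
        return addr;
    }

    alignas(4096) Entry g_l1[512], g_l2[512], g_l3[512];

    int main() {
        /* Pretend a traversal stopped at L3 index 5, under L2 index 7, under L1 index 3. */
        const Entry *entries[3] = { g_l3 + 5, g_l2 + 7, g_l1 + 3 };

        /* Prints 0xc0e05000 = (3 << 30) + (7 << 21) + (5 << 12). */
        std::printf("0x%llx\n", static_cast<unsigned long long>(AddressForContext(entries, 0)));
        return 0;
    }

The same arithmetic explains the freed_size recalculation in the diff: new_virt_addr
is derived from the context's per-level indices rather than from virt_addr, so the
delta reflects however far the traversal actually advanced.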