diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp index ac0292d08..783d0d5b3 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -28,11 +28,7 @@ namespace ams::kern::arch::arm64 { public: using TraversalEntry = KPageTableImpl::TraversalEntry; using TraversalContext = KPageTableImpl::TraversalContext; - private: - KPageTableManager *manager; - u64 ttbr; - u8 asid; - private: + enum BlockType { BlockType_L3Block, BlockType_L3ContiguousBlock, @@ -64,6 +60,10 @@ namespace ams::kern::arch::arm64 { [BlockType_L1Block] = L1BlockSize, }; + static constexpr BlockType GetMaxBlockType() { + return BlockType_L1Block; + } + static constexpr size_t GetBlockSize(BlockType type) { return BlockSizes[type]; } @@ -91,6 +91,10 @@ namespace ams::kern::arch::arm64 { MESOSPHERE_ASSERT(alignment < L1BlockSize); return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1)); } + private: + KPageTableManager *manager; + u64 ttbr; + u8 asid; protected: virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override; virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override; @@ -111,7 +115,7 @@ namespace ams::kern::arch::arm64 { /* Set page attribute. 
*/ if (properties.io) { - MESOSPHERE_ABORT_UNLESS(!properties.io); + MESOSPHERE_ABORT_UNLESS(!properties.uncached); MESOSPHERE_ABORT_UNLESS((properties.perm & (KMemoryPermission_KernelExecute | KMemoryPermission_UserExecute)) == 0); entry.SetPageAttribute(PageTableEntry::PageAttribute_Device_nGnRnE) diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index b90fcfea2..22b593a42 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -424,6 +424,22 @@ namespace ams::kern { static ALWAYS_INLINE KMemoryRegionTree &GetVirtualLinearMemoryRegionTree() { return s_virtual_linear_tree; } static ALWAYS_INLINE KMemoryRegionTree &GetPhysicalLinearMemoryRegionTree() { return s_physical_linear_tree; } + static ALWAYS_INLINE KMemoryRegionTree::iterator GetEnd(KVirtualAddress) { + return GetVirtualMemoryRegionTree().end(); + } + + static ALWAYS_INLINE KMemoryRegionTree::iterator GetEnd(KPhysicalAddress) { + return GetPhysicalMemoryRegionTree().end(); + } + + static NOINLINE KMemoryRegionTree::iterator FindContainingRegion(KVirtualAddress address) { + return GetVirtualMemoryRegionTree().FindContainingRegion(GetInteger(address)); + } + + static NOINLINE KMemoryRegionTree::iterator FindContainingRegion(KPhysicalAddress address) { + return GetPhysicalMemoryRegionTree().FindContainingRegion(GetInteger(address)); + } + static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) { return GetInteger(address) + s_linear_phys_to_virt_diff; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp index 45b124421..4e557cf15 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp +++ 
b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp @@ -53,7 +53,9 @@ namespace ams::kern { /* TODO: perm/attr operations */ }; - static constexpr size_t RegionAlignment = KernelAslrAlignment; + static constexpr size_t MaxPhysicalMapAlignment = 1_GB; + static constexpr size_t RegionAlignment = 2_MB; + static_assert(RegionAlignment == KernelAslrAlignment); struct PageLinkedList { private: diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index 4d672a831..eb6311a03 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -630,7 +630,81 @@ namespace ams::kern { } Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { - MESOSPHERE_TODO_IMPLEMENT(); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); + MESOSPHERE_ASSERT(util::IsAligned(size, PageSize)); + MESOSPHERE_ASSERT(size > 0); + R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress()); + const size_t num_pages = size / PageSize; + const KPhysicalAddress last = phys_addr + size - 1; + + /* Get region extents. */ + const KProcessAddress region_start = this->GetRegionAddress(KMemoryState_Io); + const size_t region_size = this->GetRegionSize(KMemoryState_Io); + const size_t region_num_pages = region_size / PageSize; + + /* Locate the memory region. */ + auto region_it = KMemoryLayout::FindContainingRegion(phys_addr); + const auto end_it = KMemoryLayout::GetEnd(phys_addr); + R_UNLESS(region_it != end_it, svc::ResultInvalidAddress()); + + MESOSPHERE_ASSERT(region_it->Contains(GetInteger(phys_addr))); + + /* Ensure that the region is mappable. */ + const bool is_rw = perm == KMemoryPermission_UserReadWrite; + do { + /* Check the region attributes. 
*/ + R_UNLESS(!region_it->IsDerivedFrom(KMemoryRegionType_Dram), svc::ResultInvalidAddress()); + R_UNLESS(!region_it->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, svc::ResultInvalidAddress()); + R_UNLESS(!region_it->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), svc::ResultInvalidAddress()); + + /* Check if we're done. */ + if (GetInteger(last) <= region_it->GetLastAddress()) { + break; + } + + /* Advance. */ + region_it++; + } while (region_it != end_it); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Select an address to map at. */ + KProcessAddress addr = Null<KProcessAddress>; + const size_t phys_alignment = std::min(std::min(GetInteger(phys_addr) & -GetInteger(phys_addr), size & -size), MaxPhysicalMapAlignment); + for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) { + const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type)); + if (alignment > phys_alignment) { + continue; + } + + addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages()); + if (addr != Null<KProcessAddress>) { + break; + } + } + R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory()); + + /* Check that we can map IO here. */ + MESOSPHERE_ASSERT(this->CanContain(addr, size, KMemoryState_Io)); + MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Perform mapping operation. */ + const KPageProperties properties = { perm, true, false, false }; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false)); + + /* Update the blocks. 
*/ + this->memory_block_manager.Update(&allocator, addr, num_pages, KMemoryState_Io, perm, KMemoryAttribute_None); + + /* We successfully mapped the pages. */ + return ResultSuccess(); } Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {