diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp
index cc772876b..0943e32ab 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp
@@ -46,9 +46,9 @@ namespace ams::kern {
                 /* We need to have positive size. */
                 R_UNLESS(sz > 0, svc::ResultOutOfMemory());

-                /* Calculate metadata overhead. */
-                const size_t metadata_size = KPageBitmap::CalculateMetadataOverheadSize(sz / sizeof(PageBuffer));
-                const size_t allocatable_size = sz - metadata_size;
+                /* Calculate management overhead. */
+                const size_t management_size = KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
+                const size_t allocatable_size = sz - management_size;

                 /* Set tracking fields. */
                 this->address = memory;
@@ -56,12 +56,12 @@ namespace ams::kern {
                 this->count = allocatable_size / sizeof(PageBuffer);
                 R_UNLESS(this->count > 0, svc::ResultOutOfMemory());

-                /* Clear the metadata region. */
-                u64 *metadata_ptr = GetPointer<u64>(this->address + allocatable_size);
-                std::memset(metadata_ptr, 0, metadata_size);
+                /* Clear the management region. */
+                u64 *management_ptr = GetPointer<u64>(this->address + allocatable_size);
+                std::memset(management_ptr, 0, management_size);

                 /* Initialize the bitmap. */
-                this->page_bitmap.Initialize(metadata_ptr, this->count);
+                this->page_bitmap.Initialize(management_ptr, this->count);

                 /* Free the pages to the bitmap. */
                 std::memset(GetPointer<PageBuffer>(this->address), 0, this->count * sizeof(PageBuffer));
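A standalone sketch of the split Initialize() performs above: the backing buffer ends with the management bitmap, and everything in front of it becomes allocatable PageBuffer slots. The flat one-bit-per-page overhead function and the 4 KiB PageBuffer size below are assumptions for illustration; the real KPageBitmap is hierarchical, so its overhead is somewhat larger.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

/* Assumed stand-ins: sizeof(PageBuffer) is one 4 KiB page, and the bitmap
 * overhead is flattened to one bit per page rounded up to u64 words. */
constexpr size_t PageBufferSize = 0x1000;

constexpr size_t CalculateManagementOverheadSize(size_t num_pages) {
    return ((num_pages + 63) / 64) * sizeof(uint64_t);
}

int main() {
    const size_t sz = 8u << 20; /* hypothetical 8 MiB backing store */

    /* Same split as Initialize(): the management bitmap lives at the tail, */
    /* allocatable PageBuffer slots occupy everything in front of it.       */
    const size_t management_size  = CalculateManagementOverheadSize(sz / PageBufferSize);
    const size_t allocatable_size = sz - management_size;

    std::printf("pages: %zu, management bytes: %zu\n",
                allocatable_size / PageBufferSize, management_size);
    return 0;
}
```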
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
index b1bfa6469..8bca9c4bd 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
@@ -38,6 +38,7 @@ namespace ams::kern {

                 /* Aliases. */
                 Pool_Unsafe = Pool_Application,
+                Pool_Secure = Pool_System,
             };

             enum Direction {
@@ -54,7 +55,7 @@ namespace ams::kern {
                 private:
                     using RefCount = u16;
                 public:
-                    static size_t CalculateMetadataOverheadSize(size_t region_size);
+                    static size_t CalculateManagementOverheadSize(size_t region_size);

                    static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
                        return (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
@@ -62,19 +63,19 @@ namespace ams::kern {
                    }
                private:
                    KPageHeap heap;
                    RefCount *page_reference_counts;
-                    KVirtualAddress metadata_region;
+                    KVirtualAddress management_region;
                    Pool pool;
                    Impl *next;
                    Impl *prev;
                public:
-                    Impl() : heap(), page_reference_counts(), metadata_region(), pool(), next(), prev() { /* ... */ }
+                    Impl() : heap(), page_reference_counts(), management_region(), pool(), next(), prev() { /* ... */ }

-                    size_t Initialize(const KMemoryRegion *region, Pool pool, KVirtualAddress metadata_region, KVirtualAddress metadata_region_end);
+                    size_t Initialize(const KMemoryRegion *region, Pool pool, KVirtualAddress management_region, KVirtualAddress management_region_end);

                    KVirtualAddress AllocateBlock(s32 index, bool random) { return this->heap.AllocateBlock(index, random); }
                    void Free(KVirtualAddress addr, size_t num_pages) { this->heap.Free(addr, num_pages); }

-                    void InitializeOptimizedMemory() { std::memset(GetVoidPointer(this->metadata_region), 0, CalculateOptimizedProcessOverheadSize(this->heap.GetSize())); }
+                    void InitializeOptimizedMemory() { std::memset(GetVoidPointer(this->management_region), 0, CalculateOptimizedProcessOverheadSize(this->heap.GetSize())); }

                    void TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages);
                    void TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages);
@@ -172,7 +173,7 @@ namespace ams::kern {
                /* ... */
            }

-            NOINLINE void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size);
+            NOINLINE void Initialize(KVirtualAddress management_region, size_t management_region_size);

            NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool);
            NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
@@ -247,8 +248,8 @@ namespace ams::kern {
                return total;
            }
        public:
-            static size_t CalculateMetadataOverheadSize(size_t region_size) {
-                return Impl::CalculateMetadataOverheadSize(region_size);
+            static size_t CalculateManagementOverheadSize(size_t region_size) {
+                return Impl::CalculateManagementOverheadSize(region_size);
            }

            static constexpr ALWAYS_INLINE u32 EncodeOption(Pool pool, Direction dir) {
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp
index 56ef731e1..33d21334c 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp
@@ -254,7 +254,7 @@ namespace ams::kern {
                }
            }
        public:
-            static constexpr size_t CalculateMetadataOverheadSize(size_t region_size) {
+            static constexpr size_t CalculateManagementOverheadSize(size_t region_size) {
                size_t overhead_bits = 0;
                for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
                    region_size = util::AlignUp(region_size, BITSIZEOF(u64)) / BITSIZEOF(u64);
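The renamed KPageBitmap::CalculateManagementOverheadSize() shown above charges one u64 word per 64 tracked entries at every level of the bitmap hierarchy. A runnable sketch of that loop follows; GetRequiredDepth() is not part of this diff, so the reconstruction below (one level per divide-by-64) is an assumption.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t AlignUp(size_t v, size_t a) { return (v + a - 1) / a * a; }

/* Assumed reconstruction: one level per divide-by-64 of the entry count. */
constexpr int GetRequiredDepth(size_t region_size) {
    int depth = 0;
    do {
        region_size /= 64;
        ++depth;
    } while (region_size > 0);
    return depth;
}

/* Same shape as the renamed function above: each level needs one bit per
 * 64 entries of the level below, stored in u64 words. */
constexpr size_t CalculateManagementOverheadSize(size_t region_size) {
    size_t overhead_bits = 0;
    for (int depth = GetRequiredDepth(region_size) - 1; depth >= 0; --depth) {
        region_size = AlignUp(region_size, 64) / 64;
        overhead_bits += region_size;
    }
    return overhead_bits * sizeof(uint64_t);
}

int main() {
    /* e.g. tracking 0x40000 entries (1 GiB of 4 KiB pages) */
    std::printf("%zu bytes\n", CalculateManagementOverheadSize(0x40000));
    return 0;
}
```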
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
index 76f527991..3f11d6e68 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp
@@ -115,11 +115,11 @@ namespace ams::kern {
                        return this->heap_address + (offset << this->GetShift());
                    }
                public:
-                    static constexpr size_t CalculateMetadataOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) {
+                    static constexpr size_t CalculateManagementOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) {
                        const size_t cur_block_size = (u64(1) << cur_block_shift);
                        const size_t next_block_size = (u64(1) << next_block_shift);
                        const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size;
-                        return KPageBitmap::CalculateMetadataOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size);
+                        return KPageBitmap::CalculateManagementOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size);
                    }
            };
@@ -129,7 +129,7 @@ namespace ams::kern {
            size_t num_blocks;
            Block blocks[NumMemoryBlockPageShifts];
        private:
-            void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size, const size_t *block_shifts, size_t num_block_shifts);
+            void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
            size_t GetNumFreePages() const;

            void FreeBlock(KVirtualAddress block, s32 index);
@@ -142,8 +142,8 @@ namespace ams::kern {
            constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; }
            constexpr size_t GetPageOffsetToEnd(KVirtualAddress block) const { return (this->GetEndAddress() - block) / PageSize; }

-            void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size) {
-                return Initialize(heap_address, heap_size, metadata_address, metadata_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
+            void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size) {
+                return Initialize(heap_address, heap_size, management_address, management_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
            }

            size_t GetFreeSize() const { return this->GetNumFreePages() * PageSize; }
@@ -155,10 +155,10 @@ namespace ams::kern {
            KVirtualAddress AllocateBlock(s32 index, bool random);
            void Free(KVirtualAddress addr, size_t num_pages);
        private:
-            static size_t CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
+            static size_t CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
        public:
-            static size_t CalculateMetadataOverheadSize(size_t region_size) {
-                return CalculateMetadataOverheadSize(region_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
+            static size_t CalculateManagementOverheadSize(size_t region_size) {
+                return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
            }
    };
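Block::CalculateManagementOverheadSize() and the KPageHeap wrappers renamed above combine per-block-size bitmaps into one total, page-aligned overhead figure. A sketch under stated assumptions: the block shifts are illustrative stand-ins, not the kernel's MemoryBlockPageShifts table, and BitmapOverhead() reuses the assumed reconstruction from the previous example.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t AlignUp(size_t v, size_t a) { return (v + a - 1) / a * a; }

/* Assumed reconstruction of the hierarchical bitmap overhead. */
constexpr size_t BitmapOverhead(size_t region_size) {
    int depth = 0;
    for (size_t t = region_size; t > 0; t /= 64) { ++depth; }
    size_t bits = 0;
    for (int d = depth - 1; d >= 0; --d) {
        region_size = AlignUp(region_size, 64) / 64;
        bits += region_size;
    }
    return bits * sizeof(uint64_t);
}

/* Mirrors KPageHeap::CalculateManagementOverheadSize(): one bitmap per block
 * size, with the total rounded up to a whole page. */
size_t CalculateManagementOverheadSize(size_t region_size, const size_t *shifts, size_t n) {
    size_t overhead = 0;
    for (size_t i = 0; i < n; ++i) {
        const size_t cur   = size_t(1) << shifts[i];
        const size_t next  = (i + 1 != n) ? (size_t(1) << shifts[i + 1]) : 0;
        const size_t align = (next != 0) ? next : cur;
        overhead += BitmapOverhead((align * 2 + AlignUp(region_size, align)) / cur);
    }
    return AlignUp(overhead, 0x1000); /* PageSize assumed to be 4 KiB */
}

int main() {
    const size_t shifts[] = { 12, 16, 21 }; /* illustrative: 4 KiB, 64 KiB, 2 MiB blocks */
    std::printf("%zu bytes\n", CalculateManagementOverheadSize(512ull << 20, shifts, 3));
    return 0;
}
```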
diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
index e888d5bcf..071942bc7 100644
--- a/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
@@ -59,6 +59,79 @@ namespace ams::kern {

    namespace init {

+        namespace {
+
+            void SetupPoolPartitionMemoryRegionsImpl() {
+                /* Start by identifying the extents of the DRAM memory region. */
+                const auto dram_extents = KMemoryLayout::GetMainMemoryPhysicalExtents();
+
+                const uintptr_t pool_end = dram_extents.GetEndAddress() - KTraceBufferSize;
+
+                /* Get Application and Applet pool sizes. */
+                const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize();
+                const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize();
+                const size_t unsafe_system_pool_min_size = KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();
+
+                /* Find the start of the kernel DRAM region. */
+                const KMemoryRegion *kernel_dram_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramKernelBase);
+                MESOSPHERE_INIT_ABORT_UNLESS(kernel_dram_region != nullptr);
+
+                const uintptr_t kernel_dram_start = kernel_dram_region->GetAddress();
+                MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(kernel_dram_start, CarveoutAlignment));
+
+                /* Find the start of the pool partitions region. */
+                const KMemoryRegion *pool_partitions_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(KMemoryRegionType_DramPoolPartition, 0);
+                MESOSPHERE_INIT_ABORT_UNLESS(pool_partitions_region != nullptr);
+                const uintptr_t pool_partitions_start = pool_partitions_region->GetAddress();
+
+                /* Decide on starting addresses for our pools. */
+                const uintptr_t application_pool_start = pool_end - application_pool_size;
+                const uintptr_t applet_pool_start = application_pool_start - applet_pool_size;
+                const uintptr_t unsafe_system_pool_start = std::min(kernel_dram_start + CarveoutSizeMax, util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
+                const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;
+
+                /* We want to arrange application pool depending on where the middle of dram is. */
+                const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
+                u32 cur_pool_attr = 0;
+                size_t total_overhead_size = 0;
+                if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
+                    InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
+                    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
+                } else {
+                    const size_t first_application_pool_size = dram_midpoint - application_pool_start;
+                    const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint;
+                    InsertPoolPartitionRegionIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
+                    InsertPoolPartitionRegionIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
+                    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
+                    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
+                }
+
+                /* Insert the applet pool. */
+                InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
+                total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);
+
+                /* Insert the nonsecure system pool. */
+                InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
+                total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
+
+                /* Insert the pool management region. */
+                total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
+                const uintptr_t pool_management_start = unsafe_system_pool_start - total_overhead_size;
+                const size_t pool_management_size = total_overhead_size;
+                u32 pool_management_attr = 0;
+                InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
+
+                /* Insert the system pool. */
+                const uintptr_t system_pool_size = pool_management_start - pool_partitions_start;
+                InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
+            }
+
+            void SetupPoolPartitionMemoryRegionsDeprecatedImpl() {
+                MESOSPHERE_UNIMPLEMENTED();
+            }
+
+        }
+
        void SetupDevicePhysicalMemoryRegions() {
            /* TODO: Give these constexpr defines somewhere? */
            MESOSPHERE_INIT_ABORT_UNLESS(SetupUartPhysicalMemoryRegion());
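The new SetupPoolPartitionMemoryRegionsImpl() packs pools downward from the end of DRAM: application at the top, applet below it, then the carveout-aligned non-secure system pool, with the management region and the secure system pool taking what remains at the bottom. A minimal sketch of that address arithmetic with made-up sizes; the CarveoutAlignment and CarveoutSizeMax values here are hypothetical, not the kernel's constants.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr uint64_t AlignDown(uint64_t v, uint64_t a) { return v / a * a; }

int main() {
    /* All values are hypothetical, chosen only to show the arithmetic. */
    const uint64_t pool_end           = 0x9'0000'0000;
    const uint64_t kernel_dram_start  = 0x8'0060'0000;
    const uint64_t carveout_alignment = 0x20000;      /* assumed */
    const uint64_t carveout_size_max  = 0x1000'0000;  /* assumed */

    const uint64_t application_pool_size       = 0x2000'0000;
    const uint64_t applet_pool_size            = 0x1000'0000;
    const uint64_t unsafe_system_pool_min_size = 0x800'0000;

    /* Pools are packed downward from pool_end, exactly as above. */
    const uint64_t application_pool_start = pool_end - application_pool_size;
    const uint64_t applet_pool_start      = application_pool_start - applet_pool_size;
    const uint64_t unsafe_system_pool_start = std::min(
        kernel_dram_start + carveout_size_max,
        AlignDown(applet_pool_start - unsafe_system_pool_min_size, carveout_alignment));
    const uint64_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;

    std::printf("application:   %#llx\n", (unsigned long long)application_pool_start);
    std::printf("applet:        %#llx\n", (unsigned long long)applet_pool_start);
    std::printf("unsafe system: %#llx (%llu MiB)\n",
                (unsigned long long)unsafe_system_pool_start,
                (unsigned long long)(unsafe_system_pool_size >> 20));
    return 0;
}
```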
@@ -84,69 +157,13 @@ namespace ams::kern {
        }

        void SetupPoolPartitionMemoryRegions() {
-            /* Start by identifying the extents of the DRAM memory region. */
-            const auto dram_extents = KMemoryLayout::GetMainMemoryPhysicalExtents();
-
-            const uintptr_t pool_end = dram_extents.GetEndAddress() - KTraceBufferSize;
-
-            /* Get Application and Applet pool sizes. */
-            const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize();
-            const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize();
-            const size_t unsafe_system_pool_min_size = KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();
-
-            /* Find the start of the kernel DRAM region. */
-            const KMemoryRegion *kernel_dram_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramKernelBase);
-            MESOSPHERE_INIT_ABORT_UNLESS(kernel_dram_region != nullptr);
-
-            const uintptr_t kernel_dram_start = kernel_dram_region->GetAddress();
-            MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(kernel_dram_start, CarveoutAlignment));
-
-            /* Find the start of the pool partitions region. */
-            const KMemoryRegion *pool_partitions_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(KMemoryRegionType_DramPoolPartition, 0);
-            MESOSPHERE_INIT_ABORT_UNLESS(pool_partitions_region != nullptr);
-            const uintptr_t pool_partitions_start = pool_partitions_region->GetAddress();
-
-            /* Decide on starting addresses for our pools. */
-            const uintptr_t application_pool_start = pool_end - application_pool_size;
-            const uintptr_t applet_pool_start = application_pool_start - applet_pool_size;
-            const uintptr_t unsafe_system_pool_start = std::min(kernel_dram_start + CarveoutSizeMax, util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
-            const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;
-
-            /* We want to arrange application pool depending on where the middle of dram is. */
-            const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
-            u32 cur_pool_attr = 0;
-            size_t total_overhead_size = 0;
-            if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
-                InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
-                total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(application_pool_size);
+            if (GetTargetFirmware() >= TargetFirmware_5_0_0) {
+                /* On 5.0.0+, setup modern 4-pool-partition layout. */
+                SetupPoolPartitionMemoryRegionsImpl();
            } else {
-                const size_t first_application_pool_size = dram_midpoint - application_pool_start;
-                const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint;
-                InsertPoolPartitionRegionIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
-                InsertPoolPartitionRegionIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
-                total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(first_application_pool_size);
-                total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(second_application_pool_size);
+                /* On < 5.0.0, setup a legacy 2-pool layout for backwards compatibility. */
+                SetupPoolPartitionMemoryRegionsDeprecatedImpl();
            }
-
-            /* Insert the applet pool. */
-            InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
-            total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(applet_pool_size);
-
-            /* Insert the nonsecure system pool. */
-            InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
-            total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(unsafe_system_pool_size);
-
-            /* Insert the metadata pool. */
-            total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
-            const uintptr_t metadata_pool_start = unsafe_system_pool_start - total_overhead_size;
-            const size_t metadata_pool_size = total_overhead_size;
-            u32 metadata_pool_attr = 0;
-            InsertPoolPartitionRegionIntoBothTrees(metadata_pool_start, metadata_pool_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, metadata_pool_attr);
-
-            /* Insert the system pool. */
-            const uintptr_t system_pool_size = metadata_pool_start - pool_partitions_start;
-            InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
-
        }

    }
diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
index cc4dab05d..517805dee 100644
--- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
@@ -31,10 +31,10 @@ namespace ams::kern {

    }

-    void KMemoryManager::Initialize(KVirtualAddress metadata_region, size_t metadata_region_size) {
-        /* Clear the metadata region to zero. */
-        const KVirtualAddress metadata_region_end = metadata_region + metadata_region_size;
-        std::memset(GetVoidPointer(metadata_region), 0, metadata_region_size);
+    void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
+        /* Clear the management region to zero. */
+        const KVirtualAddress management_region_end = management_region + management_region_size;
+        std::memset(GetVoidPointer(management_region), 0, management_region_size);

        /* Traverse the virtual memory layout tree, initializing each manager as appropriate. */
        while (true) {
@@ -72,9 +72,9 @@ namespace ams::kern {
                Impl *manager = std::addressof(this->managers[this->num_managers++]);
                MESOSPHERE_ABORT_UNLESS(this->num_managers <= util::size(this->managers));

-                const size_t cur_size = manager->Initialize(region, pool, metadata_region, metadata_region_end);
-                metadata_region += cur_size;
-                MESOSPHERE_ABORT_UNLESS(metadata_region <= metadata_region_end);
+                const size_t cur_size = manager->Initialize(region, pool, management_region, management_region_end);
+                management_region += cur_size;
+                MESOSPHERE_ABORT_UNLESS(management_region <= management_region_end);

                /* Insert the manager into the pool list. */
                if (this->pool_managers_tail[pool] == nullptr) {
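Each Impl consumes one slice of the shared management region, advancing the cursor and asserting it stays in bounds. Within a slice, Impl::Initialize() below lays out the optimize map, then the page reference counts, then the page heap's bitmaps. A sketch of that layout math follows; the page-heap overhead stand-in is a deliberate simplification.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t PageSize = 0x1000;
constexpr size_t AlignUp(size_t v, size_t a) { return (v + a - 1) / a * a; }

/* Assumed stand-in for KPageHeap::CalculateManagementOverheadSize(); see the
 * earlier page-heap sketch for a closer approximation. */
size_t PageHeapOverhead(size_t region_size) {
    return AlignUp(region_size / PageSize / 8, PageSize);
}

int main() {
    const size_t region_size = 512ull << 20; /* hypothetical 512 MiB pool */

    /* Same math as Impl::Initialize(): optimize map, then reference counts, */
    /* then the page heap's bitmaps, with the first two page-aligned.        */
    const size_t optimize_map_size = (AlignUp(region_size / PageSize, 64) / 64) * sizeof(uint64_t);
    const size_t ref_count_size    = (region_size / PageSize) * sizeof(uint16_t);
    const size_t manager_size      = AlignUp(optimize_map_size + ref_count_size, PageSize);
    const size_t page_heap_size    = PageHeapOverhead(region_size);

    std::printf("optimize map at +%#zx (%zu bytes)\n", (size_t)0, optimize_map_size);
    std::printf("ref counts   at +%#zx (%zu bytes)\n", optimize_map_size, ref_count_size);
    std::printf("page heap    at +%#zx (%zu bytes)\n", manager_size, page_heap_size);
    std::printf("total slice  = %#zx bytes\n", manager_size + page_heap_size);
    return 0;
}
```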
@@ -313,25 +313,25 @@ namespace ams::kern {
        return ResultSuccess();
    }

-    size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) {
-        /* Calculate metadata sizes. */
+    size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress management, KVirtualAddress management_end) {
+        /* Calculate management sizes. */
        const size_t ref_count_size = (region->GetSize() / PageSize) * sizeof(u16);
        const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(region->GetSize());
        const size_t manager_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
-        const size_t page_heap_size = KPageHeap::CalculateMetadataOverheadSize(region->GetSize());
-        const size_t total_metadata_size = manager_size + page_heap_size;
-        MESOSPHERE_ABORT_UNLESS(manager_size <= total_metadata_size);
-        MESOSPHERE_ABORT_UNLESS(metadata + total_metadata_size <= metadata_end);
-        MESOSPHERE_ABORT_UNLESS(util::IsAligned(total_metadata_size, PageSize));
+        const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region->GetSize());
+        const size_t total_management_size = manager_size + page_heap_size;
+        MESOSPHERE_ABORT_UNLESS(manager_size <= total_management_size);
+        MESOSPHERE_ABORT_UNLESS(management + total_management_size <= management_end);
+        MESOSPHERE_ABORT_UNLESS(util::IsAligned(total_management_size, PageSize));

        /* Setup region. */
        this->pool = p;
-        this->metadata_region = metadata;
-        this->page_reference_counts = GetPointer<RefCount>(metadata + optimize_map_size);
-        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(this->metadata_region), PageSize));
+        this->management_region = management;
+        this->page_reference_counts = GetPointer<RefCount>(management + optimize_map_size);
+        MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(this->management_region), PageSize));

        /* Initialize the manager's KPageHeap. */
-        this->heap.Initialize(region->GetAddress(), region->GetSize(), metadata + manager_size, page_heap_size);
+        this->heap.Initialize(region->GetAddress(), region->GetSize(), management + manager_size, page_heap_size);

        /* Free the memory to the heap. */
        this->heap.Free(region->GetAddress(), region->GetSize() / PageSize);
@@ -339,7 +339,7 @@ namespace ams::kern {
        /* Update the heap's used size. */
        this->heap.UpdateUsedSize();

-        return total_metadata_size;
+        return total_management_size;
    }

    void KMemoryManager::Impl::TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages) {
@@ -348,7 +348,7 @@ namespace ams::kern {
        const size_t last = offset + num_pages - 1;

        /* Track. */
-        u64 *optimize_map = GetPointer<u64>(this->metadata_region);
+        u64 *optimize_map = GetPointer<u64>(this->management_region);
        while (offset <= last) {
            /* Mark the page as not being optimized-allocated. */
            optimize_map[offset / BITSIZEOF(u64)] &= ~(u64(1) << (offset % BITSIZEOF(u64)));
@@ -363,7 +363,7 @@ namespace ams::kern {
        const size_t last = offset + num_pages - 1;

        /* Track. */
-        u64 *optimize_map = GetPointer<u64>(this->metadata_region);
+        u64 *optimize_map = GetPointer<u64>(this->management_region);
        while (offset <= last) {
            /* Mark the page as being optimized-allocated. */
            optimize_map[offset / BITSIZEOF(u64)] |= (u64(1) << (offset % BITSIZEOF(u64)));
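The tracking functions above treat the start of the management region as a one-bit-per-page optimize map, indexed by the page's offset within the manager. A minimal sketch of the same bit arithmetic:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

/* One bit per page, indexed by the page's offset within the manager. */
void SetBit(uint64_t *map, size_t offset)        { map[offset / 64] |=  (uint64_t(1) << (offset % 64)); }
void ClearBit(uint64_t *map, size_t offset)      { map[offset / 64] &= ~(uint64_t(1) << (offset % 64)); }
bool TestBit(const uint64_t *map, size_t offset) { return (map[offset / 64] & (uint64_t(1) << (offset % 64))) != 0; }

int main() {
    uint64_t optimize_map[4] = {}; /* enough for 256 pages */

    SetBit(optimize_map, 70);   /* as in TrackOptimizedAllocation */
    std::printf("%d\n", (int)TestBit(optimize_map, 70));

    ClearBit(optimize_map, 70); /* as in TrackUnoptimizedAllocation */
    std::printf("%d\n", (int)TestBit(optimize_map, 70));
    return 0;
}
```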
@@ -381,7 +381,7 @@ namespace ams::kern {
        const size_t last = offset + num_pages - 1;

        /* Process. */
-        u64 *optimize_map = GetPointer<u64>(this->metadata_region);
+        u64 *optimize_map = GetPointer<u64>(this->management_region);
        while (offset <= last) {
            /* Check if the page has been optimized-allocated before. */
            if ((optimize_map[offset / BITSIZEOF(u64)] & (u64(1) << (offset % BITSIZEOF(u64)))) == 0) {
@@ -399,11 +399,11 @@ namespace ams::kern {
        return any_new;
    }

-    size_t KMemoryManager::Impl::CalculateMetadataOverheadSize(size_t region_size) {
+    size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
        const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
        const size_t optimize_map_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
        const size_t manager_meta_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
-        const size_t page_heap_size = KPageHeap::CalculateMetadataOverheadSize(region_size);
+        const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
        return manager_meta_size + page_heap_size;
    }
diff --git a/libraries/libmesosphere/source/kern_k_page_heap.cpp b/libraries/libmesosphere/source/kern_k_page_heap.cpp
index dd971ea7f..6199649e8 100644
--- a/libraries/libmesosphere/source/kern_k_page_heap.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_heap.cpp
@@ -17,12 +17,12 @@

namespace ams::kern {

-    void KPageHeap::Initialize(KVirtualAddress address, size_t size, KVirtualAddress metadata_address, size_t metadata_size, const size_t *block_shifts, size_t num_block_shifts) {
+    void KPageHeap::Initialize(KVirtualAddress address, size_t size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts) {
        /* Check our assumptions. */
        MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
        MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
        MESOSPHERE_ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
-        const KVirtualAddress metadata_end = metadata_address + metadata_size;
+        const KVirtualAddress management_end = management_address + management_size;

        /* Set our members. */
        this->heap_address = address;
@@ -30,7 +30,7 @@ namespace ams::kern {
        this->num_blocks = num_block_shifts;

        /* Setup bitmaps. */
-        u64 *cur_bitmap_storage = GetPointer<u64>(metadata_address);
+        u64 *cur_bitmap_storage = GetPointer<u64>(management_address);
        for (size_t i = 0; i < num_block_shifts; i++) {
            const size_t cur_block_shift = block_shifts[i];
            const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
@@ -38,7 +38,7 @@ namespace ams::kern {
        }

        /* Ensure we didn't overextend our bounds. */
-        MESOSPHERE_ABORT_UNLESS(KVirtualAddress(cur_bitmap_storage) <= metadata_end);
+        MESOSPHERE_ABORT_UNLESS(KVirtualAddress(cur_bitmap_storage) <= management_end);
    }

    size_t KPageHeap::GetNumFreePages() const {
@@ -122,12 +122,12 @@ namespace ams::kern {
        }
    }

-    size_t KPageHeap::CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts) {
+    size_t KPageHeap::CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts) {
        size_t overhead_size = 0;
        for (size_t i = 0; i < num_block_shifts; i++) {
            const size_t cur_block_shift = block_shifts[i];
            const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
-            overhead_size += KPageHeap::Block::CalculateMetadataOverheadSize(region_size, cur_block_shift, next_block_shift);
+            overhead_size += KPageHeap::Block::CalculateManagementOverheadSize(region_size, cur_block_shift, next_block_shift);
        }
        return util::AlignUp(overhead_size, PageSize);
    }
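Finally, KPageHeap::Initialize() above carves the management region into back-to-back bitmap storage, one bitmap per block size, and aborts if the cursor passes management_end. A sketch of that carving; BitmapWords() is an assumed stand-in for the real per-block overhead, and the shifts are the same illustrative values used earlier.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t AlignUp(size_t v, size_t a) { return (v + a - 1) / a * a; }

/* Assumed stand-in: u64 words consumed by one block's bitmap hierarchy. */
size_t BitmapWords(size_t num_blocks) {
    size_t words = 0;
    do {
        num_blocks = AlignUp(num_blocks, 64) / 64;
        words += num_blocks;
    } while (num_blocks > 1);
    return words;
}

int main() {
    const size_t block_shifts[] = { 12, 16, 21 }; /* illustrative, as before */
    const size_t heap_size = 512ull << 20;

    static uint64_t management[0x4000]; /* hypothetical management region */
    uint64_t *cursor = management;
    uint64_t *const management_end = management + 0x4000;

    /* One bitmap per block size, placed back to back, as in Initialize(). */
    for (size_t shift : block_shifts) {
        cursor += BitmapWords(heap_size >> shift);
    }

    /* Mirrors the MESOSPHERE_ABORT_UNLESS bounds check at the end. */
    std::printf("used %zu of %d words, in bounds: %d\n",
                (size_t)(cursor - management), 0x4000, (int)(cursor <= management_end));
    return 0;
}
```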