diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
index ff656f766..4193633b2 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp
@@ -176,6 +176,7 @@ namespace ams::kern {
 
             NOINLINE void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size);
             NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool);
+            NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
 
             NOINLINE KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option);
             NOINLINE Result Allocate(KPageGroup *out, size_t num_pages, u32 option);
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index 3b7059e50..a3f457219 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -497,7 +497,8 @@ namespace ams::kern::arch::arm64 {
                 MESOSPHERE_ABORT_UNLESS(force);
                 const size_t cur_size = std::min(next_entry.block_size - (GetInteger(virt_addr) & (next_entry.block_size - 1)), remaining_pages * PageSize);
                 remaining_pages -= cur_size / PageSize;
-                virt_addr += cur_size;
+                virt_addr       += cur_size;
+                next_valid       = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
                 continue;
             }
 
diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
index 268db247c..082c1db01 100644
--- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp
@@ -106,6 +106,16 @@ namespace ams::kern {
         return ResultSuccess();
     }
 
+    void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
+        /* Lock the pool. */
+        KScopedLightLock lk(this->pool_locks[pool]);
+
+        /* If the process was optimized, clear it. */
+        if (this->has_optimized_process[pool] && this->optimized_process_ids[pool] == process_id) {
+            this->has_optimized_process[pool] = false;
+        }
+    }
+
     KVirtualAddress KMemoryManager::AllocateContinuous(size_t num_pages, size_t align_pages, u32 option) {
         /* Early return if we're allocating no pages. */
diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index e77534dab..e90470465 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -3419,7 +3419,8 @@ namespace ams::kern {
 
         bool next_valid;
         size_t tot_size = 0;
-        next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_address);
+        cur_address = address;
+        next_valid  = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_address);
         next_entry.block_size = (next_entry.block_size - (GetInteger(next_entry.phys_addr) & (next_entry.block_size - 1)));
 
         /* Iterate, building the group. */
@@ -3497,7 +3498,7 @@ namespace ams::kern {
                 /* Advance our physical block. */
                 ++pg_it;
                 pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
-                pg_pages = pg_it->GetNumPages();
+                pg_pages     = pg_it->GetNumPages();
             }
 
             /* Map whatever we can. */
diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp
index 5275df1a1..0177f969f 100644
--- a/libraries/libmesosphere/source/kern_k_process.cpp
+++ b/libraries/libmesosphere/source/kern_k_process.cpp
@@ -105,6 +105,9 @@ namespace ams::kern {
             /* Clear our tracking variables. */
             this->system_resource_address = Null;
             this->system_resource_num_pages = 0;
+
+            /* Finalize optimized memory. If memory wasn't optimized, this is a no-op. */
+            Kernel::GetMemoryManager().FinalizeOptimizedMemory(this->GetId(), this->memory_pool);
         }
 
         /* Release memory to the resource limit. */
@@ -359,7 +362,7 @@ namespace ams::kern {
         MESOSPHERE_ABORT_UNLESS(this->process_id <= ProcessIdMax);
 
         /* If we should optimize memory allocations, do so. */
-        if (this->system_resource_address != Null) {
+        if (this->system_resource_address != Null && (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0) {
             R_TRY(Kernel::GetMemoryManager().InitializeOptimizedMemory(this->process_id, pool));
         }