From b393f8f348da1154f41eabf4d7f56f948d2eade6 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Tue, 1 Dec 2020 06:53:22 -0800
Subject: [PATCH] kern: implement DisableDeviceAddressSpaceMerge

---
 .../arch/arm64/kern_k_page_table.hpp          |   2 +-
 .../arch/arm64/kern_k_process_page_table.hpp  |  12 +-
 .../nintendo/nx/kern_k_device_page_table.hpp  |   4 +
 .../kern_k_memory_block_manager.hpp           |   3 +-
 .../mesosphere/kern_k_page_table_base.hpp     |  11 +-
 .../source/arch/arm64/kern_k_page_table.cpp   |   4 +-
 .../source/kern_k_device_address_space.cpp    |  39 ++--
 .../source/kern_k_initial_process_reader.cpp  |   3 +
 .../source/kern_k_memory_block_manager.cpp    |   2 +-
 .../source/kern_k_page_table_base.cpp         | 184 ++++++++++++++----
 .../libmesosphere/source/kern_k_process.cpp   |  22 ++-
 11 files changed, 214 insertions(+), 72 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
index 119f93093..81c388044 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
@@ -176,7 +176,7 @@ namespace ams::kern::arch::arm64 {
             }
 
             NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
-            NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager);
+            NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager);
             Result Finalize();
         private:
             Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp
index c882adc66..2b46a723c 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp
@@ -30,8 +30,8 @@ namespace ams::kern::arch::arm64 {
                 this->page_table.Activate(id);
             }
 
-            Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
-                return this->page_table.InitializeForProcess(id, as_type, enable_aslr, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
+            Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
+                return this->page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
             }
 
             void Finalize() { this->page_table.Finalize(); }
@@ -152,6 +152,14 @@ namespace ams::kern::arch::arm64 {
                 return this->page_table.UnlockForDeviceAddressSpace(address, size);
             }
 
+            Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
+                return this->page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
+            }
+
+            Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
+                return this->page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
+            }
+
             Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
                 return this->page_table.LockForIpcUserBuffer(out, address, size);
             }

diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp
index b00ed7d7c..c86cf1778 100644
--- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp
@@ -71,6 +71,10 @@ namespace ams::kern::board::nintendo::nx {
 
             Result Map(size_t *out_mapped_size, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings);
             Result Unmap(const KPageGroup &pg, KDeviceVirtualAddress device_address);
+
+            void Unmap(KDeviceVirtualAddress device_address, size_t size) {
+                return this->UnmapImpl(device_address, size, false);
+            }
         private:
             Result MapDevicePage(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm);

diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp
index 18f919b9a..54dfcdd71 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp
@@ -76,6 +76,7 @@ namespace ams::kern {
     class KMemoryBlockManager {
        public:
            using MemoryBlockTree = util::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType;
+           using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left, bool right);
            using iterator = MemoryBlockTree::iterator;
            using const_iterator = MemoryBlockTree::const_iterator;
        private:
@@ -97,7 +98,7 @@ namespace ams::kern {
            KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const;
 
            void Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr);
-           void UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, void (KMemoryBlock::*lock_func)(KMemoryPermission new_perm, bool left, bool right), KMemoryPermission perm);
+           void UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm);
 
            void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr);
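The new MemoryBlockLockFunction alias above names a pointer-to-member-function type, so KMemoryBlockManager::UpdateLock can be handed whichever per-block update routine a caller needs (share vs. unshare, left/right edge variants) instead of growing one overload per case. A minimal standalone sketch of the same dispatch pattern, using hypothetical Block/Manager stand-ins rather than the kernel's real classes:

#include <cstdio>

/* Hypothetical stand-ins for KMemoryBlock / KMemoryBlockManager, illustration only. */
class Block {
    public:
        void ShareToDevice(int new_perm, bool left, bool right) {
            std::printf("share   perm=%d left=%d right=%d\n", new_perm, left, right);
        }
        void UnshareToDevice(int new_perm, bool left, bool right) {
            std::printf("unshare perm=%d left=%d right=%d\n", new_perm, left, right);
        }
};

class Manager {
    public:
        /* Same shape as KMemoryBlockManager::MemoryBlockLockFunction. */
        using BlockLockFunction = void (Block::*)(int new_perm, bool left, bool right);

        void UpdateLock(Block *first, Block *last, BlockLockFunction lock_func, int perm) {
            for (Block *cur = first; cur <= last; ++cur) {
                /* Only the edge blocks of the range get the left/right treatment. */
                (cur->*lock_func)(perm, cur == first, cur == last);
            }
        }
};

int main() {
    Block blocks[3];
    Manager m;
    m.UpdateLock(blocks, blocks + 2, &Block::ShareToDevice, 0);
    m.UpdateLock(blocks, blocks + 2, &Block::UnshareToDevice, 0);
    return 0;
}

The (cur->*lock_func)(...) call site is roughly what UpdateLock's traversal of the affected memory blocks reduces to; the left/right booleans are how only the blocks at the edges of a locked range carry the disable-merge bookkeeping.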
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp
index e6e1e218f..f1a6ac8f0 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp
@@ -158,6 +158,7 @@ namespace ams::kern {
             u32 address_space_width;
             bool is_kernel;
             bool enable_aslr;
+            bool enable_device_address_space_merge;
             KMemoryBlockSlabManager *memory_block_slab_manager;
             KBlockInfoManager *block_info_manager;
             const KMemoryRegion *cached_physical_linear_region;
@@ -172,15 +173,15 @@ namespace ams::kern {
                   alias_region_start(), alias_region_end(), stack_region_start(), stack_region_end(),
                   kernel_map_region_start(), kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(),
                   code_region_start(), code_region_end(), max_heap_size(), mapped_physical_memory_size(),
                   mapped_unsafe_physical_memory(), general_lock(), map_physical_memory_lock(),
-                  impl(), memory_block_manager(), allocate_option(), address_space_width(), is_kernel(), enable_aslr(), memory_block_slab_manager(),
-                  block_info_manager(), cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
+                  impl(), memory_block_manager(), allocate_option(), address_space_width(), is_kernel(), enable_aslr(), enable_device_address_space_merge(),
+                  memory_block_slab_manager(), block_info_manager(), cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
                   heap_fill_value(), ipc_fill_value(), stack_fill_value()
             {
                 /* ... */
             }
 
             NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);
-            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager);
+            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_device_address_space_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager);
 
             void Finalize();
@@ -353,6 +354,10 @@ namespace ams::kern {
 
             Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned);
             Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
+
+            Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size);
+            Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size);
+
             Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size);
             Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);

diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
index 0f518bf77..9459cd556 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp
@@ -181,7 +181,7 @@ namespace ams::kern::arch::arm64 {
         return ResultSuccess();
     }
 
-    Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
+    Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
         /* The input ID isn't actually used. */
         MESOSPHERE_UNUSED(id);
 
@@ -202,7 +202,7 @@ namespace ams::kern::arch::arm64 {
         const size_t as_width = GetAddressSpaceWidth(as_type);
         const KProcessAddress as_start = 0;
         const KProcessAddress as_end = (1ul << as_width);
-        R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, mem_block_slab_manager, block_info_manager));
+        R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, mem_block_slab_manager, block_info_manager));
 
         /* We succeeded! */
         table_guard.Cancel();
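For context on the hunk just above: KPageTable::InitializeForProcess turns the CreateProcessFlag address-space type into a width, and from that the [as_start, as_end) bounds it hands to KPageTableBase. A toy illustration of that width-to-bounds arithmetic; the enum and the widths below are placeholders for illustration, not values taken from this patch:

#include <cstddef>
#include <cstdint>
#include <cstdio>

/* Placeholder address-space types; the real kernel encodes these in ams::svc::CreateProcessFlag. */
enum class AddressSpaceType { Is32Bit, Is64BitDeprecated, Is32BitWithoutAlias, Is64Bit };

/* Assumed widths, for illustration only. */
constexpr std::size_t GetAddressSpaceWidth(AddressSpaceType type) {
    switch (type) {
        case AddressSpaceType::Is32Bit:             return 32;
        case AddressSpaceType::Is64BitDeprecated:   return 36;
        case AddressSpaceType::Is32BitWithoutAlias: return 32;
        case AddressSpaceType::Is64Bit:             return 39;
    }
    return 0;
}

int main() {
    for (auto type : { AddressSpaceType::Is32Bit, AddressSpaceType::Is64Bit }) {
        const std::size_t   as_width = GetAddressSpaceWidth(type);
        const std::uint64_t as_start = 0;
        const std::uint64_t as_end   = std::uint64_t(1) << as_width;  /* mirrors (1ul << as_width) above */
        std::printf("width=%2zu start=0x%llx end=0x%llx\n", as_width, (unsigned long long)as_start, (unsigned long long)as_end);
    }
    return 0;
}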
diff --git a/libraries/libmesosphere/source/kern_k_device_address_space.cpp b/libraries/libmesosphere/source/kern_k_device_address_space.cpp
index 467343653..bfecb953a 100644
--- a/libraries/libmesosphere/source/kern_k_device_address_space.cpp
+++ b/libraries/libmesosphere/source/kern_k_device_address_space.cpp
@@ -79,25 +79,30 @@ namespace ams::kern {
         ON_SCOPE_EXIT { pg.Close(); };
 
         /* Ensure that if we fail, we don't keep unmapped pages locked. */
-        ON_SCOPE_EXIT {
-            if (*out_mapped_size != size) {
-                page_table->UnlockForDeviceAddressSpace(process_address + *out_mapped_size, size - *out_mapped_size);
-            };
-        };
+        auto unlock_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size)); };
 
         /* Map the pages. */
         {
             /* Clear the output size to zero on failure. */
-            auto map_guard = SCOPE_GUARD { *out_mapped_size = 0; };
+            auto mapped_size_guard = SCOPE_GUARD { *out_mapped_size = 0; };
 
             /* Perform the mapping. */
             R_TRY(this->table.Map(out_mapped_size, pg, device_address, device_perm, refresh_mappings));
 
-            /* We succeeded, so cancel our guard. */
+            /* Ensure that we unmap the pages if we fail to update the protections. */
+            /* NOTE: Nintendo does not check the result of this unmap call. */
+            auto map_guard = SCOPE_GUARD { this->table.Unmap(device_address, *out_mapped_size); };
+
+            /* Update the protections in accordance with how much we mapped. */
+            R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size, *out_mapped_size));
+
+            /* We succeeded, so cancel our guards. */
             map_guard.Cancel();
+            mapped_size_guard.Cancel();
         }
-
+        /* We succeeded, so we don't need to unlock our pages. */
+        unlock_guard.Cancel();
 
         return ResultSuccess();
     }
@@ -110,19 +115,23 @@ namespace ams::kern {
 
         /* Make and open a page group for the unmapped region. */
         KPageGroup pg(page_table->GetBlockInfoManager());
-        R_TRY(page_table->MakeAndOpenPageGroupContiguous(std::addressof(pg), process_address, size / PageSize,
-                                                         KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
-                                                         KMemoryPermission_None, KMemoryPermission_None,
-                                                         KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+        R_TRY(page_table->MakePageGroupForUnmapDeviceAddressSpace(std::addressof(pg), process_address, size));
 
         /* Ensure the page group is closed on scope exit. */
         ON_SCOPE_EXIT { pg.Close(); };
 
-        /* Unmap. */
-        R_TRY(this->table.Unmap(pg, device_address));
+        /* If we fail to unmap, we want to do a partial unlock. */
+        {
+            auto unlock_guard = SCOPE_GUARD { page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size, size); };
+
+            /* Unmap. */
+            R_TRY(this->table.Unmap(pg, device_address));
+
+            unlock_guard.Cancel();
+        }
 
         /* Unlock the pages. */
-        R_TRY(page_table->UnlockForDeviceAddressSpace(process_address, size));
+        MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size));
 
         return ResultSuccess();
     }
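The Map/Unmap rework in kern_k_device_address_space.cpp above leans on layered scope guards: an outer guard fully unlocks the locked pages if anything fails, an inner guard zeroes the reported mapped size, and a third guard, armed only after the device mapping succeeded, tears that mapping back down if the follow-up partial-unlock bookkeeping fails. Each guard is cancelled only once everything after it has succeeded, so a failure at any point unwinds exactly the work already done. A reduced sketch of that unwinding order, using a small hand-rolled guard in place of Atmosphère's SCOPE_GUARD/ON_SCOPE_EXIT macros and print statements in place of the real cleanup:

#include <cstdio>
#include <utility>

/* Minimal stand-in for SCOPE_GUARD: runs f on destruction unless cancelled. */
template<typename F>
class ScopeGuard {
    private:
        F f;
        bool active = true;
    public:
        explicit ScopeGuard(F &&f) : f(std::forward<F>(f)) { /* arm the guard */ }
        ~ScopeGuard() { if (active) { f(); } }
        void Cancel() { active = false; }
};

bool MapOnDevice()       { return true;  }   /* pretend the device mapping succeeds */
bool UpdateProtections() { return false; }   /* ...but the protection bookkeeping fails */

bool MapExample() {
    auto unlock_guard = ScopeGuard([] { std::printf("unlock all pages\n"); });

    {
        auto mapped_size_guard = ScopeGuard([] { std::printf("reset *out_mapped_size\n"); });

        if (!MapOnDevice()) { return false; }

        /* If the later bookkeeping fails, the device mapping must be torn down again. */
        auto map_guard = ScopeGuard([] { std::printf("unmap from device\n"); });

        if (!UpdateProtections()) { return false; }   /* guards fire in reverse order here */

        map_guard.Cancel();
        mapped_size_guard.Cancel();
    }

    unlock_guard.Cancel();
    return true;
}

int main() { MapExample(); return 0; }

With UpdateProtections() failing, the cleanup steps run in reverse order of arming (unmap from device, reset the size, unlock the pages), which is the behaviour the patch wants when UnlockForDeviceAddressSpacePartialMap fails after a successful device map.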
diff --git a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
index 8e2196a3c..7c9a53f49 100644
--- a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
+++ b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
@@ -140,6 +140,9 @@ namespace ams::kern {
             out->flags |= ams::svc::CreateProcessFlag_AddressSpace32Bit;
         }
 
+        /* All initial processes should disable device address space merge. */
+        out->flags |= ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge;
+
         return ResultSuccess();
     }

diff --git a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp
index ce0bbbed4..c6bcc2147 100644
--- a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp
@@ -287,7 +287,7 @@ namespace ams::kern {
         this->CoalesceForUpdate(allocator, address, num_pages);
     }
 
-    void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, void (KMemoryBlock::*lock_func)(KMemoryPermission new_perm, bool left, bool right), KMemoryPermission perm) {
+    void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm) {
         /* Ensure for auditing that we never end up with an invalid tree. */
         KScopedMemoryBlockManagerAuditor auditor(this);
         MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));

diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
index 336d579b4..00f2b4639 100644
--- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp
+++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp
@@ -20,40 +20,41 @@ namespace ams::kern {
 
     Result KPageTableBase::InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end) {
         /* Initialize our members. */
-        this->address_space_width           = (is_64_bit) ? BITSIZEOF(u64) : BITSIZEOF(u32);
-        this->address_space_start           = KProcessAddress(GetInteger(start));
-        this->address_space_end             = KProcessAddress(GetInteger(end));
-        this->is_kernel                     = true;
-        this->enable_aslr                   = true;
+        this->address_space_width               = (is_64_bit) ? BITSIZEOF(u64) : BITSIZEOF(u32);
+        this->address_space_start               = KProcessAddress(GetInteger(start));
+        this->address_space_end                 = KProcessAddress(GetInteger(end));
+        this->is_kernel                         = true;
+        this->enable_aslr                       = true;
+        this->enable_device_address_space_merge = false;
 
-        this->heap_region_start             = 0;
-        this->heap_region_end               = 0;
-        this->current_heap_end              = 0;
-        this->alias_region_start            = 0;
-        this->alias_region_end              = 0;
-        this->stack_region_start            = 0;
-        this->stack_region_end              = 0;
-        this->kernel_map_region_start       = 0;
-        this->kernel_map_region_end         = 0;
-        this->alias_code_region_start       = 0;
-        this->alias_code_region_end         = 0;
-        this->code_region_start             = 0;
-        this->code_region_end               = 0;
-        this->max_heap_size                 = 0;
-        this->mapped_physical_memory_size   = 0;
-        this->mapped_unsafe_physical_memory = 0;
+        this->heap_region_start                 = 0;
+        this->heap_region_end                   = 0;
+        this->current_heap_end                  = 0;
+        this->alias_region_start                = 0;
+        this->alias_region_end                  = 0;
+        this->stack_region_start                = 0;
+        this->stack_region_end                  = 0;
+        this->kernel_map_region_start           = 0;
+        this->kernel_map_region_end             = 0;
+        this->alias_code_region_start           = 0;
+        this->alias_code_region_end             = 0;
+        this->code_region_start                 = 0;
+        this->code_region_end                   = 0;
+        this->max_heap_size                     = 0;
+        this->mapped_physical_memory_size       = 0;
+        this->mapped_unsafe_physical_memory     = 0;
 
-        this->memory_block_slab_manager     = std::addressof(Kernel::GetSystemMemoryBlockManager());
-        this->block_info_manager            = std::addressof(Kernel::GetBlockInfoManager());
+        this->memory_block_slab_manager         = std::addressof(Kernel::GetSystemMemoryBlockManager());
+        this->block_info_manager                = std::addressof(Kernel::GetBlockInfoManager());
 
-        this->allocate_option               = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
-        this->heap_fill_value               = MemoryFillValue_Zero;
-        this->ipc_fill_value                = MemoryFillValue_Zero;
-        this->stack_fill_value              = MemoryFillValue_Zero;
+        this->allocate_option                   = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
+        this->heap_fill_value                   = MemoryFillValue_Zero;
+        this->ipc_fill_value                    = MemoryFillValue_Zero;
+        this->stack_fill_value                  = MemoryFillValue_Zero;
 
-        this->cached_physical_linear_region = nullptr;
-        this->cached_physical_heap_region   = nullptr;
-        this->cached_virtual_heap_region    = nullptr;
+        this->cached_physical_linear_region     = nullptr;
+        this->cached_physical_heap_region       = nullptr;
+        this->cached_virtual_heap_region        = nullptr;
 
         /* Initialize our implementation. */
         this->impl.InitializeForKernel(table, start, end);
@@ -64,7 +65,7 @@ namespace ams::kern {
         return ResultSuccess();
     }
 
-    Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager) {
+    Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager) {
         /* Validate the region. */
         MESOSPHERE_ABORT_UNLESS(start <= code_address);
         MESOSPHERE_ABORT_UNLESS(code_address < code_address + code_size);
@@ -123,12 +124,13 @@ namespace ams::kern {
         }
 
         /* Set other basic fields. */
-        this->enable_aslr               = enable_aslr;
-        this->address_space_start       = start;
-        this->address_space_end         = end;
-        this->is_kernel                 = false;
-        this->memory_block_slab_manager = mem_block_slab_manager;
-        this->block_info_manager        = block_info_manager;
+        this->enable_aslr                       = enable_aslr;
+        this->enable_device_address_space_merge = enable_das_merge;
+        this->address_space_start               = start;
+        this->address_space_end                 = end;
+        this->is_kernel                         = false;
+        this->memory_block_slab_manager         = mem_block_slab_manager;
+        this->block_info_manager                = block_info_manager;
 
         /* Determine the region we can place our undetermineds in. */
         KProcessAddress alloc_start;
@@ -2356,6 +2358,114 @@ namespace ams::kern {
         return ResultSuccess();
     }
 
+    Result KPageTableBase::MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
+        /* Lightly validate the range before doing anything else. */
+        const size_t num_pages = size / PageSize;
+        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Check the memory state. */
+        size_t num_allocator_blocks;
+        R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_allocator_blocks),
+                                               address, size,
+                                               KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap, KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap,
+                                               KMemoryPermission_None, KMemoryPermission_None,
+                                               KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.Initialize(num_allocator_blocks));
+
+        /* Make the page group. */
+        R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+        /* Update the memory blocks. */
+        const KMemoryBlockManager::MemoryBlockLockFunction lock_func = this->enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+        this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None);
+
+        /* Open a reference to the pages in the page group. */
+        out->Open();
+
+        return ResultSuccess();
+    }
+
+    Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
+        /* Lightly validate the range before doing anything else. */
+        const size_t num_pages = size / PageSize;
+        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Determine useful extents. */
+        const KProcessAddress mapped_end_address = address + mapped_size;
+        const size_t unmapped_size = size - mapped_size;
+
+        /* Check memory state. */
+        size_t allocator_num_blocks = 0, unmapped_allocator_num_blocks = 0;
+        if (unmapped_size) {
+            if (this->enable_device_address_space_merge) {
+                R_TRY(this->CheckMemoryState(std::addressof(allocator_num_blocks),
+                                             address, size,
+                                             KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
+                                             KMemoryPermission_None, KMemoryPermission_None,
+                                             KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+            }
+            R_TRY(this->CheckMemoryState(std::addressof(unmapped_allocator_num_blocks),
+                                         mapped_end_address, unmapped_size,
+                                         KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
+                                         KMemoryPermission_None, KMemoryPermission_None,
+                                         KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+        } else {
+            R_TRY(this->CheckMemoryState(std::addressof(allocator_num_blocks),
+                                         address, size,
+                                         KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
+                                         KMemoryPermission_None, KMemoryPermission_None,
+                                         KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+        }
+
+        /* Create an update allocator for the region. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.Initialize(allocator_num_blocks));
+
+        /* Create an update allocator for the unmapped region. */
+        KMemoryBlockManagerUpdateAllocator unmapped_allocator(this->memory_block_slab_manager);
+        R_TRY(unmapped_allocator.Initialize(unmapped_allocator_num_blocks));
+
+        /* Determine parameters for the update lock call. */
+        KMemoryBlockManagerUpdateAllocator *lock_allocator;
+        KProcessAddress lock_address;
+        size_t lock_num_pages;
+        KMemoryBlockManager::MemoryBlockLockFunction lock_func;
+        if (unmapped_size) {
+            /* If device address space merge is enabled, update tracking appropriately. */
+            if (this->enable_device_address_space_merge) {
+                this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareLeft, KMemoryPermission_None);
+            }
+
+            lock_allocator = std::addressof(unmapped_allocator);
+            lock_address   = mapped_end_address;
+            lock_num_pages = unmapped_size / PageSize;
+            lock_func      = &KMemoryBlock::UnshareToDeviceRight;
+        } else {
+            lock_allocator = std::addressof(allocator);
+            lock_address   = address;
+            lock_num_pages = num_pages;
+            if (this->enable_device_address_space_merge) {
+                lock_func = &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare;
+            } else {
+                lock_func = &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight;
+            }
+        }
+
+        /* Update the memory blocks. */
+        this->memory_block_manager.UpdateLock(lock_allocator, lock_address, lock_num_pages, lock_func, KMemoryPermission_None);
+
+        return ResultSuccess();
+    }
+
     Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
         return this->LockMemoryAndOpen(nullptr, out, address, size,
                                        KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer,
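The new KPageTableBase::UnlockForDeviceAddressSpacePartialMap above splits [address, address + size) at address + mapped_size: if part of the range never made it onto the device, only that unmapped tail is unshared (with the mapped head getting a merge-tracking update when merge is enabled), and if everything mapped, the whole range only needs its disable-merge state refreshed. A small standalone sketch of just that range arithmetic; the struct and function names here are invented for illustration:

#include <cstdio>

constexpr unsigned long PageSize = 0x1000;

struct PartialUnlockPlan {
    unsigned long lock_address;   /* where the unshare/update starts */
    unsigned long lock_num_pages; /* how many pages it covers */
    bool          tail_only;      /* true when only the unmapped tail is touched */
};

/* Mirrors the region arithmetic in UnlockForDeviceAddressSpacePartialMap above. */
PartialUnlockPlan PlanPartialUnlock(unsigned long address, unsigned long size, unsigned long mapped_size) {
    const unsigned long mapped_end_address = address + mapped_size;
    const unsigned long unmapped_size      = size - mapped_size;

    if (unmapped_size != 0) {
        /* Some pages were never mapped on the device: unshare just that tail. */
        return { mapped_end_address, unmapped_size / PageSize, true };
    } else {
        /* Everything was mapped: only the merge-tracking state needs updating. */
        return { address, size / PageSize, false };
    }
}

int main() {
    const auto plan = PlanPartialUnlock(0x10000000, 8 * PageSize, 5 * PageSize);
    std::printf("lock at 0x%lx for %lu pages (tail_only=%d)\n", plan.lock_address, plan.lock_num_pages, plan.tail_only);
    return 0;
}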
diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp
index bd8336160..ae767d13b 100644
--- a/libraries/libmesosphere/source/kern_k_process.cpp
+++ b/libraries/libmesosphere/source/kern_k_process.cpp
@@ -237,13 +237,14 @@ namespace ams::kern {
         /* NOTE: Nintendo passes process ID despite not having set it yet. */
         /* This goes completely unused, but even so... */
         {
-            const auto as_type       = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
-            const bool enable_aslr   = (params.flags & ams::svc::CreateProcessFlag_EnableAslr);
-            const bool is_app        = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
-            auto *mem_block_manager  = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
-            auto *block_info_manager = std::addressof(Kernel::GetBlockInfoManager());
-            auto *pt_manager         = std::addressof(Kernel::GetPageTableManager());
-            R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager));
+            const auto as_type          = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
+            const bool enable_aslr      = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
+            const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
+            const bool is_app           = (params.flags & ams::svc::CreateProcessFlag_IsApplication) != 0;
+            auto *mem_block_manager     = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
+            auto *block_info_manager    = std::addressof(Kernel::GetBlockInfoManager());
+            auto *pt_manager            = std::addressof(Kernel::GetPageTableManager());
+            R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager));
         }
 
         auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); };
@@ -344,9 +345,10 @@ namespace ams::kern {
         /* NOTE: Nintendo passes process ID despite not having set it yet. */
         /* This goes completely unused, but even so... */
         {
-            const auto as_type     = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
-            const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr);
-            R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager));
+            const auto as_type          = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
+            const bool enable_aslr      = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
+            const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
+            R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager));
         }
 
         auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); };
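Finally, note the flag convention on the process-creation side: DisableDeviceAddressSpaceMerge is an opt-out bit, so the page table enables merge tracking exactly when the bit is clear, and kern_k_initial_process_reader.cpp forces the bit on for every initial process. A bitflag sketch of that convention (the flag values below are placeholders, not the real ams::svc definitions):

#include <cstdint>
#include <cstdio>

/* Placeholder flag bits standing in for ams::svc::CreateProcessFlag values. */
enum CreateProcessFlag : std::uint32_t {
    CreateProcessFlag_EnableAslr                     = (1u << 0),
    CreateProcessFlag_IsApplication                  = (1u << 1),
    CreateProcessFlag_DisableDeviceAddressSpaceMerge = (1u << 2),
};

int main() {
    std::uint32_t flags = CreateProcessFlag_EnableAslr;

    /* What the initial-process reader does: force the disable bit on. */
    flags |= CreateProcessFlag_DisableDeviceAddressSpaceMerge;

    /* What KProcess::Initialize does: merge device mappings only when the disable bit is *not* set. */
    const bool enable_aslr      = (flags & CreateProcessFlag_EnableAslr) != 0;
    const bool enable_das_merge = (flags & CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;

    std::printf("enable_aslr=%d enable_das_merge=%d\n", enable_aslr, enable_das_merge);
    return 0;
}

The double negation ((flags & Disable...) == 0) is why the parameter threaded through the page-table initializers is named enable_das_merge rather than mirroring the flag name.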