
Merge pull request #1876 from lioncash/vma

vm_manager: Make vma_map private
bunnei 2018-12-10 10:09:50 -05:00 committed by GitHub
commit 74242a8fb4
5 changed files with 42 additions and 29 deletions
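
Across all five files the change is the same mechanical substitution: call sites stop comparing the iterator returned by FindVMA() against the now-private vma_map and instead ask the VMManager itself. Roughly (illustrative excerpt, mirroring the first hunk below):

    // before: callers reach into the map directly
    auto vma = vm_manager.FindVMA(address);
    ASSERT_MSG(vma != vm_manager.vma_map.end(), "Invalid memory address");

    // after: validity is checked through the VMManager interface
    const auto vma = vm_manager.FindVMA(address);
    ASSERT_MSG(vm_manager.IsValidHandle(vma), "Invalid memory address");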


@@ -39,15 +39,15 @@ SharedPtr<SharedMemory> SharedMemory::Create(KernelCore& kernel, SharedPtr<Proce
                 shared_memory->backing_block.get());
         }
     } else {
-        auto& vm_manager = shared_memory->owner_process->VMManager();
+        const auto& vm_manager = shared_memory->owner_process->VMManager();
 
         // The memory is already available and mapped in the owner process.
-        auto vma = vm_manager.FindVMA(address);
-        ASSERT_MSG(vma != vm_manager.vma_map.end(), "Invalid memory address");
+        const auto vma = vm_manager.FindVMA(address);
+        ASSERT_MSG(vm_manager.IsValidHandle(vma), "Invalid memory address");
         ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");
 
         // The returned VMA might be a bigger one encompassing the desired address.
-        auto vma_offset = address - vma->first;
+        const auto vma_offset = address - vma->first;
         ASSERT_MSG(vma_offset + size <= vma->second.size,
                    "Shared memory exceeds bounds of mapped block");


@@ -239,7 +239,7 @@ static ResultCode SetMemoryPermission(VAddr addr, u64 size, u32 prot) {
     }
 
     const VMManager::VMAHandle iter = vm_manager.FindVMA(addr);
-    if (iter == vm_manager.vma_map.end()) {
+    if (!vm_manager.IsValidHandle(iter)) {
         LOG_ERROR(Kernel_SVC, "Unable to find VMA for address=0x{:016X}", addr);
         return ERR_INVALID_ADDRESS_STATE;
     }
@@ -1077,19 +1077,23 @@ static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* /*page_i
                   process_handle);
         return ERR_INVALID_HANDLE;
     }
-    auto vma = process->VMManager().FindVMA(addr);
+
+    const auto& vm_manager = process->VMManager();
+    const auto vma = vm_manager.FindVMA(addr);
+
     memory_info->attributes = 0;
-    if (vma == process->VMManager().vma_map.end()) {
-        memory_info->base_address = 0;
-        memory_info->permission = static_cast<u32>(VMAPermission::None);
-        memory_info->size = 0;
-        memory_info->type = static_cast<u32>(MemoryState::Unmapped);
-    } else {
+    if (vm_manager.IsValidHandle(vma)) {
         memory_info->base_address = vma->second.base;
         memory_info->permission = static_cast<u32>(vma->second.permissions);
         memory_info->size = vma->second.size;
         memory_info->type = static_cast<u32>(vma->second.meminfo_state);
+    } else {
+        memory_info->base_address = 0;
+        memory_info->permission = static_cast<u32>(VMAPermission::None);
+        memory_info->size = 0;
+        memory_info->type = static_cast<u32>(MemoryState::Unmapped);
     }
 
     return RESULT_SUCCESS;
 }


@@ -87,6 +87,10 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
     }
 }
 
+bool VMManager::IsValidHandle(VMAHandle handle) const {
+    return handle != vma_map.cend();
+}
+
 ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
                                                           std::shared_ptr<std::vector<u8>> block,
                                                           std::size_t offset, u64 size,
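
IsValidHandle() is nothing more than a comparison against the map's own end iterator, but it keeps that detail inside the class, which is what allows vma_map to become private. A minimal standalone sketch of the same encapsulation pattern, with hypothetical names and simplified types (not yuzu code):

    #include <cstdint>
    #include <map>

    class RegionManager {
        // base address -> region size; stands in for the real VMA map.
        using RegionMap = std::map<std::uint64_t, std::uint64_t>;

    public:
        using Handle = RegionMap::const_iterator;

        // Returns the region containing `target`, or an invalid handle.
        Handle Find(std::uint64_t target) const {
            auto it = regions.upper_bound(target);
            if (it == regions.begin()) {
                return regions.cend();
            }
            --it; // last region whose base is <= target
            return target < it->first + it->second ? it : regions.cend();
        }

        // Callers test validity here instead of touching the map directly.
        bool IsValidHandle(Handle handle) const {
            return handle != regions.cend();
        }

        void Insert(std::uint64_t base, std::uint64_t size) {
            regions.emplace(base, size);
        }

    private:
        RegionMap regions;
    };

Because the public-facing type is only a const_iterator alias, external code can read through a handle but cannot mutate, or even name, the underlying map.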


@@ -113,16 +113,10 @@ struct VirtualMemoryArea {
  * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
  */
 class VMManager final {
+    using VMAMap = std::map<VAddr, VirtualMemoryArea>;
+
 public:
-    /**
-     * A map covering the entirety of the managed address space, keyed by the `base` field of each
-     * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
-     * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
-     * merged when possible so that no two similar and adjacent regions exist that have not been
-     * merged.
-     */
-    std::map<VAddr, VirtualMemoryArea> vma_map;
-    using VMAHandle = decltype(vma_map)::const_iterator;
+    using VMAHandle = VMAMap::const_iterator;
 
     VMManager();
     ~VMManager();
@@ -133,6 +127,9 @@ public:
     /// Finds the VMA in which the given address is included in, or `vma_map.end()`.
     VMAHandle FindVMA(VAddr target) const;
 
+    /// Indicates whether or not the given handle is within the VMA map.
+    bool IsValidHandle(VMAHandle handle) const;
+
     // TODO(yuriks): Should these functions actually return the handle?
 
     /**
@@ -281,7 +278,7 @@ public:
     Memory::PageTable page_table;
 
 private:
-    using VMAIter = decltype(vma_map)::iterator;
+    using VMAIter = VMAMap::iterator;
 
     /// Converts a VMAHandle to a mutable VMAIter.
     VMAIter StripIterConstness(const VMAHandle& iter);
@@ -328,6 +325,15 @@ private:
     /// Clears out the page table
     void ClearPageTable();
 
+    /**
+     * A map covering the entirety of the managed address space, keyed by the `base` field of each
+     * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
+     * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
+     * merged when possible so that no two similar and adjacent regions exist that have not been
+     * merged.
+     */
+    VMAMap vma_map;
+
     u32 address_space_width = 0;
     VAddr address_space_base = 0;
     VAddr address_space_end = 0;
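
The block comment that moves along with vma_map spells out the invariant the private map has to maintain: VMAs tile the managed address space, so every entry must end exactly where its successor begins. A hedged sketch of what a consistency check over such a map could look like (illustrative only; no such helper appears in the diff):

    #include <cstdint>
    #include <iterator>
    #include <map>

    // Stand-in for VirtualMemoryArea, reduced to the fields the invariant mentions.
    struct Vma {
        std::uint64_t base = 0;
        std::uint64_t size = 0;
    };

    // True if every VMA ends exactly where the next one begins.
    bool IsContiguous(const std::map<std::uint64_t, Vma>& vma_map) {
        for (auto it = vma_map.begin(); it != vma_map.end(); ++it) {
            const auto next = std::next(it);
            if (next == vma_map.end()) {
                break;
            }
            if (it->second.base + it->second.size != next->second.base) {
                return false;
            }
        }
        return true;
    }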


@@ -125,14 +125,13 @@ void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPoin
  * using a VMA from the current process
  */
 static u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
-    u8* direct_pointer = nullptr;
-
-    auto& vm_manager = process.VMManager();
-
-    auto it = vm_manager.FindVMA(vaddr);
-    ASSERT(it != vm_manager.vma_map.end());
+    const auto& vm_manager = process.VMManager();
 
-    auto& vma = it->second;
+    const auto it = vm_manager.FindVMA(vaddr);
+    DEBUG_ASSERT(vm_manager.IsValidHandle(it));
+
+    u8* direct_pointer = nullptr;
+    const auto& vma = it->second;
+
     switch (vma.type) {
     case Kernel::VMAType::AllocatedMemoryBlock:
         direct_pointer = vma.backing_block->data() + vma.offset;