mirror of https://github.com/yuzu-emu/yuzu.git synced 2024-07-04 23:31:19 +01:00

Merge pull request #2763 from lioncash/map-phys

kernel/vm_manager: Minor cleanup
David 2019-09-09 16:43:54 +10:00 committed by GitHub
commit 6f7b349461
2 changed files with 41 additions and 39 deletions

kernel/vm_manager.cpp

@@ -296,12 +296,6 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
 }
 
 ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
-    const auto end_addr = target + size;
-    const auto last_addr = end_addr - 1;
-    VAddr cur_addr = target;
-
-    ResultCode result = RESULT_SUCCESS;
-
     // Check how much memory we've already mapped.
     const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size);
     if (mapped_size_result.Failed()) {
@@ -324,13 +318,16 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
 
     // Keep track of the memory regions we unmap.
     std::vector<std::pair<u64, u64>> mapped_regions;
+    ResultCode result = RESULT_SUCCESS;
 
     // Iterate, trying to map memory.
     {
-        cur_addr = target;
+        const auto end_addr = target + size;
+        const auto last_addr = end_addr - 1;
+        VAddr cur_addr = target;
 
         auto iter = FindVMA(target);
-        ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
+        ASSERT(iter != vma_map.end());
 
         while (true) {
             const auto& vma = iter->second;
@@ -342,7 +339,7 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
             const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
             if (vma.state == MemoryState::Unmapped) {
                 const auto map_res =
-                    MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size, 0), 0,
+                    MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size), 0,
                                    map_size, MemoryState::Heap, VMAPermission::ReadWrite);
                 result = map_res.Code();
                 if (result.IsError()) {
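Note: the dropped constructor argument changes nothing observable here, since a byte-vector-style buffer is already zero-filled by its count-only constructor. A minimal sketch, assuming PhysicalMemory is an alias for std::vector<u8> as it was around this point in the codebase:

#include <cstdint>
#include <vector>

// Stand-in for yuzu's PhysicalMemory; the alias is an assumption for this sketch.
using PhysicalMemory = std::vector<std::uint8_t>;

int main() {
    PhysicalMemory a(64);     // 64 elements, value-initialized to zero
    PhysicalMemory b(64, 0);  // identical contents; the explicit fill value is redundant
    return a == b ? 0 : 1;    // always returns 0
}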
@@ -360,7 +357,7 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
 
             // Advance to the next block.
             cur_addr = vma_end;
             iter = FindVMA(cur_addr);
-            ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
+            ASSERT(iter != vma_map.end());
         }
     }
@@ -368,7 +365,7 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
     if (result.IsError()) {
         for (const auto [unmap_address, unmap_size] : mapped_regions) {
             ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(),
-                       "MapPhysicalMemory un-map on error");
+                       "Failed to unmap memory range.");
         }
 
         return result;
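Note: this error path depends on mapped_regions recording every mapping that succeeded, so a partial failure can be rolled back before returning. A self-contained sketch of that pattern, with hypothetical MapOne/UnmapOne helpers standing in for MapMemoryBlock/UnmapRange:

#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical helpers, not yuzu APIs.
static bool MapOne(std::uint64_t /*addr*/, std::uint64_t size) {
    return size != 0; // pretend zero-sized mappings fail
}
static void UnmapOne(std::uint64_t /*addr*/, std::uint64_t /*size*/) {}

// Map every range; if any mapping fails, unmap what was already mapped so the
// caller never observes a half-applied operation.
static bool MapAll(const std::vector<std::pair<std::uint64_t, std::uint64_t>>& ranges) {
    std::vector<std::pair<std::uint64_t, std::uint64_t>> mapped_regions;
    for (const auto& [addr, size] : ranges) {
        if (!MapOne(addr, size)) {
            for (const auto& [unmap_addr, unmap_size] : mapped_regions) {
                UnmapOne(unmap_addr, unmap_size);
            }
            return false;
        }
        mapped_regions.emplace_back(addr, size);
    }
    return true;
}

int main() {
    // The second range fails, so the first mapping is rolled back.
    return MapAll({{0x1000, 0x2000}, {0x4000, 0}}) ? 1 : 0;
}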
@@ -381,12 +378,6 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
 }
 
 ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
-    const auto end_addr = target + size;
-    const auto last_addr = end_addr - 1;
-    VAddr cur_addr = target;
-
-    ResultCode result = RESULT_SUCCESS;
-
     // Check how much memory is currently mapped.
     const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size);
     if (mapped_size_result.Failed()) {
@@ -401,13 +392,16 @@ ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
 
     // Keep track of the memory regions we unmap.
     std::vector<std::pair<u64, u64>> unmapped_regions;
+    ResultCode result = RESULT_SUCCESS;
 
     // Try to unmap regions.
     {
-        cur_addr = target;
+        const auto end_addr = target + size;
+        const auto last_addr = end_addr - 1;
+        VAddr cur_addr = target;
 
         auto iter = FindVMA(target);
-        ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
+        ASSERT(iter != vma_map.end());
 
         while (true) {
             const auto& vma = iter->second;
@@ -434,7 +428,7 @@ ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
 
             // Advance to the next block.
             cur_addr = vma_end;
             iter = FindVMA(cur_addr);
-            ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
+            ASSERT(iter != vma_map.end());
         }
     }
@@ -443,10 +437,12 @@ ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
     if (result.IsError()) {
         for (const auto [map_address, map_size] : unmapped_regions) {
             const auto remap_res =
-                MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size, 0), 0,
-                               map_size, MemoryState::Heap, VMAPermission::None);
-            ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error");
+                MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size), 0, map_size,
+                               MemoryState::Heap, VMAPermission::None);
+            ASSERT_MSG(remap_res.Succeeded(), "Failed to remap a memory block.");
         }
+
+        return result;
     }
 
     // Update mapped amount
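Note: the added return result; makes the failure path exit before the mapped-amount accounting below runs; previously a failed unmap would fall through and still adjust the bookkeeping. A toy sketch of that guard, with a hypothetical counter standing in for the real accounting:

#include <cstdint>

// Hypothetical accounting, not the real VMManager member.
struct HeapAccounting {
    std::uint64_t physical_memory_mapped = 0;
};

static int UnmapTracked(HeapAccounting& accounting, std::uint64_t size, bool unmap_ok) {
    if (!unmap_ok) {
        return -1; // bail out before touching the accounting below
    }
    accounting.physical_memory_mapped -= size;
    return 0;
}

int main() {
    HeapAccounting accounting{128};
    const int rc = UnmapTracked(accounting, 64, false);
    // On failure the counter is left untouched.
    return (rc == -1 && accounting.physical_memory_mapped == 128) ? 0 : 1;
}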
@@ -757,20 +753,26 @@ void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryAre
     // Always merge allocated memory blocks, even when they don't share the same backing block.
     if (left.type == VMAType::AllocatedMemoryBlock &&
         (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {
+        const auto right_begin = right.backing_block->begin() + right.offset;
+        const auto right_end = right_begin + right.size;
+
         // Check if we can save work.
         if (left.offset == 0 && left.size == left.backing_block->size()) {
             // Fast case: left is an entire backing block.
-            left.backing_block->insert(left.backing_block->end(),
-                                       right.backing_block->begin() + right.offset,
-                                       right.backing_block->begin() + right.offset + right.size);
+            left.backing_block->insert(left.backing_block->end(), right_begin, right_end);
         } else {
             // Slow case: make a new memory block for left and right.
+            const auto left_begin = left.backing_block->begin() + left.offset;
+            const auto left_end = left_begin + left.size;
+            const auto left_size = static_cast<std::size_t>(std::distance(left_begin, left_end));
+            const auto right_size = static_cast<std::size_t>(std::distance(right_begin, right_end));
+
             auto new_memory = std::make_shared<PhysicalMemory>();
-            new_memory->insert(new_memory->end(), left.backing_block->begin() + left.offset,
-                               left.backing_block->begin() + left.offset + left.size);
-            new_memory->insert(new_memory->end(), right.backing_block->begin() + right.offset,
-                               right.backing_block->begin() + right.offset + right.size);
-            left.backing_block = new_memory;
+            new_memory->reserve(left_size + right_size);
+            new_memory->insert(new_memory->end(), left_begin, left_end);
+            new_memory->insert(new_memory->end(), right_begin, right_end);
+
+            left.backing_block = std::move(new_memory);
             left.offset = 0;
         }
     }
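Note: the slow-path rewrite reserves the combined size before the two inserts, so concatenating the backing blocks performs at most one allocation instead of growing the buffer piecemeal. A minimal sketch, again assuming a std::vector<u8>-style backing store:

#include <cstdint>
#include <vector>

using PhysicalMemory = std::vector<std::uint8_t>;

PhysicalMemory MergeBlocks(const PhysicalMemory& left, const PhysicalMemory& right) {
    PhysicalMemory merged;
    merged.reserve(left.size() + right.size()); // single up-front allocation
    merged.insert(merged.end(), left.begin(), left.end());
    merged.insert(merged.end(), right.begin(), right.end());
    return merged;
}

int main() {
    const PhysicalMemory left(16, 0xAA);
    const PhysicalMemory right(8, 0xBB);
    const auto merged = MergeBlocks(left, right);
    return merged.size() == 24 ? 0 : 1;
}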
@@ -965,7 +967,7 @@ ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
 
     VAddr cur_addr = address;
     auto iter = FindVMA(cur_addr);
-    ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
+    ASSERT(iter != vma_map.end());
 
     while (true) {
         const auto& vma = iter->second;
@@ -986,7 +988,7 @@ ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
         // Advance to the next block.
         cur_addr = vma_end;
         iter = std::next(iter);
-        ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
+        ASSERT(iter != vma_map.end());
     }
 
     return MakeResult(mapped_size);
@@ -1000,7 +1002,7 @@ ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr ad
 
     VAddr cur_addr = address;
    auto iter = FindVMA(cur_addr);
-    ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
+    ASSERT(iter != vma_map.end());
 
     while (true) {
         const auto& vma = iter->second;
@@ -1029,7 +1031,7 @@ ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr ad
         // Advance to the next block.
         cur_addr = vma_end;
         iter = std::next(iter);
-        ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
+        ASSERT(iter != vma_map.end());
     }
 
     return MakeResult(mapped_size);

kernel/vm_manager.h

@@ -454,8 +454,8 @@ public:
 
     /// Maps memory at a given address.
     ///
-    /// @param addr The virtual address to map memory at.
+    /// @param target The virtual address to map memory at.
     /// @param size The amount of memory to map.
     ///
     /// @note The destination address must lie within the Map region.
     ///
@@ -468,8 +468,8 @@ public:
 
     /// Unmaps memory at a given address.
     ///
-    /// @param addr The virtual address to unmap memory at.
+    /// @param target The virtual address to unmap memory at.
     /// @param size The amount of memory to unmap.
     ///
    /// @note The destination address must lie within the Map region.
     ///