mirror of https://github.com/yuzu-emu/yuzu.git

Merge pull request #4122 from lioncash/hide

video_core: Eliminate some variable shadowing
Merged by bunnei on 2020-06-21 22:38:04 -04:00 (committed via GitHub)
commit 14a1181a97
4 changed files with 31 additions and 28 deletions
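
For readers unfamiliar with the warning this pull request targets: variable shadowing occurs when a declaration in an inner scope reuses the name of a variable in an enclosing scope, hiding it for the rest of that scope. Compilers flag this under -Wshadow because the hidden variable is easy to read or write by accident. A minimal, self-contained sketch (not taken from yuzu) of the kind of mistake shadowing invites:

    #include <cstddef>

    // Counts non-zero entries. If the commented-out declaration were enabled, the
    // inner 'count' would shadow the outer accumulator and the function would
    // always return 0.
    std::size_t CountNonZero(const int* data, std::size_t size) {
        std::size_t count = 0;
        for (std::size_t i = 0; i < size; ++i) {
            // std::size_t count = (data[i] != 0) ? 1u : 0u; // -Wshadow: shadows 'count'
            if (data[i] != 0) {
                ++count; // updates the single outer accumulator
            }
        }
        return count;
    }

The hunks below remove shadows in three ways: reusing a reference that is already in scope instead of re-declaring it, capturing only 'this' in lambdas so an outer variable is no longer captured and then hidden by a parameter of the same name, and renaming function parameters.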

src/video_core/buffer_cache/buffer_cache.h

@@ -47,7 +47,7 @@ public:
 bool is_written = false, bool use_fast_cbuf = false) {
 std::lock_guard lock{mutex};

-const auto& memory_manager = system.GPU().MemoryManager();
+auto& memory_manager = system.GPU().MemoryManager();
 const std::optional<VAddr> cpu_addr_opt = memory_manager.GpuToCpuAddress(gpu_addr);
 if (!cpu_addr_opt) {
 return {GetEmptyBuffer(size), 0};
@@ -59,7 +59,6 @@ public:
 constexpr std::size_t max_stream_size = 0x800;
 if (use_fast_cbuf || size < max_stream_size) {
 if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) {
-auto& memory_manager = system.GPU().MemoryManager();
 const bool is_granular = memory_manager.IsGranularRange(gpu_addr, size);
 if (use_fast_cbuf) {
 u8* dest;
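
In the two hunks above, the fix reuses the memory_manager reference obtained at the top of the function instead of fetching it a second time in the inner scope, where the second declaration shadowed the first; the outer reference is also made non-const, matching what the removed inner declaration provided. A self-contained sketch of that pattern (illustrative only, not the real buffer cache):

    #include <cstddef>

    struct MemoryManager {
        // Non-const member function; in this sketch that is what forces the
        // non-const reference below.
        bool IsGranularRange(std::size_t addr, std::size_t size) { return addr + size < 0x1000; }
    };

    struct System {
        MemoryManager manager;
        MemoryManager& Manager() { return manager; }
    };

    bool Map(System& system, std::size_t addr, std::size_t size) {
        auto& memory_manager = system.Manager(); // declared once, non-const
        if (size < 0x800) {
            // Before: 'auto& memory_manager = system.Manager();' was re-declared here,
            // shadowing the outer reference. After: the outer reference is reused.
            return memory_manager.IsGranularRange(addr, size);
        }
        return false;
    }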

src/video_core/macro/macro_jit_x64.cpp

@@ -553,7 +553,7 @@ Xbyak::Reg32 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg32 dst) {
 }

 void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u32 reg) {
-auto SetRegister = [=](u32 reg, Xbyak::Reg32 result) {
+const auto SetRegister = [this](u32 reg, const Xbyak::Reg32& result) {
 // Register 0 is supposed to always return 0. NOP is implemented as a store to the zero
 // register.
 if (reg == 0) {
@@ -561,7 +561,7 @@ void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u3
 }
 mov(dword[STATE + offsetof(JITState, registers) + reg * sizeof(u32)], result);
 };
-auto SetMethodAddress = [=](Xbyak::Reg32 reg) { mov(METHOD_ADDRESS, reg); };
+const auto SetMethodAddress = [this](const Xbyak::Reg32& reg) { mov(METHOD_ADDRESS, reg); };
 switch (operation) {
 case Macro::ResultOperation::IgnoreAndFetch:
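
The macro JIT hunks above address a subtler shadow: with a [=] capture, the enclosing function's 'reg' parameter is captured by copy into the lambda, and the lambda's own 'reg' parameter then shadows that captured copy. Capturing only 'this' means nothing named 'reg' is captured, so nothing is shadowed; the Xbyak registers are also taken by const reference now, and the lambdas are stored in const variables. A small self-contained illustration (not the real JIT code):

    #include <cstdint>
    #include <iostream>

    void ProcessResult(std::uint32_t reg, std::uint32_t result) {
        // Before: a [=] capture copied the outer 'reg' into the closure, and the
        // parameter 'reg' below shadowed that copy. Capturing nothing here (the
        // member-function original captures only 'this') removes the shadow.
        const auto set_register = [](std::uint32_t reg, const std::uint32_t& value) {
            std::cout << "r" << reg << " = " << value << '\n';
        };
        set_register(reg, result);
    }

    int main() {
        ProcessResult(3, 42);
        return 0;
    }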

src/video_core/memory_manager.cpp

@@ -210,10 +210,11 @@ bool MemoryManager::IsBlockContinuous(const GPUVAddr start, const std::size_t si
 return range == inner_size;
 }

-void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::size_t size) const {
+void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer,
+const std::size_t size) const {
 std::size_t remaining_size{size};
-std::size_t page_index{src_addr >> page_bits};
-std::size_t page_offset{src_addr & page_mask};
+std::size_t page_index{gpu_src_addr >> page_bits};
+std::size_t page_offset{gpu_src_addr & page_mask};

 auto& memory = system.Memory();
@@ -234,11 +235,11 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s
 }
 }

-void MemoryManager::ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer,
+void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
 const std::size_t size) const {
 std::size_t remaining_size{size};
-std::size_t page_index{src_addr >> page_bits};
-std::size_t page_offset{src_addr & page_mask};
+std::size_t page_index{gpu_src_addr >> page_bits};
+std::size_t page_offset{gpu_src_addr & page_mask};

 auto& memory = system.Memory();
@@ -259,10 +260,11 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer,
 }
 }

-void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const std::size_t size) {
+void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer,
+const std::size_t size) {
 std::size_t remaining_size{size};
-std::size_t page_index{dest_addr >> page_bits};
-std::size_t page_offset{dest_addr & page_mask};
+std::size_t page_index{gpu_dest_addr >> page_bits};
+std::size_t page_offset{gpu_dest_addr & page_mask};

 auto& memory = system.Memory();
@@ -283,11 +285,11 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const
 }
 }

-void MemoryManager::WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer,
+void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
 const std::size_t size) {
 std::size_t remaining_size{size};
-std::size_t page_index{dest_addr >> page_bits};
-std::size_t page_offset{dest_addr & page_mask};
+std::size_t page_index{gpu_dest_addr >> page_bits};
+std::size_t page_offset{gpu_dest_addr & page_mask};

 auto& memory = system.Memory();
@@ -306,16 +308,18 @@ void MemoryManager::WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer,
 }
 }

-void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) {
+void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr,
+const std::size_t size) {
 std::vector<u8> tmp_buffer(size);
-ReadBlock(src_addr, tmp_buffer.data(), size);
-WriteBlock(dest_addr, tmp_buffer.data(), size);
+ReadBlock(gpu_src_addr, tmp_buffer.data(), size);
+WriteBlock(gpu_dest_addr, tmp_buffer.data(), size);
 }

-void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) {
+void MemoryManager::CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr,
+const std::size_t size) {
 std::vector<u8> tmp_buffer(size);
-ReadBlockUnsafe(src_addr, tmp_buffer.data(), size);
-WriteBlockUnsafe(dest_addr, tmp_buffer.data(), size);
+ReadBlockUnsafe(gpu_src_addr, tmp_buffer.data(), size);
+WriteBlockUnsafe(gpu_dest_addr, tmp_buffer.data(), size);
 }

 bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
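
The memory_manager.cpp hunks above are behaviour-preserving renames: the GPU-side address parameters change from src_addr/dest_addr to gpu_src_addr/gpu_dest_addr, and the signatures are re-wrapped, presumably to stay within the formatting line limit. A plausible reading (the colliding locals are not visible in these hunks, so this is an assumption) is that the old names clashed with identically named locals for translated addresses inside the copy loops. A generic sketch of that rename pattern, with hypothetical names:

    #include <cstddef>
    #include <cstdint>

    using GPUVAddr = std::uint64_t;
    using VAddr = std::uint64_t;

    // Hypothetical page-walk loop: before the rename, both the parameter and the
    // per-page translated address were called 'src_addr', so the inner declaration
    // shadowed the parameter.
    VAddr TranslateLastPage(GPUVAddr gpu_src_addr, std::size_t num_pages) {
        VAddr last = 0;
        for (std::size_t page = 0; page < num_pages; ++page) {
            // Stand-in for a real GPU-to-CPU address translation.
            const VAddr src_addr = (gpu_src_addr + page * 0x1000) ^ 0x80000000ULL;
            last = src_addr;
        }
        return last;
    }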

src/video_core/memory_manager.h

@@ -79,9 +79,9 @@ public:
 * in the Host Memory counterpart. Note: This functions cause Host GPU Memory
 * Flushes and Invalidations, respectively to each operation.
 */
-void ReadBlock(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const;
-void WriteBlock(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
-void CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
+void ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const;
+void WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size);
+void CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size);

 /**
 * ReadBlockUnsafe and WriteBlockUnsafe are special versions of ReadBlock and
@@ -93,9 +93,9 @@ public:
 * WriteBlockUnsafe instead of WriteBlock since it shouldn't invalidate the texture
 * being flushed.
 */
-void ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer, std::size_t size) const;
-void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
-void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);
+void ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const;
+void WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size);
+void CopyBlockUnsafe(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size);

 /**
 * IsGranularRange checks if a gpu region can be simply read with a pointer
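
The renamed declarations above keep the contract described in the quoted comments: the plain ReadBlock/WriteBlock/CopyBlock calls perform host GPU memory flushes and invalidations, while the Unsafe variants skip that maintenance so that, for example, flushing a texture back to guest memory does not invalidate the very texture being written. A toy, self-contained mock of that two-tier block API (illustrative only; it is not the real class):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    class MockMemoryManager {
    public:
        explicit MockMemoryManager(std::size_t bytes) : backing(bytes) {}

        // "Safe" write: performs cache maintenance before touching guest memory.
        void WriteBlock(std::size_t gpu_dest_addr, const void* src_buffer, std::size_t size) {
            InvalidateRegion(gpu_dest_addr, size); // would invalidate cached GPU resources
            WriteBlockUnsafe(gpu_dest_addr, src_buffer, size);
        }

        // "Unsafe" write: raw copy only, for callers that already manage cache state
        // (e.g. a texture flush, as the comment above describes).
        void WriteBlockUnsafe(std::size_t gpu_dest_addr, const void* src_buffer, std::size_t size) {
            std::memcpy(backing.data() + gpu_dest_addr, src_buffer, size);
        }

    private:
        void InvalidateRegion(std::size_t, std::size_t) { /* cache bookkeeping elided */ }

        std::vector<unsigned char> backing;
    };

The read path mirrors this split: ReadBlock flushes host caches for the region first, while ReadBlockUnsafe copies directly.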