2
1
Fork 0
mirror of https://github.com/yuzu-emu/yuzu.git synced 2024-07-04 23:31:19 +01:00

buffer_cache: Avoid copying twice on certain cases

Avoid copying to a staging buffer on non-granular memory addresses.
Add a callable argument to StreamBufferUpload to be able to copy to the
staging buffer directly from ReadBlockUnsafe.
This commit is contained in:
ReinUsesLisp 2020-05-27 23:05:50 -03:00
parent 508242c267
commit 3b2dee88e6

View file

@@ -56,24 +56,28 @@ public:
         if (use_fast_cbuf || size < max_stream_size) {
             if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) {
                 auto& memory_manager = system.GPU().MemoryManager();
+                const bool is_granular = memory_manager.IsGranularRange(gpu_addr, size);
                 if (use_fast_cbuf) {
-                    if (memory_manager.IsGranularRange(gpu_addr, size)) {
-                        const auto host_ptr = memory_manager.GetPointer(gpu_addr);
-                        return ConstBufferUpload(host_ptr, size);
+                    u8* dest;
+                    if (is_granular) {
+                        dest = memory_manager.GetPointer(gpu_addr);
                     } else {
                         staging_buffer.resize(size);
-                        memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
-                        return ConstBufferUpload(staging_buffer.data(), size);
+                        dest = staging_buffer.data();
+                        memory_manager.ReadBlockUnsafe(gpu_addr, dest, size);
                     }
+                    return ConstBufferUpload(dest, size);
+                }
+                if (is_granular) {
+                    u8* const host_ptr = memory_manager.GetPointer(gpu_addr);
+                    return StreamBufferUpload(size, alignment, [host_ptr, size](u8* dest) {
+                        std::memcpy(dest, host_ptr, size);
+                    });
                 } else {
-                    if (memory_manager.IsGranularRange(gpu_addr, size)) {
-                        const auto host_ptr = memory_manager.GetPointer(gpu_addr);
-                        return StreamBufferUpload(host_ptr, size, alignment);
-                    } else {
-                        staging_buffer.resize(size);
-                        memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
-                        return StreamBufferUpload(staging_buffer.data(), size, alignment);
-                    }
+                    return StreamBufferUpload(
+                        size, alignment, [&memory_manager, gpu_addr, size](u8* dest) {
+                            memory_manager.ReadBlockUnsafe(gpu_addr, dest, size);
+                        });
                 }
             }
         }
@@ -101,7 +105,9 @@ public:
     BufferInfo UploadHostMemory(const void* raw_pointer, std::size_t size,
                                 std::size_t alignment = 4) {
         std::lock_guard lock{mutex};
-        return StreamBufferUpload(raw_pointer, size, alignment);
+        return StreamBufferUpload(size, alignment, [raw_pointer, size](u8* dest) {
+            std::memcpy(dest, raw_pointer, size);
+        });
     }

     void Map(std::size_t max_size) {
@@ -424,11 +430,11 @@ private:
         map->MarkAsModified(false, 0);
     }

-    BufferInfo StreamBufferUpload(const void* raw_pointer, std::size_t size,
-                                  std::size_t alignment) {
+    template <typename Callable>
+    BufferInfo StreamBufferUpload(std::size_t size, std::size_t alignment, Callable&& callable) {
        AlignBuffer(alignment);
        const std::size_t uploaded_offset = buffer_offset;
-       std::memcpy(buffer_ptr, raw_pointer, size);
+       callable(buffer_ptr);
        buffer_ptr += size;
        buffer_offset += size;