
rasterizer: Update pages in batches

ReinUsesLisp 2021-06-06 20:58:57 -03:00 committed by Markus Wick
parent ee67460ff0
commit 7b0d8bd1fb


src/video_core/rasterizer_accelerated.cpp

@@ -2,6 +2,8 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <atomic>
+
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/div_ceil.h"
@@ -10,35 +12,59 @@
 namespace VideoCore {
 
-RasterizerAccelerated::RasterizerAccelerated(Core::Memory::Memory& cpu_memory_)
-    : cpu_memory{cpu_memory_} {}
+using namespace Core::Memory;
+
+RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_) : cpu_memory{cpu_memory_} {}
 
 RasterizerAccelerated::~RasterizerAccelerated() = default;
 
 void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
-    const auto page_end = Common::DivCeil(addr + size, Core::Memory::PAGE_SIZE);
-    for (auto page = addr >> Core::Memory::PAGE_BITS; page != page_end; ++page) {
-        auto& count = cached_pages.at(page >> 2).Count(page);
+    u64 uncache_begin = 0;
+    u64 cache_begin = 0;
+    u64 uncache_bytes = 0;
+    u64 cache_bytes = 0;
+
+    std::atomic_thread_fence(std::memory_order_acquire);
+    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
+    for (u64 page = addr >> PAGE_BITS; page != page_end; ++page) {
+        std::atomic_uint16_t& count = cached_pages.at(page >> 2).Count(page);
 
         if (delta > 0) {
-            ASSERT_MSG(count < UINT16_MAX, "Count may overflow!");
+            ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!");
         } else if (delta < 0) {
-            ASSERT_MSG(count > 0, "Count may underflow!");
+            ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!");
         } else {
-            ASSERT_MSG(true, "Delta must be non-zero!");
+            ASSERT_MSG(false, "Delta must be non-zero!");
         }
 
         // Adds or subtracts 1, as count is a unsigned 8-bit value
-        count += static_cast<u16>(delta);
+        count.fetch_add(static_cast<u16>(delta), std::memory_order_release);
 
         // Assume delta is either -1 or 1
-        if (count == 0) {
-            cpu_memory.RasterizerMarkRegionCached(page << Core::Memory::PAGE_BITS,
-                                                  Core::Memory::PAGE_SIZE, false);
-        } else if (count == 1 && delta > 0) {
-            cpu_memory.RasterizerMarkRegionCached(page << Core::Memory::PAGE_BITS,
-                                                  Core::Memory::PAGE_SIZE, true);
+        if (count.load(std::memory_order::relaxed) == 0) {
+            if (uncache_bytes == 0) {
+                uncache_begin = page;
+            }
+            uncache_bytes += PAGE_SIZE;
+        } else if (uncache_bytes > 0) {
+            cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
+            uncache_bytes = 0;
         }
+        if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
+            if (cache_bytes == 0) {
+                cache_begin = page;
+            }
+            cache_bytes += PAGE_SIZE;
+        } else if (cache_bytes > 0) {
+            cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
+            cache_bytes = 0;
+        }
+    }
+    if (uncache_bytes > 0) {
+        cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
+    }
+    if (cache_bytes > 0) {
+        cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
     }
 }
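
For context, here is a minimal, self-contained sketch of the batching pattern this commit introduces: instead of toggling the cached state one page at a time, contiguous pages whose reference count crosses zero (or reaches one) are coalesced into runs, and each run is flushed with a single call. The names page_counts, mark_region, PAGE_BITS, and the 4 KiB page size below are stand-ins chosen for illustration (mark_region plays the role of Memory::RasterizerMarkRegionCached), so treat this as a sketch of the idea rather than the yuzu implementation.

#include <array>
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <functional>

namespace {

constexpr std::uint64_t PAGE_BITS = 12;               // assumed 4 KiB pages
constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;

using MarkFn = std::function<void(std::uint64_t addr, std::uint64_t size, bool cached)>;

// Per-page reference counts; the real code packs several counters per cache line.
std::array<std::atomic_uint16_t, 1024> page_counts{};

void UpdatePagesCachedCount(std::uint64_t addr, std::uint64_t size, int delta,
                            const MarkFn& mark_region) {
    std::uint64_t cache_begin = 0, cache_bytes = 0;
    std::uint64_t uncache_begin = 0, uncache_bytes = 0;

    const std::uint64_t page_end = (addr + size + PAGE_SIZE - 1) >> PAGE_BITS;
    for (std::uint64_t page = addr >> PAGE_BITS; page != page_end; ++page) {
        std::atomic_uint16_t& count = page_counts.at(page);
        // delta is assumed to be +1 or -1; unsigned wrap-around handles the subtraction.
        count.fetch_add(static_cast<std::uint16_t>(delta), std::memory_order_release);

        const std::uint16_t now = count.load(std::memory_order_relaxed);
        if (now == 0) {
            // Page just became unreferenced: start or extend the uncache run.
            if (uncache_bytes == 0) {
                uncache_begin = page;
            }
            uncache_bytes += PAGE_SIZE;
        } else if (uncache_bytes > 0) {
            // The run of unreferenced pages ended: flush it with one call.
            mark_region(uncache_begin << PAGE_BITS, uncache_bytes, false);
            uncache_bytes = 0;
        }
        if (now == 1 && delta > 0) {
            // Page just became referenced: start or extend the cache run.
            if (cache_bytes == 0) {
                cache_begin = page;
            }
            cache_bytes += PAGE_SIZE;
        } else if (cache_bytes > 0) {
            mark_region(cache_begin << PAGE_BITS, cache_bytes, true);
            cache_bytes = 0;
        }
    }
    // Flush whichever run is still open when the loop ends.
    if (uncache_bytes > 0) {
        mark_region(uncache_begin << PAGE_BITS, uncache_bytes, false);
    }
    if (cache_bytes > 0) {
        mark_region(cache_begin << PAGE_BITS, cache_bytes, true);
    }
}

} // namespace

int main() {
    const MarkFn print = [](std::uint64_t addr, std::uint64_t size, bool cached) {
        std::printf("mark %s: addr=0x%llx size=0x%llx\n", cached ? "cached" : "uncached",
                    static_cast<unsigned long long>(addr),
                    static_cast<unsigned long long>(size));
    };
    // Caching 8 contiguous pages produces a single mark call instead of eight.
    UpdatePagesCachedCount(0x0, 8 * PAGE_SIZE, +1, print);
    // Releasing them likewise coalesces into one uncache call.
    UpdatePagesCachedCount(0x0, 8 * PAGE_SIZE, -1, print);
}

The point of the coalescing is that each call into the memory subsystem carries fixed overhead, so issuing one call per contiguous run rather than one per page amortizes that cost when large regions are mapped or unmapped at once.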