
renderer_vulkan: convert FSR to graphics pipeline

Liam 2024-01-15 10:19:02 -05:00
parent b78900e956
commit 2ed9586130
17 changed files with 311 additions and 463 deletions
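
In summary: the EASU and RCAS passes, previously compute shaders writing their results with imageStore, become fragment shaders that write frag_color while rasterizing a full-screen triangle into an R16G16B16A16_SFLOAT render target, and the FSR push constants move from the compute stage to the fragment stage. As a rough, condensed sketch (not part of the commit; RecordFsrPass is a hypothetical helper, the calls mirror the new FSR::Draw shown further down), each stage is now recorded like this:

void RecordFsrPass(vk::CommandBuffer& cmdbuf, VkRenderPass renderpass,
                   VkFramebuffer framebuffer, VkPipeline pipeline,
                   VkPipelineLayout layout, VkDescriptorSet descriptor_set,
                   const std::array<u32, 16>& push_constants, VkExtent2D extent) {
    // One FSR stage = one render pass drawing a vertex-buffer-free triangle.
    BeginRenderPass(cmdbuf, renderpass, framebuffer, extent);
    cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
    cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set, {});
    cmdbuf.PushConstants(layout, VK_SHADER_STAGE_FRAGMENT_BIT, push_constants);
    cmdbuf.Draw(3, 1, 0, 0); // three vertices from vulkan_fidelityfx_fsr.vert cover the screen
    cmdbuf.EndRenderPass();
}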

View file

@@ -9,7 +9,7 @@ set(FIDELITYFX_FILES
)
set(GLSL_INCLUDES
fidelityfx_fsr.comp
fidelityfx_fsr.frag
${FIDELITYFX_FILES}
)
@@ -56,10 +56,11 @@ set(SHADER_FILES
vulkan_color_clear.frag
vulkan_color_clear.vert
vulkan_depthstencil_clear.frag
vulkan_fidelityfx_fsr_easu_fp16.comp
vulkan_fidelityfx_fsr_easu_fp32.comp
vulkan_fidelityfx_fsr_rcas_fp16.comp
vulkan_fidelityfx_fsr_rcas_fp32.comp
vulkan_fidelityfx_fsr.vert
vulkan_fidelityfx_fsr_easu_fp16.frag
vulkan_fidelityfx_fsr_easu_fp32.frag
vulkan_fidelityfx_fsr_rcas_fp16.frag
vulkan_fidelityfx_fsr_rcas_fp32.frag
vulkan_present.frag
vulkan_present.vert
vulkan_present_scaleforce_fp16.frag

View file

@@ -34,7 +34,6 @@ layout( push_constant ) uniform constants {
};
layout(set=0,binding=0) uniform sampler2D InputTexture;
layout(set=0,binding=1,rgba16f) uniform image2D OutputTexture;
#define A_GPU 1
#define A_GLSL 1
@@ -72,44 +71,40 @@ layout(set=0,binding=1,rgba16f) uniform image2D OutputTexture;
#include "ffx_fsr1.h"
void CurrFilter(AU2 pos) {
#if USE_BILINEAR
AF2 pp = (AF2(pos) * AF2_AU2(Const0.xy) + AF2_AU2(Const0.zw)) * AF2_AU2(Const1.xy) + AF2(0.5, -0.5) * AF2_AU2(Const1.zw);
imageStore(OutputTexture, ASU2(pos), textureLod(InputTexture, pp, 0.0));
#if USE_RCAS
layout(location = 0) in vec2 frag_texcoord;
#endif
layout (location = 0) out vec4 frag_color;
void CurrFilter(AU2 pos) {
#if USE_EASU
#ifndef YUZU_USE_FP16
AF3 c;
FsrEasuF(c, pos, Const0, Const1, Const2, Const3);
imageStore(OutputTexture, ASU2(pos), AF4(c, 1));
frag_color = AF4(c, 1.0);
#else
AH3 c;
FsrEasuH(c, pos, Const0, Const1, Const2, Const3);
imageStore(OutputTexture, ASU2(pos), AH4(c, 1));
frag_color = AH4(c, 1.0);
#endif
#endif
#if USE_RCAS
#ifndef YUZU_USE_FP16
AF3 c;
FsrRcasF(c.r, c.g, c.b, pos, Const0);
imageStore(OutputTexture, ASU2(pos), AF4(c, 1));
frag_color = AF4(c, 1.0);
#else
AH3 c;
FsrRcasH(c.r, c.g, c.b, pos, Const0);
imageStore(OutputTexture, ASU2(pos), AH4(c, 1));
frag_color = AH4(c, 1.0);
#endif
#endif
}
layout(local_size_x=64) in;
void main() {
// Do remapping of local xy in workgroup for a more PS-like swizzle pattern.
AU2 gxy = ARmp8x8(gl_LocalInvocationID.x) + AU2(gl_WorkGroupID.x << 4u, gl_WorkGroupID.y << 4u);
CurrFilter(gxy);
gxy.x += 8u;
CurrFilter(gxy);
gxy.y += 8u;
CurrFilter(gxy);
gxy.x -= 8u;
CurrFilter(gxy);
#if USE_RCAS
CurrFilter(AU2(frag_texcoord * vec2(textureSize(InputTexture, 0))));
#else
CurrFilter(AU2(gl_FragCoord.xy));
#endif
}

View file

@@ -0,0 +1,13 @@
// SPDX-FileCopyrightText: Copyright 2024 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#version 450
layout(location = 0) out vec2 texcoord;
void main() {
float x = float((gl_VertexIndex & 1) << 2);
float y = float((gl_VertexIndex & 2) << 1);
gl_Position = vec4(x - 1.0, y - 1.0, 0.0, 1.0);
texcoord = vec2(x, y) / 2.0;
}
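
The new vertex shader is the usual vertex-buffer-free full-screen triangle: the bit twiddling on gl_VertexIndex produces one oversized triangle that clipping trims to the viewport. A standalone check of that math (illustrative only, not from the commit):

#include <cstdio>

int main() {
    for (int index = 0; index < 3; ++index) {
        const float x = static_cast<float>((index & 1) << 2); // 0, 4, 0
        const float y = static_cast<float>((index & 2) << 1); // 0, 0, 4
        std::printf("v%d: position=(%.0f, %.0f) texcoord=(%.0f, %.0f)\n",
                    index, x - 1.0f, y - 1.0f, x / 2.0f, y / 2.0f);
    }
}

It prints positions (-1,-1), (3,-1), (-1,3) with texcoords (0,0), (2,0), (0,2), so the single triangle covers the whole clip-space square without any vertex buffer.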

View file

@@ -7,4 +7,4 @@
#define YUZU_USE_FP16
#define USE_EASU 1
#include "fidelityfx_fsr.comp"
#include "fidelityfx_fsr.frag"

View file

@@ -6,4 +6,4 @@
#define USE_EASU 1
#include "fidelityfx_fsr.comp"
#include "fidelityfx_fsr.frag"

View file

@@ -7,4 +7,4 @@
#define YUZU_USE_FP16
#define USE_RCAS 1
#include "fidelityfx_fsr.comp"
#include "fidelityfx_fsr.frag"

View file

@@ -6,4 +6,4 @@
#define USE_RCAS 1
#include "fidelityfx_fsr.comp"
#include "fidelityfx_fsr.frag"

View file

@@ -12,16 +12,14 @@ class Scheduler;
class AntiAliasPass {
public:
virtual ~AntiAliasPass() = default;
virtual VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
VkImageView source_image_view) = 0;
virtual void Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
VkImageView* inout_image_view) = 0;
};
class NoAA final : public AntiAliasPass {
public:
virtual VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
VkImageView source_image_view) {
return source_image_view;
}
void Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
VkImageView* inout_image_view) override {}
};
} // namespace Vulkan
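
The interface change above turns AntiAliasPass::Draw into an in/out update of the working image, so BlitScreen can chain passes without tracking separate return values (see the vk_blit_screen.cpp hunk at the end). Roughly, a caller now does the following (hedged sketch; variable names assumed):

VkImage image = source_image;
VkImageView image_view = source_image_view;
anti_alias->Draw(scheduler, image_index, &image, &image_view);
// NoAA leaves the handles untouched; FXAA/SMAA overwrite them with their output.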

View file

@@ -6,11 +6,13 @@
#include "common/settings.h"
#include "video_core/fsr.h"
#include "video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp16_comp_spv.h"
#include "video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp32_comp_spv.h"
#include "video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp16_comp_spv.h"
#include "video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp32_comp_spv.h"
#include "video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp16_frag_spv.h"
#include "video_core/host_shaders/vulkan_fidelityfx_fsr_easu_fp32_frag_spv.h"
#include "video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp16_frag_spv.h"
#include "video_core/host_shaders/vulkan_fidelityfx_fsr_rcas_fp32_frag_spv.h"
#include "video_core/host_shaders/vulkan_fidelityfx_fsr_vert_spv.h"
#include "video_core/renderer_vulkan/present/fsr.h"
#include "video_core/renderer_vulkan/present/util.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_shader_util.h"
#include "video_core/vulkan_common/vulkan_device.h"
@@ -18,403 +20,207 @@
namespace Vulkan {
using namespace FSR;
FSR::FSR(const Device& device_, MemoryAllocator& memory_allocator_, size_t image_count_,
VkExtent2D output_size_)
: device{device_}, memory_allocator{memory_allocator_}, image_count{image_count_},
output_size{output_size_} {
using PushConstants = std::array<u32, 4 * 4>;
FSR::FSR(const Device& device, MemoryAllocator& memory_allocator, size_t image_count,
VkExtent2D extent)
: m_device{device}, m_memory_allocator{memory_allocator},
m_image_count{image_count}, m_extent{extent} {
CreateImages();
CreateRenderPasses();
CreateSampler();
CreateShaders();
CreateDescriptorPool();
CreateDescriptorSetLayout();
CreateDescriptorSets();
CreatePipelineLayout();
CreatePipeline();
}
VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect) {
UpdateDescriptorSet(image_index, image_view);
scheduler.Record([this, image_index, input_image_extent, crop_rect](vk::CommandBuffer cmdbuf) {
const VkImageMemoryBarrier base_barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = 0,
.dstAccessMask = 0,
.oldLayout = VK_IMAGE_LAYOUT_GENERAL,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = {},
.subresourceRange =
{
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *easu_pipeline);
const f32 input_image_width = static_cast<f32>(input_image_extent.width);
const f32 input_image_height = static_cast<f32>(input_image_extent.height);
const f32 output_image_width = static_cast<f32>(output_size.width);
const f32 output_image_height = static_cast<f32>(output_size.height);
const f32 viewport_width = (crop_rect.right - crop_rect.left) * input_image_width;
const f32 viewport_x = crop_rect.left * input_image_width;
const f32 viewport_height = (crop_rect.bottom - crop_rect.top) * input_image_height;
const f32 viewport_y = crop_rect.top * input_image_height;
std::array<u32, 4 * 4> push_constants;
FsrEasuConOffset(push_constants.data() + 0, push_constants.data() + 4,
push_constants.data() + 8, push_constants.data() + 12,
viewport_width, viewport_height, input_image_width, input_image_height,
output_image_width, output_image_height, viewport_x, viewport_y);
cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, push_constants);
{
VkImageMemoryBarrier fsr_write_barrier = base_barrier;
fsr_write_barrier.image = *images[image_index];
fsr_write_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, fsr_write_barrier);
}
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
descriptor_sets[image_index * 2], {});
cmdbuf.Dispatch(Common::DivCeil(output_size.width, 16u),
Common::DivCeil(output_size.height, 16u), 1);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *rcas_pipeline);
const float sharpening =
static_cast<float>(Settings::values.fsr_sharpening_slider.GetValue()) / 100.0f;
FsrRcasCon(push_constants.data(), sharpening);
cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, push_constants);
{
std::array<VkImageMemoryBarrier, 2> barriers;
auto& fsr_read_barrier = barriers[0];
auto& blit_write_barrier = barriers[1];
fsr_read_barrier = base_barrier;
fsr_read_barrier.image = *images[image_index];
fsr_read_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
fsr_read_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
blit_write_barrier = base_barrier;
blit_write_barrier.image = *images[image_count + image_index];
blit_write_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
blit_write_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, {}, {}, barriers);
}
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline_layout, 0,
descriptor_sets[image_index * 2 + 1], {});
cmdbuf.Dispatch(Common::DivCeil(output_size.width, 16u),
Common::DivCeil(output_size.height, 16u), 1);
{
std::array<VkImageMemoryBarrier, 1> barriers;
auto& blit_read_barrier = barriers[0];
blit_read_barrier = base_barrier;
blit_read_barrier.image = *images[image_count + image_index];
blit_read_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
blit_read_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, {}, {}, barriers);
}
});
return *image_views[image_count + image_index];
}
void FSR::CreateDescriptorPool() {
const std::array<VkDescriptorPoolSize, 2> pool_sizes{{
{
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = static_cast<u32>(image_count * 2),
},
{
.type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
.descriptorCount = static_cast<u32>(image_count * 2),
},
}};
const VkDescriptorPoolCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.maxSets = static_cast<u32>(image_count * 2),
.poolSizeCount = static_cast<u32>(pool_sizes.size()),
.pPoolSizes = pool_sizes.data(),
};
descriptor_pool = device.GetLogical().CreateDescriptorPool(ci);
}
void FSR::CreateDescriptorSetLayout() {
const std::array<VkDescriptorSetLayoutBinding, 2> layout_bindings{{
{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = sampler.address(),
},
{
.binding = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = sampler.address(),
},
}};
const VkDescriptorSetLayoutCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.bindingCount = static_cast<u32>(layout_bindings.size()),
.pBindings = layout_bindings.data(),
};
descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(ci);
}
void FSR::CreateDescriptorSets() {
const u32 sets = static_cast<u32>(image_count * 2);
const std::vector layouts(sets, *descriptor_set_layout);
const VkDescriptorSetAllocateInfo ai{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = nullptr,
.descriptorPool = *descriptor_pool,
.descriptorSetCount = sets,
.pSetLayouts = layouts.data(),
};
descriptor_sets = descriptor_pool.Allocate(ai);
CreatePipelineLayouts();
CreatePipelines();
}
void FSR::CreateImages() {
images.resize(image_count * 2);
image_views.resize(image_count * 2);
for (size_t i = 0; i < image_count * 2; ++i) {
images[i] = memory_allocator.CreateImage(VkImageCreateInfo{
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.imageType = VK_IMAGE_TYPE_2D,
.format = VK_FORMAT_R16G16B16A16_SFLOAT,
.extent =
{
.width = output_size.width,
.height = output_size.height,
.depth = 1,
},
.mipLevels = 1,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
});
image_views[i] = device.GetLogical().CreateImageView(VkImageViewCreateInfo{
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = *images[i],
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = VK_FORMAT_R16G16B16A16_SFLOAT,
.components =
{
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
},
.subresourceRange =
{
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
});
m_dynamic_images.resize(m_image_count);
for (auto& images : m_dynamic_images) {
images.images[Easu] =
CreateWrappedImage(m_memory_allocator, m_extent, VK_FORMAT_R16G16B16A16_SFLOAT);
images.images[Rcas] =
CreateWrappedImage(m_memory_allocator, m_extent, VK_FORMAT_R16G16B16A16_SFLOAT);
images.image_views[Easu] =
CreateWrappedImageView(m_device, images.images[Easu], VK_FORMAT_R16G16B16A16_SFLOAT);
images.image_views[Rcas] =
CreateWrappedImageView(m_device, images.images[Rcas], VK_FORMAT_R16G16B16A16_SFLOAT);
}
}
void FSR::CreatePipelineLayout() {
VkPushConstantRange push_const{
.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
void FSR::CreateRenderPasses() {
m_renderpass = CreateWrappedRenderPass(m_device, VK_FORMAT_R16G16B16A16_SFLOAT);
for (auto& images : m_dynamic_images) {
images.framebuffers[Easu] =
CreateWrappedFramebuffer(m_device, m_renderpass, images.image_views[Easu], m_extent);
images.framebuffers[Rcas] =
CreateWrappedFramebuffer(m_device, m_renderpass, images.image_views[Rcas], m_extent);
}
}
void FSR::CreateSampler() {
m_sampler = CreateBilinearSampler(m_device);
}
void FSR::CreateShaders() {
m_vert_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_VERT_SPV);
if (m_device.IsFloat16Supported()) {
m_easu_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_EASU_FP16_FRAG_SPV);
m_rcas_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_RCAS_FP16_FRAG_SPV);
} else {
m_easu_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_EASU_FP32_FRAG_SPV);
m_rcas_shader = BuildShader(m_device, VULKAN_FIDELITYFX_FSR_RCAS_FP32_FRAG_SPV);
}
}
void FSR::CreateDescriptorPool() {
// EASU: 1 descriptor
// RCAS: 1 descriptor
// 2 descriptors, 2 descriptor sets per invocation
m_descriptor_pool = CreateWrappedDescriptorPool(m_device, 2 * m_image_count, 2 * m_image_count);
}
void FSR::CreateDescriptorSetLayout() {
m_descriptor_set_layout =
CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
}
void FSR::CreateDescriptorSets() {
std::vector<VkDescriptorSetLayout> layouts(MaxFsrStage, *m_descriptor_set_layout);
for (auto& images : m_dynamic_images) {
images.descriptor_sets = CreateWrappedDescriptorSets(m_descriptor_pool, layouts);
}
}
void FSR::CreatePipelineLayouts() {
const VkPushConstantRange range{
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
.offset = 0,
.size = sizeof(std::array<u32, 4 * 4>),
.size = sizeof(PushConstants),
};
VkPipelineLayoutCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.setLayoutCount = 1,
.pSetLayouts = descriptor_set_layout.address(),
.pSetLayouts = m_descriptor_set_layout.address(),
.pushConstantRangeCount = 1,
.pPushConstantRanges = &push_const,
.pPushConstantRanges = &range,
};
pipeline_layout = device.GetLogical().CreatePipelineLayout(ci);
m_pipeline_layout = m_device.GetLogical().CreatePipelineLayout(ci);
}
void FSR::UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const {
const auto fsr_image_view = *image_views[image_index];
const auto blit_image_view = *image_views[image_count + image_index];
const VkDescriptorImageInfo image_info{
.sampler = VK_NULL_HANDLE,
.imageView = image_view,
.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
};
const VkDescriptorImageInfo fsr_image_info{
.sampler = VK_NULL_HANDLE,
.imageView = fsr_image_view,
.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
};
const VkDescriptorImageInfo blit_image_info{
.sampler = VK_NULL_HANDLE,
.imageView = blit_image_view,
.imageLayout = VK_IMAGE_LAYOUT_GENERAL,
};
VkWriteDescriptorSet sampler_write{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = descriptor_sets[image_index * 2],
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &image_info,
.pBufferInfo = nullptr,
.pTexelBufferView = nullptr,
};
VkWriteDescriptorSet output_write{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = descriptor_sets[image_index * 2],
.dstBinding = 1,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
.pImageInfo = &fsr_image_info,
.pBufferInfo = nullptr,
.pTexelBufferView = nullptr,
};
device.GetLogical().UpdateDescriptorSets(std::array{sampler_write, output_write}, {});
sampler_write.dstSet = descriptor_sets[image_index * 2 + 1];
sampler_write.pImageInfo = &fsr_image_info;
output_write.dstSet = descriptor_sets[image_index * 2 + 1];
output_write.pImageInfo = &blit_image_info;
device.GetLogical().UpdateDescriptorSets(std::array{sampler_write, output_write}, {});
void FSR::CreatePipelines() {
m_easu_pipeline = CreateWrappedPipeline(m_device, m_renderpass, m_pipeline_layout,
std::tie(m_vert_shader, m_easu_shader));
m_rcas_pipeline = CreateWrappedPipeline(m_device, m_renderpass, m_pipeline_layout,
std::tie(m_vert_shader, m_rcas_shader));
}
void FSR::CreateSampler() {
const VkSamplerCreateInfo ci{
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.magFilter = VK_FILTER_LINEAR,
.minFilter = VK_FILTER_LINEAR,
.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.mipLodBias = 0.0f,
.anisotropyEnable = VK_FALSE,
.maxAnisotropy = 0.0f,
.compareEnable = VK_FALSE,
.compareOp = VK_COMPARE_OP_NEVER,
.minLod = 0.0f,
.maxLod = 0.0f,
.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
.unnormalizedCoordinates = VK_FALSE,
};
void FSR::UpdateDescriptorSets(VkImageView image_view, size_t image_index) {
Images& images = m_dynamic_images[image_index];
std::vector<VkDescriptorImageInfo> image_infos;
std::vector<VkWriteDescriptorSet> updates;
image_infos.reserve(2);
sampler = device.GetLogical().CreateSampler(ci);
updates.push_back(CreateWriteDescriptorSet(image_infos, *m_sampler, image_view,
images.descriptor_sets[Easu], 0));
updates.push_back(CreateWriteDescriptorSet(image_infos, *m_sampler, *images.image_views[Easu],
images.descriptor_sets[Rcas], 0));
m_device.GetLogical().UpdateDescriptorSets(updates, {});
}
void FSR::CreateShaders() {
if (device.IsFloat16Supported()) {
easu_shader = BuildShader(device, VULKAN_FIDELITYFX_FSR_EASU_FP16_COMP_SPV);
rcas_shader = BuildShader(device, VULKAN_FIDELITYFX_FSR_RCAS_FP16_COMP_SPV);
} else {
easu_shader = BuildShader(device, VULKAN_FIDELITYFX_FSR_EASU_FP32_COMP_SPV);
rcas_shader = BuildShader(device, VULKAN_FIDELITYFX_FSR_RCAS_FP32_COMP_SPV);
void FSR::UploadImages(Scheduler& scheduler) {
if (m_images_ready) {
return;
}
scheduler.Record([&](vk::CommandBuffer cmdbuf) {
for (auto& image : m_dynamic_images) {
ClearColorImage(cmdbuf, *image.images[Easu]);
ClearColorImage(cmdbuf, *image.images[Rcas]);
}
});
scheduler.Finish();
m_images_ready = true;
}
void FSR::CreatePipeline() {
VkPipelineShaderStageCreateInfo shader_stage_easu{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
.module = *easu_shader,
.pName = "main",
.pSpecializationInfo = nullptr,
};
VkImageView FSR::Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
VkImageView source_image_view, VkExtent2D input_image_extent,
const Common::Rectangle<f32>& crop_rect) {
Images& images = m_dynamic_images[image_index];
VkPipelineShaderStageCreateInfo shader_stage_rcas{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_COMPUTE_BIT,
.module = *rcas_shader,
.pName = "main",
.pSpecializationInfo = nullptr,
};
VkImage easu_image = *images.images[Easu];
VkImage rcas_image = *images.images[Rcas];
VkDescriptorSet easu_descriptor_set = images.descriptor_sets[Easu];
VkDescriptorSet rcas_descriptor_set = images.descriptor_sets[Rcas];
VkFramebuffer easu_framebuffer = *images.framebuffers[Easu];
VkFramebuffer rcas_framebuffer = *images.framebuffers[Rcas];
VkPipeline easu_pipeline = *m_easu_pipeline;
VkPipeline rcas_pipeline = *m_rcas_pipeline;
VkPipelineLayout pipeline_layout = *m_pipeline_layout;
VkRenderPass renderpass = *m_renderpass;
VkExtent2D extent = m_extent;
VkComputePipelineCreateInfo pipeline_ci_easu{
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = shader_stage_easu,
.layout = *pipeline_layout,
.basePipelineHandle = VK_NULL_HANDLE,
.basePipelineIndex = 0,
};
const f32 input_image_width = static_cast<f32>(input_image_extent.width);
const f32 input_image_height = static_cast<f32>(input_image_extent.height);
const f32 output_image_width = static_cast<f32>(extent.width);
const f32 output_image_height = static_cast<f32>(extent.height);
const f32 viewport_width = (crop_rect.right - crop_rect.left) * input_image_width;
const f32 viewport_x = crop_rect.left * input_image_width;
const f32 viewport_height = (crop_rect.bottom - crop_rect.top) * input_image_height;
const f32 viewport_y = crop_rect.top * input_image_height;
VkComputePipelineCreateInfo pipeline_ci_rcas{
.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = shader_stage_rcas,
.layout = *pipeline_layout,
.basePipelineHandle = VK_NULL_HANDLE,
.basePipelineIndex = 0,
};
PushConstants easu_con{};
PushConstants rcas_con{};
FsrEasuConOffset(easu_con.data() + 0, easu_con.data() + 4, easu_con.data() + 8,
easu_con.data() + 12, viewport_width, viewport_height, input_image_width,
input_image_height, output_image_width, output_image_height, viewport_x,
viewport_y);
easu_pipeline = device.GetLogical().CreateComputePipeline(pipeline_ci_easu);
rcas_pipeline = device.GetLogical().CreateComputePipeline(pipeline_ci_rcas);
const float sharpening =
static_cast<float>(Settings::values.fsr_sharpening_slider.GetValue()) / 100.0f;
FsrRcasCon(rcas_con.data(), sharpening);
UploadImages(scheduler);
UpdateDescriptorSets(source_image_view, image_index);
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([=](vk::CommandBuffer cmdbuf) {
TransitionImageLayout(cmdbuf, source_image, VK_IMAGE_LAYOUT_GENERAL);
TransitionImageLayout(cmdbuf, easu_image, VK_IMAGE_LAYOUT_GENERAL);
BeginRenderPass(cmdbuf, renderpass, easu_framebuffer, extent);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, easu_pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0,
easu_descriptor_set, {});
cmdbuf.PushConstants(pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, easu_con);
cmdbuf.Draw(3, 1, 0, 0);
cmdbuf.EndRenderPass();
TransitionImageLayout(cmdbuf, easu_image, VK_IMAGE_LAYOUT_GENERAL);
TransitionImageLayout(cmdbuf, rcas_image, VK_IMAGE_LAYOUT_GENERAL);
BeginRenderPass(cmdbuf, renderpass, rcas_framebuffer, extent);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, rcas_pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0,
rcas_descriptor_set, {});
cmdbuf.PushConstants(pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT, rcas_con);
cmdbuf.Draw(3, 1, 0, 0);
cmdbuf.EndRenderPass();
TransitionImageLayout(cmdbuf, rcas_image, VK_IMAGE_LAYOUT_GENERAL);
});
return *images.image_views[Rcas];
}
} // namespace Vulkan

View file

@ -15,38 +15,55 @@ class Scheduler;
class FSR {
public:
explicit FSR(const Device& device, MemoryAllocator& memory_allocator, size_t image_count,
VkExtent2D output_size);
VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImageView image_view,
VkExtent2D input_image_extent, const Common::Rectangle<f32>& crop_rect);
VkExtent2D extent);
VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
VkImageView source_image_view, VkExtent2D input_image_extent,
const Common::Rectangle<f32>& crop_rect);
private:
void CreateImages();
void CreateRenderPasses();
void CreateSampler();
void CreateShaders();
void CreateDescriptorPool();
void CreateDescriptorSetLayout();
void CreateDescriptorSets();
void CreateImages();
void CreateSampler();
void CreateShaders();
void CreatePipeline();
void CreatePipelineLayout();
void CreatePipelineLayouts();
void CreatePipelines();
void UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const;
void UploadImages(Scheduler& scheduler);
void UpdateDescriptorSets(VkImageView image_view, size_t image_index);
const Device& device;
MemoryAllocator& memory_allocator;
size_t image_count;
VkExtent2D output_size;
const Device& m_device;
MemoryAllocator& m_memory_allocator;
const size_t m_image_count;
const VkExtent2D m_extent;
vk::DescriptorPool descriptor_pool;
vk::DescriptorSetLayout descriptor_set_layout;
vk::DescriptorSets descriptor_sets;
vk::PipelineLayout pipeline_layout;
vk::ShaderModule easu_shader;
vk::ShaderModule rcas_shader;
vk::Pipeline easu_pipeline;
vk::Pipeline rcas_pipeline;
vk::Sampler sampler;
std::vector<vk::Image> images;
std::vector<vk::ImageView> image_views;
enum FsrStage {
Easu,
Rcas,
MaxFsrStage,
};
vk::DescriptorPool m_descriptor_pool;
vk::DescriptorSetLayout m_descriptor_set_layout;
vk::PipelineLayout m_pipeline_layout;
vk::ShaderModule m_vert_shader;
vk::ShaderModule m_easu_shader;
vk::ShaderModule m_rcas_shader;
vk::Pipeline m_easu_pipeline;
vk::Pipeline m_rcas_pipeline;
vk::RenderPass m_renderpass;
vk::Sampler m_sampler;
struct Images {
vk::DescriptorSets descriptor_sets;
std::array<vk::Image, MaxFsrStage> images;
std::array<vk::ImageView, MaxFsrStage> image_views;
std::array<vk::Framebuffer, MaxFsrStage> framebuffers;
};
std::vector<Images> m_dynamic_images;
bool m_images_ready{};
};
} // namespace Vulkan

View file

@@ -63,7 +63,9 @@ void FXAA::CreateDescriptorPool() {
}
void FXAA::CreateDescriptorSetLayouts() {
m_descriptor_set_layout = CreateWrappedDescriptorSetLayout(m_device, 2);
m_descriptor_set_layout =
CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
}
void FXAA::CreateDescriptorSets() {
@@ -112,9 +114,10 @@ void FXAA::UploadImages(Scheduler& scheduler) {
m_images_ready = true;
}
VkImageView FXAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
VkImageView source_image_view) {
void FXAA::Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
VkImageView* inout_image_view) {
const Image& image{m_dynamic_images[image_index]};
const VkImage input_image{*inout_image};
const VkImage output_image{*image.image};
const VkDescriptorSet descriptor_set{image.descriptor_sets[0]};
const VkFramebuffer framebuffer{*image.framebuffer};
@@ -124,11 +127,11 @@ VkImageView FXAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_
const VkExtent2D extent{m_extent};
UploadImages(scheduler);
UpdateDescriptorSets(source_image_view, image_index);
UpdateDescriptorSets(*inout_image_view, image_index);
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([=](vk::CommandBuffer cmdbuf) {
TransitionImageLayout(cmdbuf, source_image, VK_IMAGE_LAYOUT_GENERAL);
TransitionImageLayout(cmdbuf, input_image, VK_IMAGE_LAYOUT_GENERAL);
TransitionImageLayout(cmdbuf, output_image, VK_IMAGE_LAYOUT_GENERAL);
BeginRenderPass(cmdbuf, renderpass, framebuffer, extent);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
@@ -138,7 +141,8 @@ VkImageView FXAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_
TransitionImageLayout(cmdbuf, output_image, VK_IMAGE_LAYOUT_GENERAL);
});
return *image.image_view;
*inout_image = *image.image;
*inout_image_view = *image.image_view;
}
} // namespace Vulkan

View file

@@ -19,8 +19,8 @@ public:
VkExtent2D extent);
~FXAA() override;
VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
VkImageView source_image_view) override;
void Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
VkImageView* inout_image_view) override;
private:
void CreateImages();

View file

@@ -122,10 +122,15 @@ void SMAA::CreateDescriptorPool() {
}
void SMAA::CreateDescriptorSetLayouts() {
m_descriptor_set_layouts[EdgeDetection] = CreateWrappedDescriptorSetLayout(m_device, 1);
m_descriptor_set_layouts[EdgeDetection] =
CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
m_descriptor_set_layouts[BlendingWeightCalculation] =
CreateWrappedDescriptorSetLayout(m_device, 3);
m_descriptor_set_layouts[NeighborhoodBlending] = CreateWrappedDescriptorSetLayout(m_device, 2);
CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
m_descriptor_set_layouts[NeighborhoodBlending] =
CreateWrappedDescriptorSetLayout(m_device, {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
}
void SMAA::CreateDescriptorSets() {
@@ -204,10 +209,11 @@ void SMAA::UploadImages(Scheduler& scheduler) {
m_images_ready = true;
}
VkImageView SMAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
VkImageView source_image_view) {
void SMAA::Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
VkImageView* inout_image_view) {
Images& images = m_dynamic_images[image_index];
VkImage input_image = *inout_image;
VkImage output_image = *images.images[Output];
VkImage edges_image = *images.images[Edges];
VkImage blend_image = *images.images[Blend];
@@ -224,11 +230,11 @@ VkImageView SMAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_
VkFramebuffer neighborhood_blending_framebuffer = *images.framebuffers[NeighborhoodBlending];
UploadImages(scheduler);
UpdateDescriptorSets(source_image_view, image_index);
UpdateDescriptorSets(*inout_image_view, image_index);
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([=, this](vk::CommandBuffer cmdbuf) {
TransitionImageLayout(cmdbuf, source_image, VK_IMAGE_LAYOUT_GENERAL);
TransitionImageLayout(cmdbuf, input_image, VK_IMAGE_LAYOUT_GENERAL);
TransitionImageLayout(cmdbuf, edges_image, VK_IMAGE_LAYOUT_GENERAL);
BeginRenderPass(cmdbuf, *m_renderpasses[EdgeDetection], edge_detection_framebuffer,
m_extent);
@@ -264,7 +270,8 @@ VkImageView SMAA::Draw(Scheduler& scheduler, size_t image_index, VkImage source_
TransitionImageLayout(cmdbuf, output_image, VK_IMAGE_LAYOUT_GENERAL);
});
return *images.image_views[Output];
*inout_image = *images.images[Output];
*inout_image_view = *images.image_views[Output];
}
} // namespace Vulkan

View file

@@ -20,8 +20,8 @@ public:
VkExtent2D extent);
~SMAA() override;
VkImageView Draw(Scheduler& scheduler, size_t image_index, VkImage source_image,
VkImageView source_image_view) override;
void Draw(Scheduler& scheduler, size_t image_index, VkImage* inout_image,
VkImageView* inout_image_view) override;
private:
enum SMAAStage {

View file

@@ -215,32 +215,37 @@ vk::ShaderModule CreateWrappedShaderModule(const Device& device, std::span<const
});
}
vk::DescriptorPool CreateWrappedDescriptorPool(const Device& device, u32 max_descriptors,
u32 max_sets) {
const VkDescriptorPoolSize pool_size{
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = static_cast<u32>(max_descriptors),
};
vk::DescriptorPool CreateWrappedDescriptorPool(const Device& device, size_t max_descriptors,
size_t max_sets,
std::initializer_list<VkDescriptorType> types) {
std::vector<VkDescriptorPoolSize> pool_sizes(types.size());
for (u32 i = 0; i < types.size(); i++) {
pool_sizes[i] = VkDescriptorPoolSize{
.type = std::data(types)[i],
.descriptorCount = static_cast<u32>(max_descriptors),
};
}
return device.GetLogical().CreateDescriptorPool(VkDescriptorPoolCreateInfo{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.maxSets = max_sets,
.poolSizeCount = 1,
.pPoolSizes = &pool_size,
.maxSets = static_cast<u32>(max_sets),
.poolSizeCount = static_cast<u32>(pool_sizes.size()),
.pPoolSizes = pool_sizes.data(),
});
}
vk::DescriptorSetLayout CreateWrappedDescriptorSetLayout(const Device& device,
u32 max_sampler_bindings) {
std::vector<VkDescriptorSetLayoutBinding> bindings(max_sampler_bindings);
for (u32 i = 0; i < max_sampler_bindings; i++) {
vk::DescriptorSetLayout CreateWrappedDescriptorSetLayout(
const Device& device, std::initializer_list<VkDescriptorType> types) {
std::vector<VkDescriptorSetLayoutBinding> bindings(types.size());
for (size_t i = 0; i < types.size(); i++) {
bindings[i] = {
.binding = i,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.binding = static_cast<u32>(i),
.descriptorType = std::data(types)[i],
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
VK_SHADER_STAGE_COMPUTE_BIT,
.pImmutableSamplers = nullptr,
};
}

View file

@@ -25,10 +25,12 @@ vk::Framebuffer CreateWrappedFramebuffer(const Device& device, vk::RenderPass& r
vk::ImageView& dest_image, VkExtent2D extent);
vk::Sampler CreateWrappedSampler(const Device& device, VkFilter filter = VK_FILTER_LINEAR);
vk::ShaderModule CreateWrappedShaderModule(const Device& device, std::span<const u32> code);
vk::DescriptorPool CreateWrappedDescriptorPool(const Device& device, u32 max_sampler_bindings,
u32 max_sets);
vk::DescriptorSetLayout CreateWrappedDescriptorSetLayout(const Device& device,
u32 max_sampler_bindings);
vk::DescriptorPool CreateWrappedDescriptorPool(const Device& device, size_t max_descriptors,
size_t max_sets,
std::initializer_list<VkDescriptorType> types = {
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER});
vk::DescriptorSetLayout CreateWrappedDescriptorSetLayout(
const Device& device, std::initializer_list<VkDescriptorType> types);
vk::DescriptorSets CreateWrappedDescriptorSets(vk::DescriptorPool& pool,
vk::Span<VkDescriptorSetLayout> layouts);
vk::PipelineLayout CreateWrappedPipelineLayout(const Device& device,

View file

@@ -234,7 +234,7 @@ void BlitScreen::Draw(RasterizerVulkan& rasterizer, const Tegra::FramebufferConf
});
}
source_image_view = anti_alias->Draw(scheduler, image_index, source_image, source_image_view);
anti_alias->Draw(scheduler, image_index, &source_image, &source_image_view);
const auto crop_rect = Tegra::NormalizeCrop(framebuffer, texture_width, texture_height);
const VkExtent2D render_extent{
@@ -248,8 +248,8 @@ void BlitScreen::Draw(RasterizerVulkan& rasterizer, const Tegra::FramebufferConf
.height = layout.screen.GetHeight(),
};
source_image_view =
fsr->Draw(scheduler, image_index, source_image_view, render_extent, crop_rect);
source_image_view = fsr->Draw(scheduler, image_index, source_image, source_image_view,
render_extent, crop_rect);
const Common::Rectangle<f32> output_crop{0, 0, 1, 1};
window_adapt->Draw(scheduler, image_index, source_image_view, adapt_size, output_crop,