Merge pull request #3513 from ReinUsesLisp/native-astc
video_core: Use native ASTC when available
Commit: 588a20be3f

12 changed files with 251 additions and 305 deletions
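In broad strokes, the change works like this: each backend now probes once, at device creation, whether the host GPU can sample ASTC textures natively (IsASTCSupported() on OpenGL, VKDevice::IsOptimalAstcSupported() on Vulkan), and the shared texture cache only falls back to decoding ASTC into RGBA8 in software when that probe fails. A minimal sketch of the decision, with hypothetical names and not the exact yuzu code:

    // Sketch only: is_astc_supported comes from the device probe, and
    // "converted" means taking the software ASTC -> RGBA8 fallback path.
    bool ShouldConvertAstc(bool is_astc_format, bool is_astc_supported) {
        return is_astc_format && !is_astc_supported;
    }

The same predicate appears verbatim in the diff as is_converted = IsPixelFormatASTC(params.pixel_format) && !is_astc_supported; inside SurfaceBaseImpl.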
@@ -131,6 +131,31 @@ std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindin
     return bindings;
 }
 
+bool IsASTCSupported() {
+    static constexpr std::array formats = {
+        GL_COMPRESSED_RGBA_ASTC_4x4_KHR, GL_COMPRESSED_RGBA_ASTC_5x4_KHR,
+        GL_COMPRESSED_RGBA_ASTC_5x5_KHR, GL_COMPRESSED_RGBA_ASTC_6x5_KHR,
+        GL_COMPRESSED_RGBA_ASTC_6x6_KHR, GL_COMPRESSED_RGBA_ASTC_8x5_KHR,
+        GL_COMPRESSED_RGBA_ASTC_8x6_KHR, GL_COMPRESSED_RGBA_ASTC_8x8_KHR,
+        GL_COMPRESSED_RGBA_ASTC_10x5_KHR, GL_COMPRESSED_RGBA_ASTC_10x6_KHR,
+        GL_COMPRESSED_RGBA_ASTC_10x8_KHR, GL_COMPRESSED_RGBA_ASTC_10x10_KHR,
+        GL_COMPRESSED_RGBA_ASTC_12x10_KHR, GL_COMPRESSED_RGBA_ASTC_12x12_KHR,
+        GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR,
+        GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR,
+        GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR,
+        GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR,
+        GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR,
+        GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR,
+        GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR,
+    };
+    return std::find_if_not(formats.begin(), formats.end(), [](GLenum format) {
+               GLint supported;
+               glGetInternalformativ(GL_TEXTURE_2D, format, GL_INTERNALFORMAT_SUPPORTED, 1,
+                                     &supported);
+               return supported == GL_TRUE;
+           }) == formats.end();
+}
+
 } // Anonymous namespace
 
 Device::Device() : base_bindings{BuildBaseBindings()} {
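The probe above relies on glGetInternalformativ with GL_INTERNALFORMAT_SUPPORTED: the driver is asked, for each ASTC internal format, whether it is usable with GL_TEXTURE_2D. std::find_if_not only returns formats.end() when every query answers GL_TRUE, so a single unsupported block size disables native ASTC for the whole backend. The same logic written as a plain loop, shown purely as an illustration:

    bool all_supported = true;
    for (const GLenum format : formats) {
        GLint supported = GL_FALSE;
        glGetInternalformativ(GL_TEXTURE_2D, format, GL_INTERNALFORMAT_SUPPORTED, 1, &supported);
        all_supported = all_supported && supported == GL_TRUE;
    }
    return all_supported;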
@@ -152,6 +177,7 @@ Device::Device() : base_bindings{BuildBaseBindings()} {
     has_shader_ballot = GLAD_GL_ARB_shader_ballot;
     has_vertex_viewport_layer = GLAD_GL_ARB_shader_viewport_layer_array;
     has_image_load_formatted = HasExtension(extensions, "GL_EXT_shader_image_load_formatted");
+    has_astc = IsASTCSupported();
     has_variable_aoffi = TestVariableAoffi();
     has_component_indexing_bug = is_amd;
     has_precise_bug = TestPreciseBug();
@@ -64,6 +64,10 @@ public:
         return has_image_load_formatted;
     }
 
+    bool HasASTC() const {
+        return has_astc;
+    }
+
     bool HasVariableAoffi() const {
         return has_variable_aoffi;
     }
@@ -97,6 +101,7 @@ private:
     bool has_shader_ballot{};
     bool has_vertex_viewport_layer{};
     bool has_image_load_formatted{};
+    bool has_astc{};
     bool has_variable_aoffi{};
     bool has_component_indexing_bug{};
     bool has_precise_bug{};
@@ -24,7 +24,6 @@ using Tegra::Texture::SwizzleSource;
 using VideoCore::MortonSwizzleMode;
 
 using VideoCore::Surface::PixelFormat;
-using VideoCore::Surface::SurfaceCompression;
 using VideoCore::Surface::SurfaceTarget;
 using VideoCore::Surface::SurfaceType;
 
@@ -37,102 +36,100 @@ namespace {
 
 struct FormatTuple {
     GLint internal_format;
-    GLenum format;
-    GLenum type;
-    bool compressed;
+    GLenum format = GL_NONE;
+    GLenum type = GL_NONE;
 };
 
 constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format_tuples = {{
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // ABGR8U
-    {GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, false}, // ABGR8S
-    {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, false}, // ABGR8UI
-    {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false}, // B5G6R5U
-    {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, false}, // A2B10G10R10U
-    {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV, false}, // A1B5G5R5U
-    {GL_R8, GL_RED, GL_UNSIGNED_BYTE, false}, // R8U
-    {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, false}, // R8UI
-    {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, false}, // RGBA16F
-    {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, false}, // RGBA16U
-    {GL_RGBA16_SNORM, GL_RGBA, GL_SHORT, false}, // RGBA16S
-    {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, false}, // RGBA16UI
-    {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, false}, // R11FG11FB10F
-    {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, false}, // RGBA32UI
-    {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1
-    {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23
-    {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45
-    {GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_INT_8_8_8_8, true}, // DXN1
-    {GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_INT_8_8_8_8, true}, // DXN2UNORM
-    {GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_INT, true}, // DXN2SNORM
-    {GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // BC7U
-    {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // BC6H_UF16
-    {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // BC6H_SF16
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_4X4
-    {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, false}, // BGRA8
-    {GL_RGBA32F, GL_RGBA, GL_FLOAT, false}, // RGBA32F
-    {GL_RG32F, GL_RG, GL_FLOAT, false}, // RG32F
-    {GL_R32F, GL_RED, GL_FLOAT, false}, // R32F
-    {GL_R16F, GL_RED, GL_HALF_FLOAT, false}, // R16F
-    {GL_R16, GL_RED, GL_UNSIGNED_SHORT, false}, // R16U
-    {GL_R16_SNORM, GL_RED, GL_SHORT, false}, // R16S
-    {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, false}, // R16UI
-    {GL_R16I, GL_RED_INTEGER, GL_SHORT, false}, // R16I
-    {GL_RG16, GL_RG, GL_UNSIGNED_SHORT, false}, // RG16
-    {GL_RG16F, GL_RG, GL_HALF_FLOAT, false}, // RG16F
-    {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, false}, // RG16UI
-    {GL_RG16I, GL_RG_INTEGER, GL_SHORT, false}, // RG16I
-    {GL_RG16_SNORM, GL_RG, GL_SHORT, false}, // RG16S
-    {GL_RGB32F, GL_RGB, GL_FLOAT, false}, // RGB32F
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // RGBA8_SRGB
-    {GL_RG8, GL_RG, GL_UNSIGNED_BYTE, false}, // RG8U
-    {GL_RG8_SNORM, GL_RG, GL_BYTE, false}, // RG8S
-    {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, false}, // RG32UI
-    {GL_RGB16F, GL_RGBA, GL_HALF_FLOAT, false}, // RGBX16F
-    {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, false}, // R32UI
-    {GL_R32I, GL_RED_INTEGER, GL_INT, false}, // R32I
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X8
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X5
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X4
-    {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, false}, // BGRA8
+    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // ABGR8U
+    {GL_RGBA8_SNORM, GL_RGBA, GL_BYTE}, // ABGR8S
+    {GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE}, // ABGR8UI
+    {GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV}, // B5G6R5U
+    {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV}, // A2B10G10R10U
+    {GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_1_5_5_5_REV}, // A1B5G5R5U
+    {GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R8U
+    {GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE}, // R8UI
+    {GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT}, // RGBA16F
+    {GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT}, // RGBA16U
+    {GL_RGBA16_SNORM, GL_RGBA, GL_SHORT}, // RGBA16S
+    {GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT}, // RGBA16UI
+    {GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV}, // R11FG11FB10F
+    {GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT}, // RGBA32UI
+    {GL_COMPRESSED_RGBA_S3TC_DXT1_EXT}, // DXT1
+    {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT}, // DXT23
+    {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT}, // DXT45
+    {GL_COMPRESSED_RED_RGTC1}, // DXN1
+    {GL_COMPRESSED_RG_RGTC2}, // DXN2UNORM
+    {GL_COMPRESSED_SIGNED_RG_RGTC2}, // DXN2SNORM
+    {GL_COMPRESSED_RGBA_BPTC_UNORM}, // BC7U
+    {GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT}, // BC6H_UF16
+    {GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT}, // BC6H_SF16
+    {GL_COMPRESSED_RGBA_ASTC_4x4_KHR}, // ASTC_2D_4X4
+    {GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE}, // BGRA8
+    {GL_RGBA32F, GL_RGBA, GL_FLOAT}, // RGBA32F
+    {GL_RG32F, GL_RG, GL_FLOAT}, // RG32F
+    {GL_R32F, GL_RED, GL_FLOAT}, // R32F
+    {GL_R16F, GL_RED, GL_HALF_FLOAT}, // R16F
+    {GL_R16, GL_RED, GL_UNSIGNED_SHORT}, // R16U
+    {GL_R16_SNORM, GL_RED, GL_SHORT}, // R16S
+    {GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT}, // R16UI
+    {GL_R16I, GL_RED_INTEGER, GL_SHORT}, // R16I
+    {GL_RG16, GL_RG, GL_UNSIGNED_SHORT}, // RG16
+    {GL_RG16F, GL_RG, GL_HALF_FLOAT}, // RG16F
+    {GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT}, // RG16UI
+    {GL_RG16I, GL_RG_INTEGER, GL_SHORT}, // RG16I
+    {GL_RG16_SNORM, GL_RG, GL_SHORT}, // RG16S
+    {GL_RGB32F, GL_RGB, GL_FLOAT}, // RGB32F
+    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV}, // RGBA8_SRGB
+    {GL_RG8, GL_RG, GL_UNSIGNED_BYTE}, // RG8U
+    {GL_RG8_SNORM, GL_RG, GL_BYTE}, // RG8S
+    {GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT}, // RG32UI
+    {GL_RGB16F, GL_RGBA, GL_HALF_FLOAT}, // RGBX16F
+    {GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT}, // R32UI
+    {GL_R32I, GL_RED_INTEGER, GL_INT}, // R32I
+    {GL_COMPRESSED_RGBA_ASTC_8x8_KHR}, // ASTC_2D_8X8
+    {GL_COMPRESSED_RGBA_ASTC_8x5_KHR}, // ASTC_2D_8X5
+    {GL_COMPRESSED_RGBA_ASTC_5x4_KHR}, // ASTC_2D_5X4
+    {GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE}, // BGRA8
     // Compressed sRGB formats
-    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1_SRGB
-    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23_SRGB
-    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45_SRGB
-    {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // BC7U_SRGB
-    {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV, false}, // R4G4B4A4U
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_4X4_SRGB
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X8_SRGB
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X5_SRGB
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X4_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X5
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_5X5_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X8
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X8_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X6
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X6_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X10
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_10X10_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_12X12
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_12X12_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X6
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_8X6_SRGB
-    {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X5
-    {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, false}, // ASTC_2D_6X5_SRGB
-    {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, false}, // E5B9G9R9F
+    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT}, // DXT1_SRGB
+    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT}, // DXT23_SRGB
+    {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT}, // DXT45_SRGB
+    {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM}, // BC7U_SRGB
+    {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // R4G4B4A4U
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR}, // ASTC_2D_4X4_SRGB
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR}, // ASTC_2D_8X8_SRGB
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR}, // ASTC_2D_8X5_SRGB
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR}, // ASTC_2D_5X4_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_5x5_KHR}, // ASTC_2D_5X5
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR}, // ASTC_2D_5X5_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_10x8_KHR}, // ASTC_2D_10X8
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR}, // ASTC_2D_10X8_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_6x6_KHR}, // ASTC_2D_6X6
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR}, // ASTC_2D_6X6_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_10x10_KHR}, // ASTC_2D_10X10
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR}, // ASTC_2D_10X10_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_12x12_KHR}, // ASTC_2D_12X12
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR}, // ASTC_2D_12X12_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_8x6_KHR}, // ASTC_2D_8X6
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR}, // ASTC_2D_8X6_SRGB
+    {GL_COMPRESSED_RGBA_ASTC_6x5_KHR}, // ASTC_2D_6X5
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR}, // ASTC_2D_6X5_SRGB
+    {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV}, // E5B9G9R9F
 
     // Depth formats
-    {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, false}, // Z32F
-    {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, false}, // Z16
+    {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT}, // Z32F
+    {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT}, // Z16
 
     // DepthStencil formats
-    {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, false}, // Z24S8
-    {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, false}, // S8Z24
-    {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, false}, // Z32FS8
+    {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // Z24S8
+    {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // S8Z24
+    {GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV}, // Z32FS8
 }};
 
 const FormatTuple& GetFormatTuple(PixelFormat pixel_format) {
     ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size());
-    const auto& format{tex_format_tuples[static_cast<std::size_t>(pixel_format)]};
-    return format;
+    return tex_format_tuples[static_cast<std::size_t>(pixel_format)];
 }
 
 GLenum GetTextureTarget(const SurfaceTarget& target) {
@@ -242,13 +239,20 @@ OGLTexture CreateTexture(const SurfaceParams& params, GLenum target, GLenum inte
 
 } // Anonymous namespace
 
-CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params)
-    : VideoCommon::SurfaceBase<View>(gpu_addr, params) {
+CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params,
+                             bool is_astc_supported)
+    : VideoCommon::SurfaceBase<View>(gpu_addr, params, is_astc_supported) {
+    if (is_converted) {
+        internal_format = params.srgb_conversion ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+        format = GL_RGBA;
+        type = GL_UNSIGNED_BYTE;
+    } else {
         const auto& tuple{GetFormatTuple(params.pixel_format)};
         internal_format = tuple.internal_format;
         format = tuple.format;
         type = tuple.type;
-    is_compressed = tuple.compressed;
+        is_compressed = params.IsCompressed();
+    }
     target = GetTextureTarget(params.target);
     texture = CreateTexture(params, target, internal_format, texture_buffer);
     DecorateSurfaceName();
@@ -264,7 +268,7 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
 
     if (params.IsBuffer()) {
         glGetNamedBufferSubData(texture_buffer.handle, 0,
-                                static_cast<GLsizeiptr>(params.GetHostSizeInBytes()),
+                                static_cast<GLsizeiptr>(params.GetHostSizeInBytes(false)),
                                 staging_buffer.data());
         return;
     }
@@ -272,9 +276,10 @@
     SCOPE_EXIT({ glPixelStorei(GL_PACK_ROW_LENGTH, 0); });
 
     for (u32 level = 0; level < params.emulated_levels; ++level) {
-        glPixelStorei(GL_PACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+        glPixelStorei(GL_PACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level, is_converted)));
         glPixelStorei(GL_PACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
-        const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level);
+        const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted);
+
         u8* const mip_data = staging_buffer.data() + mip_offset;
         const GLsizei size = static_cast<GLsizei>(params.GetHostMipmapSize(level));
         if (is_compressed) {
@@ -294,14 +299,10 @@ void CachedSurface::UploadTexture(const std::vector<u8>& staging_buffer) {
 }
 
 void CachedSurface::UploadTextureMipmap(u32 level, const std::vector<u8>& staging_buffer) {
-    glPixelStorei(GL_UNPACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
+    glPixelStorei(GL_UNPACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level, is_converted)));
     glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
 
-    auto compression_type = params.GetCompressionType();
-
-    const std::size_t mip_offset = compression_type == SurfaceCompression::Converted
-                                       ? params.GetConvertedMipmapOffset(level)
-                                       : params.GetHostMipmapLevelOffset(level);
+    const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted);
     const u8* buffer{staging_buffer.data() + mip_offset};
     if (is_compressed) {
         const auto image_size{static_cast<GLsizei>(params.GetHostMipmapSize(level))};
|
||||||
TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system,
|
TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system,
|
||||||
VideoCore::RasterizerInterface& rasterizer,
|
VideoCore::RasterizerInterface& rasterizer,
|
||||||
const Device& device, StateTracker& state_tracker)
|
const Device& device, StateTracker& state_tracker)
|
||||||
: TextureCacheBase{system, rasterizer}, state_tracker{state_tracker} {
|
: TextureCacheBase{system, rasterizer, device.HasASTC()}, state_tracker{state_tracker} {
|
||||||
src_framebuffer.Create();
|
src_framebuffer.Create();
|
||||||
dst_framebuffer.Create();
|
dst_framebuffer.Create();
|
||||||
}
|
}
|
||||||
|
@ -490,7 +491,7 @@ TextureCacheOpenGL::TextureCacheOpenGL(Core::System& system,
|
||||||
TextureCacheOpenGL::~TextureCacheOpenGL() = default;
|
TextureCacheOpenGL::~TextureCacheOpenGL() = default;
|
||||||
|
|
||||||
Surface TextureCacheOpenGL::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
|
Surface TextureCacheOpenGL::CreateSurface(GPUVAddr gpu_addr, const SurfaceParams& params) {
|
||||||
return std::make_shared<CachedSurface>(gpu_addr, params);
|
return std::make_shared<CachedSurface>(gpu_addr, params, is_astc_supported);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TextureCacheOpenGL::ImageCopy(Surface& src_surface, Surface& dst_surface,
|
void TextureCacheOpenGL::ImageCopy(Surface& src_surface, Surface& dst_surface,
|
||||||
|
@ -596,7 +597,7 @@ void TextureCacheOpenGL::BufferCopy(Surface& src_surface, Surface& dst_surface)
|
||||||
|
|
||||||
glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle);
|
glBindBuffer(GL_PIXEL_PACK_BUFFER, copy_pbo_handle);
|
||||||
|
|
||||||
if (source_format.compressed) {
|
if (src_surface->IsCompressed()) {
|
||||||
glGetCompressedTextureImage(src_surface->GetTexture(), 0, static_cast<GLsizei>(source_size),
|
glGetCompressedTextureImage(src_surface->GetTexture(), 0, static_cast<GLsizei>(source_size),
|
||||||
nullptr);
|
nullptr);
|
||||||
} else {
|
} else {
|
||||||
|
@ -610,7 +611,7 @@ void TextureCacheOpenGL::BufferCopy(Surface& src_surface, Surface& dst_surface)
|
||||||
const GLsizei width = static_cast<GLsizei>(dst_params.width);
|
const GLsizei width = static_cast<GLsizei>(dst_params.width);
|
||||||
const GLsizei height = static_cast<GLsizei>(dst_params.height);
|
const GLsizei height = static_cast<GLsizei>(dst_params.height);
|
||||||
const GLsizei depth = static_cast<GLsizei>(dst_params.depth);
|
const GLsizei depth = static_cast<GLsizei>(dst_params.depth);
|
||||||
if (dest_format.compressed) {
|
if (dst_surface->IsCompressed()) {
|
||||||
LOG_CRITICAL(HW_GPU, "Compressed buffer copy is unimplemented!");
|
LOG_CRITICAL(HW_GPU, "Compressed buffer copy is unimplemented!");
|
||||||
UNREACHABLE();
|
UNREACHABLE();
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@@ -37,7 +37,7 @@ class CachedSurface final : public VideoCommon::SurfaceBase<View> {
     friend CachedSurfaceView;
 
 public:
-    explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params);
+    explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params, bool is_astc_supported);
     ~CachedSurface();
 
     void UploadTexture(const std::vector<u8>& staging_buffer) override;
@@ -51,6 +51,10 @@ public:
         return texture.handle;
     }
 
+    bool IsCompressed() const {
+        return is_compressed;
+    }
+
 protected:
     void DecorateSurfaceName() override;
 
@@ -237,18 +237,21 @@ void VKDevice::ReportLoss() const {
 
 bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features,
                                       const vk::DispatchLoaderDynamic& dldi) const {
-    // Disable for now to avoid converting ASTC twice.
-    return false;
     static constexpr std::array astc_formats = {
-        vk::Format::eAstc4x4SrgbBlock, vk::Format::eAstc8x8SrgbBlock,
-        vk::Format::eAstc8x5SrgbBlock, vk::Format::eAstc5x4SrgbBlock,
+        vk::Format::eAstc4x4UnormBlock, vk::Format::eAstc4x4SrgbBlock,
+        vk::Format::eAstc5x4UnormBlock, vk::Format::eAstc5x4SrgbBlock,
         vk::Format::eAstc5x5UnormBlock, vk::Format::eAstc5x5SrgbBlock,
-        vk::Format::eAstc10x8UnormBlock, vk::Format::eAstc10x8SrgbBlock,
+        vk::Format::eAstc6x5UnormBlock, vk::Format::eAstc6x5SrgbBlock,
         vk::Format::eAstc6x6UnormBlock, vk::Format::eAstc6x6SrgbBlock,
-        vk::Format::eAstc10x10UnormBlock, vk::Format::eAstc10x10SrgbBlock,
-        vk::Format::eAstc12x12UnormBlock, vk::Format::eAstc12x12SrgbBlock,
+        vk::Format::eAstc8x5UnormBlock, vk::Format::eAstc8x5SrgbBlock,
         vk::Format::eAstc8x6UnormBlock, vk::Format::eAstc8x6SrgbBlock,
-        vk::Format::eAstc6x5UnormBlock, vk::Format::eAstc6x5SrgbBlock};
+        vk::Format::eAstc8x8UnormBlock, vk::Format::eAstc8x8SrgbBlock,
+        vk::Format::eAstc10x5UnormBlock, vk::Format::eAstc10x5SrgbBlock,
+        vk::Format::eAstc10x6UnormBlock, vk::Format::eAstc10x6SrgbBlock,
+        vk::Format::eAstc10x8UnormBlock, vk::Format::eAstc10x8SrgbBlock,
+        vk::Format::eAstc10x10UnormBlock, vk::Format::eAstc10x10SrgbBlock,
+        vk::Format::eAstc12x10UnormBlock, vk::Format::eAstc12x10SrgbBlock,
+        vk::Format::eAstc12x12UnormBlock, vk::Format::eAstc12x12SrgbBlock};
     if (!features.textureCompressionASTC_LDR) {
         return false;
     }
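With this hunk the Vulkan backend stops unconditionally reporting ASTC as unsupported ("Disable for now to avoid converting ASTC twice") and instead requires the textureCompressionASTC_LDR device feature; the code following the hunk is expected to check the listed formats individually. A sketch of what such a per-format check typically looks like with vulkan.hpp; the exact feature mask yuzu requires is not visible in this hunk, so treat it as an assumption:

    // Hypothetical per-format check: reject native ASTC unless every format in
    // astc_formats advertises the flags needed for sampled, copyable images.
    const vk::FormatFeatureFlags wanted = vk::FormatFeatureFlagBits::eSampledImage |
                                          vk::FormatFeatureFlagBits::eTransferSrc |
                                          vk::FormatFeatureFlagBits::eTransferDst;
    for (const auto format : astc_formats) {
        const auto properties = physical.getFormatProperties(format, dldi);
        if ((properties.optimalTilingFeatures & wanted) != wanted) {
            return false;
        }
    }
    return true;

Here physical is assumed to be the device's vk::PhysicalDevice handle, and dldi is the dispatch loader passed into IsOptimalAstcSupported.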
@@ -572,24 +575,34 @@ std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperti
         vk::Format::eBc2SrgbBlock,
         vk::Format::eBc3SrgbBlock,
         vk::Format::eBc7SrgbBlock,
+        vk::Format::eAstc4x4UnormBlock,
         vk::Format::eAstc4x4SrgbBlock,
-        vk::Format::eAstc8x8SrgbBlock,
-        vk::Format::eAstc8x5SrgbBlock,
+        vk::Format::eAstc5x4UnormBlock,
         vk::Format::eAstc5x4SrgbBlock,
         vk::Format::eAstc5x5UnormBlock,
         vk::Format::eAstc5x5SrgbBlock,
-        vk::Format::eAstc10x8UnormBlock,
-        vk::Format::eAstc10x8SrgbBlock,
-        vk::Format::eAstc6x6UnormBlock,
-        vk::Format::eAstc6x6SrgbBlock,
-        vk::Format::eAstc10x10UnormBlock,
-        vk::Format::eAstc10x10SrgbBlock,
-        vk::Format::eAstc12x12UnormBlock,
-        vk::Format::eAstc12x12SrgbBlock,
-        vk::Format::eAstc8x6UnormBlock,
-        vk::Format::eAstc8x6SrgbBlock,
         vk::Format::eAstc6x5UnormBlock,
         vk::Format::eAstc6x5SrgbBlock,
+        vk::Format::eAstc6x6UnormBlock,
+        vk::Format::eAstc6x6SrgbBlock,
+        vk::Format::eAstc8x5UnormBlock,
+        vk::Format::eAstc8x5SrgbBlock,
+        vk::Format::eAstc8x6UnormBlock,
+        vk::Format::eAstc8x6SrgbBlock,
+        vk::Format::eAstc8x8UnormBlock,
+        vk::Format::eAstc8x8SrgbBlock,
+        vk::Format::eAstc10x5UnormBlock,
+        vk::Format::eAstc10x5SrgbBlock,
+        vk::Format::eAstc10x6UnormBlock,
+        vk::Format::eAstc10x6SrgbBlock,
+        vk::Format::eAstc10x8UnormBlock,
+        vk::Format::eAstc10x8SrgbBlock,
+        vk::Format::eAstc10x10UnormBlock,
+        vk::Format::eAstc10x10SrgbBlock,
+        vk::Format::eAstc12x10UnormBlock,
+        vk::Format::eAstc12x10SrgbBlock,
+        vk::Format::eAstc12x12UnormBlock,
+        vk::Format::eAstc12x12SrgbBlock,
         vk::Format::eE5B9G9R9UfloatPack32};
     std::unordered_map<vk::Format, vk::FormatProperties> format_properties;
     for (const auto format : formats) {
@@ -35,7 +35,6 @@ using VideoCore::MortonSwizzleMode;
 
 using Tegra::Texture::SwizzleSource;
 using VideoCore::Surface::PixelFormat;
-using VideoCore::Surface::SurfaceCompression;
 using VideoCore::Surface::SurfaceTarget;
 
 namespace {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params) {
|
UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
|
||||||
|
std::size_t host_memory_size) {
|
||||||
// TODO(Rodrigo): Move texture buffer creation to the buffer cache
|
// TODO(Rodrigo): Move texture buffer creation to the buffer cache
|
||||||
const vk::BufferCreateInfo buffer_ci({}, params.GetHostSizeInBytes(),
|
const vk::BufferCreateInfo buffer_ci({}, host_memory_size,
|
||||||
vk::BufferUsageFlagBits::eUniformTexelBuffer |
|
vk::BufferUsageFlagBits::eUniformTexelBuffer |
|
||||||
vk::BufferUsageFlagBits::eTransferSrc |
|
vk::BufferUsageFlagBits::eTransferSrc |
|
||||||
vk::BufferUsageFlagBits::eTransferDst,
|
vk::BufferUsageFlagBits::eTransferDst,
|
||||||
|
@ -110,12 +110,13 @@ UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params) {
|
||||||
|
|
||||||
vk::BufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
|
vk::BufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
|
||||||
const SurfaceParams& params,
|
const SurfaceParams& params,
|
||||||
vk::Buffer buffer) {
|
vk::Buffer buffer,
|
||||||
|
std::size_t host_memory_size) {
|
||||||
ASSERT(params.IsBuffer());
|
ASSERT(params.IsBuffer());
|
||||||
|
|
||||||
const auto format =
|
const auto format =
|
||||||
MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
|
MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
|
||||||
return vk::BufferViewCreateInfo({}, buffer, format, 0, params.GetHostSizeInBytes());
|
return vk::BufferViewCreateInfo({}, buffer, format, 0, host_memory_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
vk::ImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
|
vk::ImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
|
||||||
|
@@ -169,14 +170,15 @@ CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
                              VKResourceManager& resource_manager, VKMemoryManager& memory_manager,
                              VKScheduler& scheduler, VKStagingBufferPool& staging_pool,
                              GPUVAddr gpu_addr, const SurfaceParams& params)
-    : SurfaceBase<View>{gpu_addr, params}, system{system}, device{device},
-      resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler},
-      staging_pool{staging_pool} {
+    : SurfaceBase<View>{gpu_addr, params, device.IsOptimalAstcSupported()}, system{system},
+      device{device}, resource_manager{resource_manager},
+      memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {
     if (params.IsBuffer()) {
-        buffer = CreateBuffer(device, params);
+        buffer = CreateBuffer(device, params, host_memory_size);
         commit = memory_manager.Commit(*buffer, false);
 
-        const auto buffer_view_ci = GenerateBufferViewCreateInfo(device, params, *buffer);
+        const auto buffer_view_ci =
+            GenerateBufferViewCreateInfo(device, params, *buffer, host_memory_size);
         format = buffer_view_ci.format;
 
         const auto dev = device.GetLogical();
@@ -255,7 +257,7 @@ void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) {
     std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);
 
     scheduler.Record([src_buffer = *src_buffer.handle, dst_buffer = *buffer,
-                      size = params.GetHostSizeInBytes()](auto cmdbuf, auto& dld) {
+                      size = host_memory_size](auto cmdbuf, auto& dld) {
         const vk::BufferCopy copy(0, 0, size);
         cmdbuf.copyBuffer(src_buffer, dst_buffer, {copy}, dld);
 
@@ -299,10 +301,7 @@ void CachedSurface::UploadImage(const std::vector<u8>& staging_buffer) {
 
 vk::BufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
     const u32 vk_depth = params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1;
-    const auto compression_type = params.GetCompressionType();
-    const std::size_t mip_offset = compression_type == SurfaceCompression::Converted
-                                       ? params.GetConvertedMipmapOffset(level)
-                                       : params.GetHostMipmapLevelOffset(level);
+    const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted);
 
     return vk::BufferImageCopy(
         mip_offset, 0, 0,
@@ -390,8 +389,9 @@ VKTextureCache::VKTextureCache(Core::System& system, VideoCore::RasterizerInterf
                                const VKDevice& device, VKResourceManager& resource_manager,
                                VKMemoryManager& memory_manager, VKScheduler& scheduler,
                                VKStagingBufferPool& staging_pool)
-    : TextureCache(system, rasterizer), device{device}, resource_manager{resource_manager},
-      memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {}
+    : TextureCache(system, rasterizer, device.IsOptimalAstcSupported()), device{device},
+      resource_manager{resource_manager}, memory_manager{memory_manager}, scheduler{scheduler},
+      staging_pool{staging_pool} {}
 
 VKTextureCache::~VKTextureCache() = default;
 
@@ -504,103 +504,6 @@ static constexpr u32 GetBytesPerPixel(PixelFormat pixel_format) {
     return GetFormatBpp(pixel_format) / CHAR_BIT;
 }
 
-enum class SurfaceCompression {
-    None,       // Not compressed
-    Compressed, // Texture is compressed
-    Converted,  // Texture is converted before upload or after download
-    Rearranged, // Texture is swizzled before upload or after download
-};
-
-constexpr std::array<SurfaceCompression, MaxPixelFormat> compression_type_table = {{
-    SurfaceCompression::None, // ABGR8U
-    SurfaceCompression::None, // ABGR8S
-    SurfaceCompression::None, // ABGR8UI
-    SurfaceCompression::None, // B5G6R5U
-    SurfaceCompression::None, // A2B10G10R10U
-    SurfaceCompression::None, // A1B5G5R5U
-    SurfaceCompression::None, // R8U
-    SurfaceCompression::None, // R8UI
-    SurfaceCompression::None, // RGBA16F
-    SurfaceCompression::None, // RGBA16U
-    SurfaceCompression::None, // RGBA16S
-    SurfaceCompression::None, // RGBA16UI
-    SurfaceCompression::None, // R11FG11FB10F
-    SurfaceCompression::None, // RGBA32UI
-    SurfaceCompression::Compressed, // DXT1
-    SurfaceCompression::Compressed, // DXT23
-    SurfaceCompression::Compressed, // DXT45
-    SurfaceCompression::Compressed, // DXN1
-    SurfaceCompression::Compressed, // DXN2UNORM
-    SurfaceCompression::Compressed, // DXN2SNORM
-    SurfaceCompression::Compressed, // BC7U
-    SurfaceCompression::Compressed, // BC6H_UF16
-    SurfaceCompression::Compressed, // BC6H_SF16
-    SurfaceCompression::Converted, // ASTC_2D_4X4
-    SurfaceCompression::None, // BGRA8
-    SurfaceCompression::None, // RGBA32F
-    SurfaceCompression::None, // RG32F
-    SurfaceCompression::None, // R32F
-    SurfaceCompression::None, // R16F
-    SurfaceCompression::None, // R16U
-    SurfaceCompression::None, // R16S
-    SurfaceCompression::None, // R16UI
-    SurfaceCompression::None, // R16I
-    SurfaceCompression::None, // RG16
-    SurfaceCompression::None, // RG16F
-    SurfaceCompression::None, // RG16UI
-    SurfaceCompression::None, // RG16I
-    SurfaceCompression::None, // RG16S
-    SurfaceCompression::None, // RGB32F
-    SurfaceCompression::None, // RGBA8_SRGB
-    SurfaceCompression::None, // RG8U
-    SurfaceCompression::None, // RG8S
-    SurfaceCompression::None, // RG32UI
-    SurfaceCompression::None, // RGBX16F
-    SurfaceCompression::None, // R32UI
-    SurfaceCompression::None, // R32I
-    SurfaceCompression::Converted, // ASTC_2D_8X8
-    SurfaceCompression::Converted, // ASTC_2D_8X5
-    SurfaceCompression::Converted, // ASTC_2D_5X4
-    SurfaceCompression::None, // BGRA8_SRGB
-    SurfaceCompression::Compressed, // DXT1_SRGB
-    SurfaceCompression::Compressed, // DXT23_SRGB
-    SurfaceCompression::Compressed, // DXT45_SRGB
-    SurfaceCompression::Compressed, // BC7U_SRGB
-    SurfaceCompression::None, // R4G4B4A4U
-    SurfaceCompression::Converted, // ASTC_2D_4X4_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_8X8_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_8X5_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_5X4_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_5X5
-    SurfaceCompression::Converted, // ASTC_2D_5X5_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_10X8
-    SurfaceCompression::Converted, // ASTC_2D_10X8_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_6X6
-    SurfaceCompression::Converted, // ASTC_2D_6X6_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_10X10
-    SurfaceCompression::Converted, // ASTC_2D_10X10_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_12X12
-    SurfaceCompression::Converted, // ASTC_2D_12X12_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_8X6
-    SurfaceCompression::Converted, // ASTC_2D_8X6_SRGB
-    SurfaceCompression::Converted, // ASTC_2D_6X5
-    SurfaceCompression::Converted, // ASTC_2D_6X5_SRGB
-    SurfaceCompression::None, // E5B9G9R9F
-    SurfaceCompression::None, // Z32F
-    SurfaceCompression::None, // Z16
-    SurfaceCompression::None, // Z24S8
-    SurfaceCompression::Rearranged, // S8Z24
-    SurfaceCompression::None, // Z32FS8
-}};
-
-constexpr SurfaceCompression GetFormatCompressionType(PixelFormat format) {
-    if (format == PixelFormat::Invalid) {
-        return SurfaceCompression::None;
-    }
-    DEBUG_ASSERT(static_cast<std::size_t>(format) < compression_type_table.size());
-    return compression_type_table[static_cast<std::size_t>(format)];
-}
-
 SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_type);
 
 bool SurfaceTargetIsLayered(SurfaceTarget target);
@@ -18,15 +18,20 @@ MICROPROFILE_DEFINE(GPU_Flush_Texture, "GPU", "Texture Flush", MP_RGB(128, 192,
 
 using Tegra::Texture::ConvertFromGuestToHost;
 using VideoCore::MortonSwizzleMode;
-using VideoCore::Surface::SurfaceCompression;
+using VideoCore::Surface::IsPixelFormatASTC;
+using VideoCore::Surface::PixelFormat;
 
 StagingCache::StagingCache() = default;
 
 StagingCache::~StagingCache() = default;
 
-SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params)
-    : params{params}, host_memory_size{params.GetHostSizeInBytes()}, gpu_addr{gpu_addr},
-      mipmap_sizes(params.num_levels), mipmap_offsets(params.num_levels) {
+SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params,
                                 bool is_astc_supported)
+    : params{params}, gpu_addr{gpu_addr}, mipmap_sizes(params.num_levels),
+      mipmap_offsets(params.num_levels) {
+    is_converted = IsPixelFormatASTC(params.pixel_format) && !is_astc_supported;
+    host_memory_size = params.GetHostSizeInBytes(is_converted);
+
     std::size_t offset = 0;
     for (u32 level = 0; level < params.num_levels; ++level) {
         const std::size_t mipmap_size{params.GetGuestMipmapSize(level)};
|
||||||
|
|
||||||
std::size_t guest_offset{mipmap_offsets[level]};
|
std::size_t guest_offset{mipmap_offsets[level]};
|
||||||
if (params.is_layered) {
|
if (params.is_layered) {
|
||||||
std::size_t host_offset{0};
|
std::size_t host_offset = 0;
|
||||||
const std::size_t guest_stride = layer_size;
|
const std::size_t guest_stride = layer_size;
|
||||||
const std::size_t host_stride = params.GetHostLayerSize(level);
|
const std::size_t host_stride = params.GetHostLayerSize(level);
|
||||||
for (u32 layer = 0; layer < params.depth; ++layer) {
|
for (u32 layer = 0; layer < params.depth; ++layer) {
|
||||||
|
@ -206,7 +211,7 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
|
||||||
ASSERT_MSG(params.block_width == 0, "Block width is defined as {} on texture target {}",
|
ASSERT_MSG(params.block_width == 0, "Block width is defined as {} on texture target {}",
|
||||||
params.block_width, static_cast<u32>(params.target));
|
params.block_width, static_cast<u32>(params.target));
|
||||||
for (u32 level = 0; level < params.num_levels; ++level) {
|
for (u32 level = 0; level < params.num_levels; ++level) {
|
||||||
const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
|
const std::size_t host_offset{params.GetHostMipmapLevelOffset(level, false)};
|
||||||
SwizzleFunc(MortonSwizzleMode::MortonToLinear, host_ptr, params,
|
SwizzleFunc(MortonSwizzleMode::MortonToLinear, host_ptr, params,
|
||||||
staging_buffer.data() + host_offset, level);
|
staging_buffer.data() + host_offset, level);
|
||||||
}
|
}
|
||||||
|
@ -219,7 +224,7 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
|
||||||
const u32 height{(params.height + block_height - 1) / block_height};
|
const u32 height{(params.height + block_height - 1) / block_height};
|
||||||
const u32 copy_size{width * bpp};
|
const u32 copy_size{width * bpp};
|
||||||
if (params.pitch == copy_size) {
|
if (params.pitch == copy_size) {
|
||||||
std::memcpy(staging_buffer.data(), host_ptr, params.GetHostSizeInBytes());
|
std::memcpy(staging_buffer.data(), host_ptr, params.GetHostSizeInBytes(false));
|
||||||
} else {
|
} else {
|
||||||
const u8* start{host_ptr};
|
const u8* start{host_ptr};
|
||||||
u8* write_to{staging_buffer.data()};
|
u8* write_to{staging_buffer.data()};
|
||||||
|
@ -231,19 +236,15 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
auto compression_type = params.GetCompressionType();
|
if (!is_converted && params.pixel_format != PixelFormat::S8Z24) {
|
||||||
if (compression_type == SurfaceCompression::None ||
|
|
||||||
compression_type == SurfaceCompression::Compressed)
|
|
||||||
return;
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
for (u32 level_up = params.num_levels; level_up > 0; --level_up) {
|
for (u32 level = params.num_levels; level--;) {
|
||||||
const u32 level = level_up - 1;
|
const std::size_t in_host_offset{params.GetHostMipmapLevelOffset(level, false)};
|
||||||
const std::size_t in_host_offset{params.GetHostMipmapLevelOffset(level)};
|
const std::size_t out_host_offset{params.GetHostMipmapLevelOffset(level, is_converted)};
|
||||||
const std::size_t out_host_offset = compression_type == SurfaceCompression::Rearranged
|
u8* const in_buffer = staging_buffer.data() + in_host_offset;
|
||||||
? in_host_offset
|
u8* const out_buffer = staging_buffer.data() + out_host_offset;
|
||||||
: params.GetConvertedMipmapOffset(level);
|
|
||||||
u8* in_buffer = staging_buffer.data() + in_host_offset;
|
|
||||||
u8* out_buffer = staging_buffer.data() + out_host_offset;
|
|
||||||
ConvertFromGuestToHost(in_buffer, out_buffer, params.pixel_format,
|
ConvertFromGuestToHost(in_buffer, out_buffer, params.pixel_format,
|
||||||
params.GetMipWidth(level), params.GetMipHeight(level),
|
params.GetMipWidth(level), params.GetMipHeight(level),
|
||||||
params.GetMipDepth(level), true, true);
|
params.GetMipDepth(level), true, true);
|
||||||
|
@ -273,7 +274,7 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
|
||||||
if (params.is_tiled) {
|
if (params.is_tiled) {
|
||||||
ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
|
ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
|
||||||
for (u32 level = 0; level < params.num_levels; ++level) {
|
for (u32 level = 0; level < params.num_levels; ++level) {
|
||||||
const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
|
const std::size_t host_offset{params.GetHostMipmapLevelOffset(level, false)};
|
||||||
SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
|
SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
|
||||||
staging_buffer.data() + host_offset, level);
|
staging_buffer.data() + host_offset, level);
|
||||||
}
|
}
|
||||||
|
|
|
@@ -131,6 +131,10 @@ public:
         return !params.is_tiled;
     }
 
+    bool IsConverted() const {
+        return is_converted;
+    }
+
     bool MatchFormat(VideoCore::Surface::PixelFormat pixel_format) const {
         return params.pixel_format == pixel_format;
     }
|
||||||
}
|
}
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params);
|
explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params,
|
||||||
|
bool is_astc_supported);
|
||||||
~SurfaceBaseImpl() = default;
|
~SurfaceBaseImpl() = default;
|
||||||
|
|
||||||
virtual void DecorateSurfaceName() = 0;
|
virtual void DecorateSurfaceName() = 0;
|
||||||
|
@ -168,12 +173,13 @@ protected:
|
||||||
const SurfaceParams params;
|
const SurfaceParams params;
|
||||||
std::size_t layer_size;
|
std::size_t layer_size;
|
||||||
std::size_t guest_memory_size;
|
std::size_t guest_memory_size;
|
||||||
const std::size_t host_memory_size;
|
std::size_t host_memory_size;
|
||||||
GPUVAddr gpu_addr{};
|
GPUVAddr gpu_addr{};
|
||||||
CacheAddr cache_addr{};
|
CacheAddr cache_addr{};
|
||||||
CacheAddr cache_addr_end{};
|
CacheAddr cache_addr_end{};
|
||||||
VAddr cpu_addr{};
|
VAddr cpu_addr{};
|
||||||
bool is_continuous{};
|
bool is_continuous{};
|
||||||
|
bool is_converted{};
|
||||||
|
|
||||||
std::vector<std::size_t> mipmap_sizes;
|
std::vector<std::size_t> mipmap_sizes;
|
||||||
std::vector<std::size_t> mipmap_offsets;
|
std::vector<std::size_t> mipmap_offsets;
|
||||||
|
@ -288,8 +294,9 @@ public:
|
||||||
}
|
}
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params)
|
explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params,
|
||||||
: SurfaceBaseImpl(gpu_addr, params) {}
|
bool is_astc_supported)
|
||||||
|
: SurfaceBaseImpl(gpu_addr, params, is_astc_supported) {}
|
||||||
|
|
||||||
~SurfaceBase() = default;
|
~SurfaceBase() = default;
|
||||||
|
|
||||||
|
|
|
@@ -309,28 +309,26 @@ std::size_t SurfaceParams::GetGuestMipmapLevelOffset(u32 level) const {
     return offset;
 }
 
-std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level) const {
+std::size_t SurfaceParams::GetHostMipmapLevelOffset(u32 level, bool is_converted) const {
     std::size_t offset = 0;
-    for (u32 i = 0; i < level; i++) {
+    if (is_converted) {
+        for (u32 i = 0; i < level; ++i) {
+            offset += GetConvertedMipmapSize(i) * GetNumLayers();
+        }
+    } else {
+        for (u32 i = 0; i < level; ++i) {
             offset += GetInnerMipmapMemorySize(i, true, false) * GetNumLayers();
         }
-    return offset;
-}
-
-std::size_t SurfaceParams::GetConvertedMipmapOffset(u32 level) const {
-    std::size_t offset = 0;
-    for (u32 i = 0; i < level; i++) {
-        offset += GetConvertedMipmapSize(i);
     }
     return offset;
 }
 
 std::size_t SurfaceParams::GetConvertedMipmapSize(u32 level) const {
     constexpr std::size_t rgba8_bpp = 4ULL;
-    const std::size_t width_t = GetMipWidth(level);
-    const std::size_t height_t = GetMipHeight(level);
-    const std::size_t depth_t = is_layered ? depth : GetMipDepth(level);
-    return width_t * height_t * depth_t * rgba8_bpp;
+    const std::size_t mip_width = GetMipWidth(level);
+    const std::size_t mip_height = GetMipHeight(level);
+    const std::size_t mip_depth = is_layered ? 1 : GetMipDepth(level);
+    return mip_width * mip_height * mip_depth * rgba8_bpp;
 }
 
 std::size_t SurfaceParams::GetLayerSize(bool as_host_size, bool uncompressed) const {
@@ -20,8 +20,6 @@ namespace VideoCommon {
 
 class FormatLookupTable;
 
-using VideoCore::Surface::SurfaceCompression;
-
 class SurfaceParams {
 public:
     /// Creates SurfaceCachedParams from a texture configuration.
|
||||||
return GetInnerMemorySize(false, false, false);
|
return GetInnerMemorySize(false, false, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::size_t GetHostSizeInBytes() const {
|
std::size_t GetHostSizeInBytes(bool is_converted) const {
|
||||||
std::size_t host_size_in_bytes;
|
if (!is_converted) {
|
||||||
if (GetCompressionType() == SurfaceCompression::Converted) {
|
return GetInnerMemorySize(true, false, false);
|
||||||
// ASTC is uncompressed in software, in emulated as RGBA8
|
|
||||||
host_size_in_bytes = 0;
|
|
||||||
for (u32 level = 0; level < num_levels; ++level) {
|
|
||||||
host_size_in_bytes += GetConvertedMipmapSize(level);
|
|
||||||
}
|
}
|
||||||
} else {
|
// ASTC is uncompressed in software, in emulated as RGBA8
|
||||||
host_size_in_bytes = GetInnerMemorySize(true, false, false);
|
std::size_t host_size_in_bytes = 0;
|
||||||
|
for (u32 level = 0; level < num_levels; ++level) {
|
||||||
|
host_size_in_bytes += GetConvertedMipmapSize(level) * GetNumLayers();
|
||||||
}
|
}
|
||||||
return host_size_in_bytes;
|
return host_size_in_bytes;
|
||||||
}
|
}
|
||||||
|
@ -107,9 +103,8 @@ public:
|
||||||
u32 GetMipBlockDepth(u32 level) const;
|
u32 GetMipBlockDepth(u32 level) const;
|
||||||
|
|
||||||
/// Returns the best possible row/pitch alignment for the surface.
|
/// Returns the best possible row/pitch alignment for the surface.
|
||||||
u32 GetRowAlignment(u32 level) const {
|
u32 GetRowAlignment(u32 level, bool is_converted) const {
|
||||||
const u32 bpp =
|
const u32 bpp = is_converted ? 4 : GetBytesPerPixel();
|
||||||
GetCompressionType() == SurfaceCompression::Converted ? 4 : GetBytesPerPixel();
|
|
||||||
return 1U << Common::CountTrailingZeroes32(GetMipWidth(level) * bpp);
|
return 1U << Common::CountTrailingZeroes32(GetMipWidth(level) * bpp);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,11 +112,7 @@ public:
|
||||||
std::size_t GetGuestMipmapLevelOffset(u32 level) const;
|
std::size_t GetGuestMipmapLevelOffset(u32 level) const;
|
||||||
|
|
||||||
/// Returns the offset in bytes in host memory (linear) of a given mipmap level.
|
/// Returns the offset in bytes in host memory (linear) of a given mipmap level.
|
||||||
std::size_t GetHostMipmapLevelOffset(u32 level) const;
|
std::size_t GetHostMipmapLevelOffset(u32 level, bool is_converted) const;
|
||||||
|
|
||||||
/// Returns the offset in bytes in host memory (linear) of a given mipmap level
|
|
||||||
/// for a texture that is converted in host gpu.
|
|
||||||
std::size_t GetConvertedMipmapOffset(u32 level) const;
|
|
||||||
|
|
||||||
/// Returns the size in bytes in guest memory of a given mipmap level.
|
/// Returns the size in bytes in guest memory of a given mipmap level.
|
||||||
std::size_t GetGuestMipmapSize(u32 level) const {
|
std::size_t GetGuestMipmapSize(u32 level) const {
|
||||||
|
@ -196,11 +187,6 @@ public:
|
||||||
pixel_format < VideoCore::Surface::PixelFormat::MaxDepthStencilFormat;
|
pixel_format < VideoCore::Surface::PixelFormat::MaxDepthStencilFormat;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns how the compression should be handled for this texture.
|
|
||||||
SurfaceCompression GetCompressionType() const {
|
|
||||||
return VideoCore::Surface::GetFormatCompressionType(pixel_format);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns is the surface is a TextureBuffer type of surface.
|
/// Returns is the surface is a TextureBuffer type of surface.
|
||||||
bool IsBuffer() const {
|
bool IsBuffer() const {
|
||||||
return target == VideoCore::Surface::SurfaceTarget::TextureBuffer;
|
return target == VideoCore::Surface::SurfaceTarget::TextureBuffer;
|
||||||
|
|
|
@@ -289,8 +289,9 @@ public:
     }
 
 protected:
-    TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer)
-        : system{system}, rasterizer{rasterizer} {
+    explicit TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
+                          bool is_astc_supported)
+        : system{system}, is_astc_supported{is_astc_supported}, rasterizer{rasterizer} {
         for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; i++) {
             SetEmptyColorBuffer(i);
         }
|
||||||
}
|
}
|
||||||
|
|
||||||
Core::System& system;
|
Core::System& system;
|
||||||
|
const bool is_astc_supported;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
enum class RecycleStrategy : u32 {
|
enum class RecycleStrategy : u32 {
|
||||||
|