using Ryujinx.Common;
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Gpu.Image;
using Ryujinx.Graphics.Gpu.State;
using Ryujinx.Graphics.Shader;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Linq;

namespace Ryujinx.Graphics.Gpu.Memory
{
    /// <summary>
    /// Buffer manager.
    /// </summary>
    class BufferManager
    {
        private const int StackToHeapThreshold = 16;

        private const int OverlapsBufferInitialCapacity = 10;
        private const int OverlapsBufferMaxCapacity = 10000;

        private const ulong BufferAlignmentSize = 0x1000;
        private const ulong BufferAlignmentMask = BufferAlignmentSize - 1;

        private GpuContext _context;

        private RangeList<Buffer> _buffers;

        private Buffer[] _bufferOverlaps;

        private IndexBuffer _indexBuffer;
        private VertexBuffer[] _vertexBuffers;
        private BufferBounds[] _transformFeedbackBuffers;

        private List<BufferTextureBinding> _bufferTextures;

        /// <summary>
        /// Holds shader stage buffer state and binding information.
        /// </summary>
        private class BuffersPerStage
        {
            /// <summary>
            /// Shader buffer binding information.
            /// </summary>
            public BufferDescriptor[] Bindings { get; }

            /// <summary>
            /// Buffer regions.
            /// </summary>
            public BufferBounds[] Buffers { get; }

            /// <summary>
            /// Total number of buffers used by the shader.
            /// </summary>
            public int Count { get; private set; }

            /// <summary>
            /// Creates a new instance of the shader stage buffer information.
            /// </summary>
            /// <param name="count">Maximum number of buffers that the shader stage can use</param>
            public BuffersPerStage(int count)
            {
                Bindings = new BufferDescriptor[count];
                Buffers = new BufferBounds[count];
            }

            /// <summary>
            /// Sets the region of a buffer at a given slot.
            /// </summary>
            /// <param name="index">Buffer slot</param>
            /// <param name="address">Region virtual address</param>
            /// <param name="size">Region size in bytes</param>
            /// <param name="flags">Buffer usage flags</param>
            public void SetBounds(int index, ulong address, ulong size, BufferUsageFlags flags = BufferUsageFlags.None)
            {
                Buffers[index] = new BufferBounds(address, size, flags);
            }

            /// <summary>
            /// Sets shader buffer binding information.
            /// </summary>
            /// <param name="descriptors">Buffer binding information</param>
            public void SetBindings(ReadOnlyCollection<BufferDescriptor> descriptors)
            {
                if (descriptors == null)
                {
                    Count = 0;
                    return;
                }

                descriptors.CopyTo(Bindings, 0);
                Count = descriptors.Count;
            }
        }

        private BuffersPerStage _cpStorageBuffers;
        private BuffersPerStage _cpUniformBuffers;
        private BuffersPerStage[] _gpStorageBuffers;
        private BuffersPerStage[] _gpUniformBuffers;

        private int _cpStorageBufferBindings;
        private int _cpUniformBufferBindings;
        private int _gpStorageBufferBindings;
        private int _gpUniformBufferBindings;

        private bool _gpStorageBuffersDirty;
        private bool _gpUniformBuffersDirty;

        private bool _indexBufferDirty;
        private bool _vertexBuffersDirty;
        private uint _vertexBuffersEnableMask;

        private bool _transformFeedbackBuffersDirty;

        private bool _rebind;

        private Dictionary<ulong, BufferCacheEntry> _dirtyCache;

        /// <summary>
        /// Creates a new instance of the buffer manager.
        /// </summary>
        /// <param name="context">The GPU context that the buffer manager belongs to</param>
        public BufferManager(GpuContext context)
        {
            _context = context;

            _buffers = new RangeList<Buffer>();

            _bufferOverlaps = new Buffer[OverlapsBufferInitialCapacity];

            _vertexBuffers = new VertexBuffer[Constants.TotalVertexBuffers];

            _transformFeedbackBuffers = new BufferBounds[Constants.TotalTransformFeedbackBuffers];

            _cpStorageBuffers = new BuffersPerStage(Constants.TotalCpStorageBuffers);
            _cpUniformBuffers = new BuffersPerStage(Constants.TotalCpUniformBuffers);

            _gpStorageBuffers = new BuffersPerStage[Constants.ShaderStages];
            _gpUniformBuffers = new BuffersPerStage[Constants.ShaderStages];

            for (int index = 0; index < Constants.ShaderStages; index++)
            {
                _gpStorageBuffers[index] = new BuffersPerStage(Constants.TotalGpStorageBuffers);
                _gpUniformBuffers[index] = new BuffersPerStage(Constants.TotalGpUniformBuffers);
            }

            _bufferTextures = new List<BufferTextureBinding>();

            _dirtyCache = new Dictionary<ulong, BufferCacheEntry>();
        }

        /// <summary>
        /// Sets the memory range with the index buffer data, to be used for subsequent draw calls.
        /// </summary>
        /// <param name="gpuVa">Start GPU virtual address of the index buffer</param>
        /// <param name="size">Size, in bytes, of the index buffer</param>
        /// <param name="type">Type of each index buffer element</param>
        public void SetIndexBuffer(ulong gpuVa, ulong size, IndexType type)
        {
            ulong address = TranslateAndCreateBuffer(gpuVa, size);

            _indexBuffer.Address = address;
            _indexBuffer.Size = size;
            _indexBuffer.Type = type;

            _indexBufferDirty = true;
        }

        /// <summary>
        /// Sets a new index buffer that overrides the one set on the call to <see cref="CommitGraphicsBindings"/>.
        /// </summary>
        /// <param name="buffer">Buffer to be used as index buffer</param>
        /// <param name="type">Type of each index buffer element</param>
        public void SetIndexBuffer(BufferRange buffer, IndexType type)
        {
            _context.Renderer.Pipeline.SetIndexBuffer(buffer, type);

            _indexBufferDirty = true;
        }

        /// <summary>
        /// Sets the memory range with vertex buffer data, to be used for subsequent draw calls.
        /// </summary>
        /// <param name="index">Index of the vertex buffer (up to 16)</param>
        /// <param name="gpuVa">GPU virtual address of the buffer</param>
        /// <param name="size">Size in bytes of the buffer</param>
        /// <param name="stride">Stride of the buffer, defined as the number of bytes of each vertex</param>
        /// <param name="divisor">Vertex divisor of the buffer, for instanced draws</param>
        public void SetVertexBuffer(int index, ulong gpuVa, ulong size, int stride, int divisor)
        {
            ulong address = TranslateAndCreateBuffer(gpuVa, size);

            _vertexBuffers[index].Address = address;
            _vertexBuffers[index].Size = size;
            _vertexBuffers[index].Stride = stride;
            _vertexBuffers[index].Divisor = divisor;

            _vertexBuffersDirty = true;

            if (address != 0)
            {
                _vertexBuffersEnableMask |= 1u << index;
            }
            else
            {
                _vertexBuffersEnableMask &= ~(1u << index);
            }
        }

        /// <summary>
        /// Sets a transform feedback buffer on the graphics pipeline.
        /// The output from the vertex transformation stages is written into the feedback buffer.
        /// </summary>
        /// <param name="index">Index of the transform feedback buffer</param>
        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
        /// <param name="size">Size in bytes of the transform feedback buffer</param>
        public void SetTransformFeedbackBuffer(int index, ulong gpuVa, ulong size)
        {
            ulong address = TranslateAndCreateBuffer(gpuVa, size);

            _transformFeedbackBuffers[index] = new BufferBounds(address, size);
            _transformFeedbackBuffersDirty = true;
        }

        /// <summary>
        /// Sets a storage buffer on the compute pipeline.
        /// Storage buffers can be read from and written to by shaders.
        /// </summary>
        /// <param name="index">Index of the storage buffer</param>
        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
        /// <param name="size">Size in bytes of the storage buffer</param>
        /// <param name="flags">Buffer usage flags</param>
        public void SetComputeStorageBuffer(int index, ulong gpuVa, ulong size, BufferUsageFlags flags)
        {
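            // Grow the size by the GPU VA's misalignment, then align the VA down, so the
            // bound range still covers the caller's span at the host's required alignment.
            // Illustrative values: with a 0x100-byte alignment, gpuVa = 0x10230 and
            // size = 0x50 become gpuVa = 0x10200 and size = 0x80, covering [0x10230, 0x10280).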
            size += gpuVa & ((ulong)_context.Capabilities.StorageBufferOffsetAlignment - 1);

            gpuVa = BitUtils.AlignDown(gpuVa, _context.Capabilities.StorageBufferOffsetAlignment);

            ulong address = TranslateAndCreateBuffer(gpuVa, size);

            _cpStorageBuffers.SetBounds(index, address, size, flags);
        }

        /// <summary>
        /// Sets a storage buffer on the graphics pipeline.
        /// Storage buffers can be read from and written to by shaders.
        /// </summary>
        /// <param name="stage">Index of the shader stage</param>
        /// <param name="index">Index of the storage buffer</param>
        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
        /// <param name="size">Size in bytes of the storage buffer</param>
        /// <param name="flags">Buffer usage flags</param>
        public void SetGraphicsStorageBuffer(int stage, int index, ulong gpuVa, ulong size, BufferUsageFlags flags)
        {
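            // Apply the same storage buffer alignment expansion as in SetComputeStorageBuffer.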
            size += gpuVa & ((ulong)_context.Capabilities.StorageBufferOffsetAlignment - 1);

            gpuVa = BitUtils.AlignDown(gpuVa, _context.Capabilities.StorageBufferOffsetAlignment);

            ulong address = TranslateAndCreateBuffer(gpuVa, size);

            if (_gpStorageBuffers[stage].Buffers[index].Address != address ||
                _gpStorageBuffers[stage].Buffers[index].Size != size)
            {
                _gpStorageBuffersDirty = true;
            }

            _gpStorageBuffers[stage].SetBounds(index, address, size, flags);
        }

        /// <summary>
        /// Sets a uniform buffer on the compute pipeline.
        /// Uniform buffers are read-only from shaders, and have a small capacity.
        /// </summary>
        /// <param name="index">Index of the uniform buffer</param>
        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
        /// <param name="size">Size in bytes of the uniform buffer</param>
        public void SetComputeUniformBuffer(int index, ulong gpuVa, ulong size)
        {
            ulong address = TranslateAndCreateBuffer(gpuVa, size);

            _cpUniformBuffers.SetBounds(index, address, size);
        }

        /// <summary>
        /// Sets a uniform buffer on the graphics pipeline.
        /// Uniform buffers are read-only from shaders, and have a small capacity.
        /// </summary>
        /// <param name="stage">Index of the shader stage</param>
        /// <param name="index">Index of the uniform buffer</param>
        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
        /// <param name="size">Size in bytes of the uniform buffer</param>
        public void SetGraphicsUniformBuffer(int stage, int index, ulong gpuVa, ulong size)
        {
            ulong address = TranslateAndCreateBuffer(gpuVa, size);

            _gpUniformBuffers[stage].SetBounds(index, address, size);
            _gpUniformBuffersDirty = true;
        }

        /// <summary>
        /// Sets the binding points for the storage buffers bound on the compute pipeline.
        /// </summary>
        /// <param name="descriptors">Buffer descriptors with the binding point values</param>
        public void SetComputeStorageBufferBindings(ReadOnlyCollection<BufferDescriptor> descriptors)
        {
            _cpStorageBuffers.SetBindings(descriptors);
            _cpStorageBufferBindings = descriptors.Count != 0 ? descriptors.Max(x => x.Binding) + 1 : 0;
        }

        /// <summary>
        /// Sets the binding points for the storage buffers bound on the graphics pipeline.
        /// </summary>
        /// <param name="stage">Index of the shader stage</param>
        /// <param name="descriptors">Buffer descriptors with the binding point values</param>
        public void SetGraphicsStorageBufferBindings(int stage, ReadOnlyCollection<BufferDescriptor> descriptors)
        {
            _gpStorageBuffers[stage].SetBindings(descriptors);
            _gpStorageBuffersDirty = true;
        }

        /// <summary>
        /// Sets the total number of storage buffer bindings used.
        /// </summary>
        /// <param name="count">Number of storage buffer bindings used</param>
        public void SetGraphicsStorageBufferBindingsCount(int count)
        {
            _gpStorageBufferBindings = count;
        }

        /// <summary>
        /// Sets the binding points for the uniform buffers bound on the compute pipeline.
        /// </summary>
        /// <param name="descriptors">Buffer descriptors with the binding point values</param>
        public void SetComputeUniformBufferBindings(ReadOnlyCollection<BufferDescriptor> descriptors)
        {
            _cpUniformBuffers.SetBindings(descriptors);
            _cpUniformBufferBindings = descriptors.Count != 0 ? descriptors.Max(x => x.Binding) + 1 : 0;
        }

        /// <summary>
        /// Sets the binding points for the uniform buffers bound on the graphics pipeline.
        /// </summary>
        /// <param name="stage">Index of the shader stage</param>
        /// <param name="descriptors">Buffer descriptors with the binding point values</param>
        public void SetGraphicsUniformBufferBindings(int stage, ReadOnlyCollection<BufferDescriptor> descriptors)
        {
            _gpUniformBuffers[stage].SetBindings(descriptors);
            _gpUniformBuffersDirty = true;
        }

        /// <summary>
        /// Sets the total number of uniform buffer bindings used.
        /// </summary>
        /// <param name="count">Number of uniform buffer bindings used</param>
        public void SetGraphicsUniformBufferBindingsCount(int count)
        {
            _gpUniformBufferBindings = count;
        }

        /// <summary>
        /// Gets a bit mask indicating which compute uniform buffers are currently bound.
        /// </summary>
        /// <returns>Mask where each bit set indicates a bound constant buffer</returns>
        public uint GetComputeUniformBufferUseMask()
        {
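            // Each set bit marks a bound slot; e.g. a mask of 0b101 means slots 0 and 2
            // currently have uniform buffers bound.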
            uint mask = 0;

            for (int i = 0; i < _cpUniformBuffers.Buffers.Length; i++)
            {
                if (_cpUniformBuffers.Buffers[i].Address != 0)
                {
                    mask |= 1u << i;
                }
            }

            return mask;
        }

        /// <summary>
        /// Gets a bit mask indicating which graphics uniform buffers are currently bound.
        /// </summary>
        /// <param name="stage">Index of the shader stage</param>
        /// <returns>Mask where each bit set indicates a bound constant buffer</returns>
        public uint GetGraphicsUniformBufferUseMask(int stage)
        {
            uint mask = 0;

            for (int i = 0; i < _gpUniformBuffers[stage].Buffers.Length; i++)
            {
                if (_gpUniformBuffers[stage].Buffers[i].Address != 0)
                {
                    mask |= 1u << i;
                }
            }

            return mask;
        }

        /// <summary>
        /// Handles removal of buffers written to a memory region being unmapped.
        /// </summary>
        /// <param name="sender">Sender object</param>
        /// <param name="e">Event arguments</param>
        public void MemoryUnmappedHandler(object sender, UnmapEventArgs e)
        {
            Buffer[] overlaps = new Buffer[10];
            int overlapCount;
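            // The unmap event carries a GPU VA range; translate its start to the backing
            // CPU address so the overlapping physical buffers can be looked up.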
            ulong address = _context.MemoryManager.Translate(e.Address);
            ulong size = e.Size;

            lock (_buffers)
            {
                overlapCount = _buffers.FindOverlaps(address, size, ref overlaps);
            }

            for (int i = 0; i < overlapCount; i++)
            {
                overlaps[i].Unmapped(address, size);
            }
        }

        /// <summary>
        /// Performs address translation of the GPU virtual address, and creates a
        /// new buffer, if needed, for the specified range.
        /// </summary>
        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
        /// <param name="size">Size in bytes of the buffer</param>
        /// <returns>CPU virtual address of the buffer, after address translation</returns>
        private ulong TranslateAndCreateBuffer(ulong gpuVa, ulong size)
        {
            if (gpuVa == 0)
            {
                return 0;
            }

            ulong address = _context.MemoryManager.Translate(gpuVa);
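            // Like the gpuVa == 0 early-out above, a return value of 0 acts as the
            // "unbound" sentinel that callers test with Address != 0 checks.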
            if (address == MemoryManager.PteUnmapped)
            {
                return 0;
            }

            CreateBuffer(address, size);

            return address;
        }

        /// <summary>
        /// Creates a new buffer for the specified range, if it does not yet exist.
        /// This can be used to ensure the existence of a buffer.
        /// </summary>
        /// <param name="address">Address of the buffer in memory</param>
        /// <param name="size">Size of the buffer in bytes</param>
        public void CreateBuffer(ulong address, ulong size)
        {
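            // Align the requested range out to page boundaries (BufferAlignmentSize = 0x1000).
            // Illustrative values: address = 0x1234 and size = 0x10 give endAddress = 0x1244,
            // so alignedAddress = 0x1000 and alignedEndAddress = 0x2000, one full page.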
            ulong endAddress = address + size;

            ulong alignedAddress = address & ~BufferAlignmentMask;

            ulong alignedEndAddress = (endAddress + BufferAlignmentMask) & ~BufferAlignmentMask;

            // The buffer must have a size of at least one page.
            if (alignedEndAddress == alignedAddress)
            {
                alignedEndAddress += BufferAlignmentSize;
            }

            CreateBufferAligned(alignedAddress, alignedEndAddress - alignedAddress);
        }

        /// <summary>
        /// Performs address translation of the GPU virtual address, and forces the
        /// buffer in the region to be flagged as dirty.
        /// The buffer lookup for this function is cached in a dictionary for quick access, which
        /// accelerates common UBO updates.
        /// </summary>
        /// <param name="gpuVa">Start GPU virtual address of the buffer</param>
        /// <param name="size">Size in bytes of the buffer</param>
        public void ForceDirty(ulong gpuVa, ulong size)
        {
            BufferCacheEntry result;
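            // Re-resolve the cache entry when it is missing, no longer covers the requested
            // range, or its buffer was recreated since it was cached (UnmappedSequence changed).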
            if (!_dirtyCache.TryGetValue(gpuVa, out result) || result.EndGpuAddress < gpuVa + size || result.UnmappedSequence != result.Buffer.UnmappedSequence)
            {
                ulong address = TranslateAndCreateBuffer(gpuVa, size);
                result = new BufferCacheEntry(address, gpuVa, GetBuffer(address, size));

                _dirtyCache[gpuVa] = result;
            }

            result.Buffer.ForceDirty(result.Address, size);
        }

        /// <summary>
        /// Creates a new buffer for the specified range, if needed.
        /// If a buffer where this range can be fully contained already exists,
        /// then the creation of a new buffer is not necessary.
        /// </summary>
        /// <param name="address">Address of the buffer in guest memory</param>
        /// <param name="size">Size in bytes of the buffer</param>
        private void CreateBufferAligned(ulong address, ulong size)
        {
            int overlapsCount;

            lock (_buffers)
            {
                overlapsCount = _buffers.FindOverlapsNonOverlapping(address, size, ref _bufferOverlaps);
            }

            if (overlapsCount != 0)
            {
                // The buffer already exists. We can just return the existing buffer
                // if the buffer we need is fully contained inside the overlapping buffer.
                // Otherwise, we must delete the overlapping buffers and create a bigger buffer
                // that fits all the data we need. We also need to copy the contents from the
                // old buffer(s) to the new buffer.
                ulong endAddress = address + size;

                if (_bufferOverlaps[0].Address > address || _bufferOverlaps[0].EndAddress < endAddress)
                {
                    for (int index = 0; index < overlapsCount; index++)
                    {
                        Buffer buffer = _bufferOverlaps[index];

                        address = Math.Min(address, buffer.Address);
                        endAddress = Math.Max(endAddress, buffer.EndAddress);

                        lock (_buffers)
                        {
                            _buffers.Remove(buffer);
                        }
                    }

                    Buffer newBuffer = new Buffer(_context, address, endAddress - address);

                    newBuffer.SynchronizeMemory(address, endAddress - address);

                    lock (_buffers)
                    {
                        _buffers.Add(newBuffer);
                    }

                    for (int index = 0; index < overlapsCount; index++)
                    {
                        Buffer buffer = _bufferOverlaps[index];

                        int dstOffset = (int)(buffer.Address - newBuffer.Address);
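
                        // Fully synchronize the old buffer, copy its contents into the new
                        // one, and let the new buffer inherit the tracked modified ranges
                        // before the old buffer is disposed.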
                        buffer.ForceSynchronizeMemory(buffer.Address, buffer.Size);
                        buffer.CopyTo(newBuffer, dstOffset);
                        newBuffer.InheritModifiedRanges(buffer);

                        buffer.Dispose();
                    }

                    // Existing buffers were modified, we need to rebind everything.
                    _rebind = true;
                }
            }
            else
            {
                // No overlap, just create a new buffer.
                Buffer buffer = new Buffer(_context, address, size);

                lock (_buffers)
                {
                    _buffers.Add(buffer);
                }
            }

            ShrinkOverlapsBufferIfNeeded();
        }

        /// <summary>
        /// Resizes the temporary buffer used for range list intersection results, if it has grown too much.
        /// </summary>
        private void ShrinkOverlapsBufferIfNeeded()
        {
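            // Overlap queries grow _bufferOverlaps on demand; clamp it back down if a
            // query ever inflated it past the cap, so one large result does not pin memory.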
            if (_bufferOverlaps.Length > OverlapsBufferMaxCapacity)
            {
                Array.Resize(ref _bufferOverlaps, OverlapsBufferMaxCapacity);
            }
        }

        /// <summary>
        /// Gets the address of the compute uniform buffer currently bound at the given index.
        /// </summary>
        /// <param name="index">Index of the uniform buffer binding</param>
        /// <returns>The uniform buffer address, or an undefined value if the buffer is not currently bound</returns>
        public ulong GetComputeUniformBufferAddress(int index)
        {
            return _cpUniformBuffers.Buffers[index].Address;
        }

        /// <summary>
        /// Gets the address of the graphics uniform buffer currently bound at the given index.
        /// </summary>
        /// <param name="stage">Index of the shader stage</param>
        /// <param name="index">Index of the uniform buffer binding</param>
        /// <returns>The uniform buffer address, or an undefined value if the buffer is not currently bound</returns>
        public ulong GetGraphicsUniformBufferAddress(int stage, int index)
        {
            return _gpUniformBuffers[stage].Buffers[index].Address;
        }

        /// <summary>
        /// Ensures that the compute engine bindings are visible to the host GPU.
        /// Note: this actually performs the binding using the host graphics API.
        /// </summary>
        public void CommitComputeBindings()
        {
            int sCount = _cpStorageBufferBindings;
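
            // Small binding counts are allocated on the stack; anything at or above
            // StackToHeapThreshold (16) falls back to a heap allocation.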
            Span<BufferRange> sRanges = sCount < StackToHeapThreshold ? stackalloc BufferRange[sCount] : new BufferRange[sCount];

            for (int index = 0; index < _cpStorageBuffers.Count; index++)
            {
                ref var bindingInfo = ref _cpStorageBuffers.Bindings[index];

                BufferBounds bounds = _cpStorageBuffers.Buffers[bindingInfo.Slot];

                if (bounds.Address != 0)
                {
                    // The storage buffer size is not reliable (it might be lower than the actual size),
                    // so we bind the entire buffer to allow otherwise out of range accesses to work.
                    sRanges[bindingInfo.Binding] = GetBufferRangeTillEnd(
                        bounds.Address,
                        bounds.Size,
                        bounds.Flags.HasFlag(BufferUsageFlags.Write));
                }
            }

            _context.Renderer.Pipeline.SetStorageBuffers(sRanges);

            int uCount = _cpUniformBufferBindings;

            Span<BufferRange> uRanges = uCount < StackToHeapThreshold ? stackalloc BufferRange[uCount] : new BufferRange[uCount];

            for (int index = 0; index < _cpUniformBuffers.Count; index++)
            {
                ref var bindingInfo = ref _cpUniformBuffers.Bindings[index];

                BufferBounds bounds = _cpUniformBuffers.Buffers[bindingInfo.Slot];

                if (bounds.Address != 0)
                {
                    uRanges[bindingInfo.Binding] = GetBufferRange(bounds.Address, bounds.Size);
                }
            }

            _context.Renderer.Pipeline.SetUniformBuffers(uRanges);

            CommitBufferTextureBindings();

            // Force rebind after doing compute work.
            _rebind = true;
        }

        /// <summary>
        /// Commits any queued buffer texture bindings.
        /// </summary>
        private void CommitBufferTextureBindings()
        {
            if (_bufferTextures.Count > 0)
            {
                foreach (var binding in _bufferTextures)
                {
                    binding.Texture.SetStorage(GetBufferRange(binding.Address, binding.Size, binding.BindingInfo.Flags.HasFlag(TextureUsageFlags.ImageStore)));

                    // The texture must be rebound to use the new storage if it was updated.
                    if (binding.IsImage)
                    {
                        _context.Renderer.Pipeline.SetImage(binding.BindingInfo.Binding, binding.Texture, binding.Format);
                    }
                    else
                    {
                        _context.Renderer.Pipeline.SetTexture(binding.BindingInfo.Binding, binding.Texture);
                    }
                }

                _bufferTextures.Clear();
            }
        }
2019-12-31 03:22:58 +00:00
|
|
|
/// <summary>
|
|
|
|
/// Ensures that the graphics engine bindings are visible to the host GPU.
|
2020-01-01 15:39:09 +00:00
|
|
|
/// Note: this actually performs the binding using the host graphics API.
|
2019-12-31 03:22:58 +00:00
|
|
|
/// </summary>
|
2020-04-25 14:02:18 +01:00
|
|
|
public void CommitGraphicsBindings()
|
2019-10-13 07:02:07 +01:00
|
|
|
{
|
|
|
|
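// Each binding type below follows the same pattern: rebind when dirty (or when a rebind was forced), otherwise just synchronize guest memory.
|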
if (_indexBufferDirty || _rebind)
|
|
|
|
{
|
|
|
|
_indexBufferDirty = false;
|
|
|
|
|
|
|
|
if (_indexBuffer.Address != 0)
|
|
|
|
{
|
|
|
|
BufferRange buffer = GetBufferRange(_indexBuffer.Address, _indexBuffer.Size);
|
|
|
|
|
2019-12-29 17:41:50 +00:00
|
|
|
_context.Renderer.Pipeline.SetIndexBuffer(buffer, _indexBuffer.Type);
|
2019-10-13 07:02:07 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (_indexBuffer.Address != 0)
|
|
|
|
{
|
|
|
|
SynchronizeBufferRange(_indexBuffer.Address, _indexBuffer.Size);
|
|
|
|
}
|
|
|
|
|
2019-11-23 05:17:22 +00:00
|
|
|
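// The enable mask lets the loops below stop at the highest enabled vertex buffer.
|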
uint vbEnableMask = _vertexBuffersEnableMask;
|
|
|
|
|
2019-10-13 07:02:07 +01:00
|
|
|
if (_vertexBuffersDirty || _rebind)
|
|
|
|
{
|
|
|
|
_vertexBuffersDirty = false;
|
|
|
|
|
2020-05-23 10:46:09 +01:00
|
|
|
Span<VertexBufferDescriptor> vertexBuffers = stackalloc VertexBufferDescriptor[Constants.TotalVertexBuffers];
|
2019-10-13 07:02:07 +01:00
|
|
|
|
2019-11-23 05:17:22 +00:00
|
|
|
for (int index = 0; (vbEnableMask >> index) != 0; index++)
|
2019-10-13 07:02:07 +01:00
|
|
|
{
|
|
|
|
VertexBuffer vb = _vertexBuffers[index];
|
|
|
|
|
|
|
|
if (vb.Address == 0)
|
|
|
|
{
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
BufferRange buffer = GetBufferRange(vb.Address, vb.Size);
|
|
|
|
|
|
|
|
vertexBuffers[index] = new VertexBufferDescriptor(buffer, vb.Stride, vb.Divisor);
|
|
|
|
}
|
|
|
|
|
2019-12-29 17:41:50 +00:00
|
|
|
_context.Renderer.Pipeline.SetVertexBuffers(vertexBuffers);
|
2019-10-13 07:02:07 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2019-11-23 05:17:22 +00:00
|
|
|
for (int index = 0; (vbEnableMask >> index) != 0; index++)
|
2019-10-13 07:02:07 +01:00
|
|
|
{
|
|
|
|
VertexBuffer vb = _vertexBuffers[index];
|
|
|
|
|
|
|
|
if (vb.Address == 0)
|
|
|
|
{
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
SynchronizeBufferRange(vb.Address, vb.Size);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-15 23:05:06 +01:00
|
|
|
if (_transformFeedbackBuffersDirty || _rebind)
|
2020-07-15 04:01:10 +01:00
|
|
|
{
|
|
|
|
_transformFeedbackBuffersDirty = false;
|
|
|
|
|
2020-10-25 20:23:42 +00:00
|
|
|
Span<BufferRange> tfbs = stackalloc BufferRange[Constants.TotalTransformFeedbackBuffers];
|
|
|
|
|
2020-07-15 04:01:10 +01:00
|
|
|
for (int index = 0; index < Constants.TotalTransformFeedbackBuffers; index++)
|
|
|
|
{
|
|
|
|
BufferBounds tfb = _transformFeedbackBuffers[index];
|
|
|
|
|
|
|
|
if (tfb.Address == 0)
|
|
|
|
{
|
2020-10-25 20:23:42 +00:00
|
|
|
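// Unlike vertex buffers, unbound transform feedback slots are explicitly cleared with an empty range rather than skipped.
|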
tfbs[index] = BufferRange.Empty;
|
2020-07-15 04:01:10 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2020-10-25 20:23:42 +00:00
|
|
|
tfbs[index] = GetBufferRange(tfb.Address, tfb.Size);
|
2020-07-15 04:01:10 +01:00
|
|
|
}
|
2020-10-25 20:23:42 +00:00
|
|
|
|
|
|
|
_context.Renderer.Pipeline.SetTransformFeedbackBuffers(tfbs);
|
2020-07-15 04:01:10 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
for (int index = 0; index < Constants.TotalTransformFeedbackBuffers; index++)
|
|
|
|
{
|
|
|
|
BufferBounds tfb = _transformFeedbackBuffers[index];
|
|
|
|
|
|
|
|
if (tfb.Address == 0)
|
|
|
|
{
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
SynchronizeBufferRange(tfb.Address, tfb.Size);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-13 07:02:07 +01:00
|
|
|
if (_gpStorageBuffersDirty || _rebind)
|
|
|
|
{
|
|
|
|
_gpStorageBuffersDirty = false;
|
|
|
|
|
|
|
|
BindBuffers(_gpStorageBuffers, isStorage: true);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
UpdateBuffers(_gpStorageBuffers);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (_gpUniformBuffersDirty || _rebind)
|
|
|
|
{
|
|
|
|
_gpUniformBuffersDirty = false;
|
|
|
|
|
|
|
|
BindBuffers(_gpUniformBuffers, isStorage: false);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
UpdateBuffers(_gpUniformBuffers);
|
|
|
|
}
|
|
|
|
|
2021-03-08 21:43:39 +00:00
|
|
|
CommitBufferTextureBindings();
|
|
|
|
|
2019-10-13 07:02:07 +01:00
|
|
|
_rebind = false;
|
|
|
|
}
|
|
|
|
|
2019-12-31 03:22:58 +00:00
|
|
|
/// <summary>
|
|
|
|
/// Binds the respective buffer bindings on the host API.
|
|
|
|
/// </summary>
|
|
|
|
/// <param name="bindings">Bindings to bind</param>
|
|
|
|
/// <param name="isStorage">True to bind as storage buffer, false to bind as uniform buffers</param>
|
2019-10-13 07:02:07 +01:00
|
|
|
private void BindBuffers(BuffersPerStage[] bindings, bool isStorage)
|
|
|
|
{
|
2020-11-08 11:10:00 +00:00
|
|
|
int count = isStorage ? _gpStorageBufferBindings : _gpUniformBufferBindings;
|
|
|
|
|
|
|
|
Span<BufferRange> ranges = count < StackToHeapThreshold ? stackalloc BufferRange[count] : new BufferRange[count];
|
|
|
|
|
|
|
|
for (ShaderStage stage = ShaderStage.Vertex; stage <= ShaderStage.Fragment; stage++)
|
|
|
|
{
|
|
|
|
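// The - 1 assumes graphics stages start right after Compute in the ShaderStage enum, mapping Vertex..Fragment onto indices 0..4.
|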
ref var buffers = ref bindings[(int)stage - 1];
|
|
|
|
|
|
|
|
for (int index = 0; index < buffers.Count; index++)
|
|
|
|
{
|
|
|
|
ref var bindingInfo = ref buffers.Bindings[index];
|
|
|
|
|
|
|
|
BufferBounds bounds = buffers.Buffers[bindingInfo.Slot];
|
|
|
|
|
|
|
|
if (bounds.Address != 0)
|
|
|
|
{
|
2021-01-24 22:22:19 +00:00
|
|
|
ranges[bindingInfo.Binding] = isStorage
|
|
|
|
? GetBufferRangeTillEnd(bounds.Address, bounds.Size, bounds.Flags.HasFlag(BufferUsageFlags.Write))
|
|
|
|
: GetBufferRange(bounds.Address, bounds.Size, bounds.Flags.HasFlag(BufferUsageFlags.Write));
|
2020-11-08 11:10:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isStorage)
|
|
|
|
{
|
|
|
|
_context.Renderer.Pipeline.SetStorageBuffers(ranges);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
_context.Renderer.Pipeline.SetUniformBuffers(ranges);
|
|
|
|
}
|
2019-10-13 07:02:07 +01:00
|
|
|
}
|
|
|
|
|
2019-12-31 03:22:58 +00:00
|
|
|
/// <summary>
|
|
|
|
/// Updates data for the already-bound buffer bindings.
|
|
|
|
/// </summary>
|
|
|
|
/// <param name="bindings">Bindings to update</param>
|
2019-10-13 07:02:07 +01:00
|
|
|
private void UpdateBuffers(BuffersPerStage[] bindings)
|
|
|
|
{
|
|
|
|
for (ShaderStage stage = ShaderStage.Vertex; stage <= ShaderStage.Fragment; stage++)
|
|
|
|
{
|
2020-11-08 11:10:00 +00:00
|
|
|
ref var buffers = ref bindings[(int)stage - 1];
|
2019-10-13 07:02:07 +01:00
|
|
|
|
2020-11-08 11:10:00 +00:00
|
|
|
for (int index = 0; index < buffers.Count; index++)
|
2019-10-13 07:02:07 +01:00
|
|
|
{
|
2020-11-08 11:10:00 +00:00
|
|
|
ref var binding = ref buffers.Bindings[index];
|
2019-10-13 07:02:07 +01:00
|
|
|
|
2020-11-08 11:10:00 +00:00
|
|
|
BufferBounds bounds = buffers.Buffers[binding.Slot];
|
2019-10-13 07:02:07 +01:00
|
|
|
|
|
|
|
if (bounds.Address == 0)
|
|
|
|
{
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2020-11-08 11:10:00 +00:00
|
|
|
SynchronizeBufferRange(bounds.Address, bounds.Size);
|
2019-10-13 07:02:07 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-25 14:02:18 +01:00
|
|
|
/// <summary>
|
2021-03-08 21:43:39 +00:00
|
|
|
/// Sets the buffer storage of a buffer texture. This will be bound when the buffer manager commits bindings.
|
2020-04-25 14:02:18 +01:00
|
|
|
/// </summary>
|
|
|
|
/// <param name="texture">Buffer texture</param>
|
|
|
|
/// <param name="address">Address of the buffer in memory</param>
|
|
|
|
/// <param name="size">Size of the buffer in bytes</param>
|
2021-03-08 21:43:39 +00:00
|
|
|
/// <param name="bindingInfo">Binding info for the buffer texture</param>
|
|
|
|
/// <param name="format">Format of the buffer texture</param>
|
|
|
|
/// <param name="isImage">Whether the binding is for an image or a sampler</param>
|
|
|
|
public void SetBufferTextureStorage(ITexture texture, ulong address, ulong size, TextureBindingInfo bindingInfo, Format format, bool isImage)
|
2020-04-25 14:02:18 +01:00
|
|
|
{
|
|
|
|
CreateBuffer(address, size);
|
|
|
|
|
2021-03-08 21:43:39 +00:00
|
|
|
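// The binding is only queued here; it is applied by CommitBufferTextureBindings on the next commit.
|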
_bufferTextures.Add(new BufferTextureBinding(texture, address, size, bindingInfo, format, isImage));
|
2020-04-25 14:02:18 +01:00
|
|
|
}
|
|
|
|
|
2019-12-31 03:22:58 +00:00
|
|
|
/// <summary>
|
|
|
|
/// Copies buffer data from a given address to another.
|
|
|
|
/// </summary>
|
2020-01-01 15:39:09 +00:00
|
|
|
/// <remarks>
|
|
|
|
/// This performs a GPU-side copy.
|
|
|
|
/// </remarks>
|
2019-12-31 03:22:58 +00:00
|
|
|
/// <param name="srcVa">GPU virtual address of the copy source</param>
|
|
|
|
/// <param name="dstVa">GPU virtual address of the copy destination</param>
|
|
|
|
/// <param name="size">Size in bytes of the copy</param>
|
2019-10-13 07:02:07 +01:00
|
|
|
public void CopyBuffer(GpuVa srcVa, GpuVa dstVa, ulong size)
|
|
|
|
{
|
|
|
|
ulong srcAddress = TranslateAndCreateBuffer(srcVa.Pack(), size);
|
|
|
|
ulong dstAddress = TranslateAndCreateBuffer(dstVa.Pack(), size);
|
|
|
|
|
2019-10-27 02:41:01 +00:00
|
|
|
Buffer srcBuffer = GetBuffer(srcAddress, size);
|
|
|
|
Buffer dstBuffer = GetBuffer(dstAddress, size);
|
2019-10-13 07:02:07 +01:00
|
|
|
|
2019-10-27 02:41:01 +00:00
|
|
|
int srcOffset = (int)(srcAddress - srcBuffer.Address);
|
|
|
|
int dstOffset = (int)(dstAddress - dstBuffer.Address);
|
|
|
|
|
2020-05-23 10:46:09 +01:00
|
|
|
_context.Renderer.Pipeline.CopyBuffer(
|
|
|
|
srcBuffer.Handle,
|
|
|
|
dstBuffer.Handle,
|
2019-10-27 02:41:01 +00:00
|
|
|
srcOffset,
|
|
|
|
dstOffset,
|
2019-10-13 07:02:07 +01:00
|
|
|
(int)size);
|
2019-10-27 02:41:01 +00:00
|
|
|
|
2021-01-17 20:08:06 +00:00
|
|
|
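// If the GPU has modified the source region, the destination must be marked as GPU modified too, so its data is flushed when read back.
|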
if (srcBuffer.IsModified(srcAddress, size))
|
|
|
|
{
|
|
|
|
dstBuffer.SignalModified(dstAddress, size);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// Optimization: If the data being copied is already up to date in memory, copy it directly instead of flushing it from the GPU.
|
|
|
|
|
|
|
|
dstBuffer.ClearModified(dstAddress, size);
|
|
|
|
_context.PhysicalMemory.WriteUntracked(dstAddress, _context.PhysicalMemory.GetSpan(srcAddress, (int)size));
|
|
|
|
}
|
2019-10-13 07:02:07 +01:00
|
|
|
}
|
|
|
|
|
2021-01-12 21:50:54 +00:00
|
|
|
/// <summary>
|
|
|
|
/// Clears a buffer at a given address with the specified value.
|
|
|
|
/// </summary>
|
|
|
|
/// <remarks>
|
|
|
|
/// Both the address and size must be aligned to 4 bytes.
|
|
|
|
/// </remarks>
|
|
|
|
/// <param name="gpuVa">GPU virtual address of the region to clear</param>
|
|
|
|
/// <param name="size">Number of bytes to clear</param>
|
|
|
|
/// <param name="value">Value to be written into the buffer</param>
|
|
|
|
public void ClearBuffer(GpuVa gpuVa, ulong size, uint value)
|
|
|
|
{
|
|
|
|
ulong address = TranslateAndCreateBuffer(gpuVa.Pack(), size);
|
|
|
|
|
|
|
|
Buffer buffer = GetBuffer(address, size);
|
|
|
|
|
|
|
|
int offset = (int)(address - buffer.Address);
|
|
|
|
|
|
|
|
_context.Renderer.Pipeline.ClearBuffer(buffer.Handle, offset, (int)size, value);
|
|
|
|
|
2021-01-17 20:08:06 +00:00
|
|
|
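// The clear runs on the GPU, so flag the region as GPU modified.
|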
buffer.SignalModified(address, size);
|
2021-01-12 21:50:54 +00:00
|
|
|
}
|
|
|
|
|
2021-01-24 22:22:19 +00:00
|
|
|
/// <summary>
|
|
|
|
/// Gets a buffer sub-range starting at a given memory address and extending to the end of the buffer.
|
|
|
|
/// </summary>
|
|
|
|
/// <param name="address">Start address of the memory range</param>
|
|
|
|
/// <param name="size">Size in bytes of the memory range</param>
|
|
|
|
/// <param name="write">Whether the buffer will be written to by this use</param>
|
|
|
|
/// <returns>The buffer sub-range from the given memory address to the end of the buffer</returns>
|
|
|
|
private BufferRange GetBufferRangeTillEnd(ulong address, ulong size, bool write = false)
|
|
|
|
{
|
|
|
|
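// GetRange with only an address returns the range from that address to the end of the buffer, matching the "till end" semantics.
|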
return GetBuffer(address, size, write).GetRange(address);
|
|
|
|
}
|
|
|
|
|
2019-12-31 03:22:58 +00:00
|
|
|
/// <summary>
|
|
|
|
/// Gets a buffer sub-range for a given memory range.
|
|
|
|
/// </summary>
|
|
|
|
/// <param name="address">Start address of the memory range</param>
|
|
|
|
/// <param name="size">Size in bytes of the memory range</param>
|
2021-01-17 20:08:06 +00:00
|
|
|
/// <param name="write">Whether the buffer will be written to by this use</param>
|
2019-12-31 03:22:58 +00:00
|
|
|
/// <returns>The buffer sub-range for the given range</returns>
|
2021-01-17 20:08:06 +00:00
|
|
|
private BufferRange GetBufferRange(ulong address, ulong size, bool write = false)
|
2019-10-27 02:41:01 +00:00
|
|
|
{
|
2021-01-17 20:08:06 +00:00
|
|
|
return GetBuffer(address, size, write).GetRange(address, size);
|
2019-10-27 02:41:01 +00:00
|
|
|
}
|
|
|
|
|
2019-12-31 03:22:58 +00:00
|
|
|
/// <summary>
|
|
|
|
/// Gets a buffer for a given memory range.
|
|
|
|
/// A buffer overlapping with the specified range is assumed to already exist in the cache.
|
|
|
|
/// </summary>
|
|
|
|
/// <param name="address">Start address of the memory range</param>
|
|
|
|
/// <param name="size">Size in bytes of the memory range</param>
|
2021-01-17 20:08:06 +00:00
|
|
|
/// <param name="write">Whether the buffer will be written to by this use</param>
|
2019-12-31 03:22:58 +00:00
|
|
|
/// <returns>The buffer where the range is fully contained</returns>
|
2021-01-17 20:08:06 +00:00
|
|
|
private Buffer GetBuffer(ulong address, ulong size, bool write = false)
|
2019-10-13 07:02:07 +01:00
|
|
|
{
|
|
|
|
Buffer buffer;
|
|
|
|
|
|
|
|
if (size != 0)
|
|
|
|
{
|
2021-01-17 20:08:06 +00:00
|
|
|
lock (_buffers)
|
|
|
|
{
|
|
|
|
buffer = _buffers.FindFirstOverlap(address, size);
|
|
|
|
}
|
2019-10-13 07:02:07 +01:00
|
|
|
|
|
|
|
buffer.SynchronizeMemory(address, size);
|
2021-01-17 20:08:06 +00:00
|
|
|
|
|
|
|
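// When the range will be written by the GPU, mark it as modified so later flushes pick up the new data.
|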
if (write)
|
|
|
|
{
|
|
|
|
buffer.SignalModified(address, size);
|
|
|
|
}
|
2019-10-13 07:02:07 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2021-01-17 20:08:06 +00:00
|
|
|
lock (_buffers)
|
|
|
|
{
|
|
|
|
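// A zero-sized range cannot be overlap-tested meaningfully, so probe a single byte to locate the containing buffer.
|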
buffer = _buffers.FindFirstOverlap(address, 1);
|
|
|
|
}
|
2019-10-13 07:02:07 +01:00
|
|
|
}
|
|
|
|
|
2019-10-27 02:41:01 +00:00
|
|
|
return buffer;
|
2019-10-13 07:02:07 +01:00
|
|
|
}
|
|
|
|
|
2019-12-31 03:22:58 +00:00
|
|
|
/// <summary>
|
|
|
|
/// Performs guest-to-host memory synchronization of a given memory range.
|
|
|
|
/// </summary>
|
|
|
|
/// <param name="address">Start address of the memory range</param>
|
|
|
|
/// <param name="size">Size in bytes of the memory range</param>
|
2019-10-13 07:02:07 +01:00
|
|
|
private void SynchronizeBufferRange(ulong address, ulong size)
|
|
|
|
{
|
|
|
|
if (size != 0)
|
|
|
|
{
|
2021-01-17 20:08:06 +00:00
|
|
|
Buffer buffer;
|
|
|
|
|
|
|
|
lock (_buffers)
|
|
|
|
{
|
|
|
|
buffer = _buffers.FindFirstOverlap(address, size);
|
|
|
|
}
|
2019-10-13 07:02:07 +01:00
|
|
|
|
|
|
|
buffer.SynchronizeMemory(address, size);
|
|
|
|
}
|
|
|
|
}
|
2019-12-31 22:09:49 +00:00
|
|
|
|
|
|
|
/// <summary>
|
|
|
|
/// Disposes all buffers in the cache.
|
|
|
|
/// It's an error to use the buffer manager after disposal.
|
|
|
|
/// </summary>
|
|
|
|
public void Dispose()
|
|
|
|
{
|
2021-01-17 20:08:06 +00:00
|
|
|
lock (_buffers)
|
2019-12-31 22:09:49 +00:00
|
|
|
{
|
2021-01-17 20:08:06 +00:00
|
|
|
foreach (Buffer buffer in _buffers)
|
|
|
|
{
|
|
|
|
buffer.Dispose();
|
|
|
|
}
|
2019-12-31 22:09:49 +00:00
|
|
|
}
|
|
|
|
}
|
2019-10-13 07:02:07 +01:00
|
|
|
}
|
|
|
|
}
|