Mirror of https://github.com/Ryujinx/Ryujinx.git (synced 2024-11-14 05:46:41 +00:00)
Commit ec3e848d79
* Initial Implementation
About as fast as Nvidia's GL multithreading; can be improved with faster command queuing.
* Struct-based command list
Speeds up a bit. Still a lot of time lost to resource copy.
* Do shader init while the render thread is active.
* Introduce circular span pool V1
Ideally should be able to use structs instead of references for storing these spans on commands. Will try that next.
* Refactor SpanRef some more
Use a struct to represent SpanRef, rather than a reference.
* Flush buffers on background thread
* Use a span for UpdateRenderScale.
Much faster than copying the array.
* Calculate command size using reflection
* WIP parallel shaders
* Some minor optimisation
* Only 2 max refs per command now.
The command with 3 refs is gone. 😌
* Don't cast on the GPU side
* Remove redundant casts, force sync on window present
* Fix Shader Cache
* Fix host shader save.
* Fixup to work with new renderer stuff
* Make command Run static, use array of delegates as lookup (a minimal sketch of this dispatch pattern follows these notes)
Profiling shows this takes less time than the previous approach.
* Bring up to date
* Add settings toggle. Fix Multithreading Off mode.
* Fix warning.
* Release tracking lock for flushes
* Fix Conditional Render fast path with threaded GAL
* Make handle iteration safe when releasing the lock
This is mostly temporary.
* Attempt to set backend threading on driver
Only really works on Nvidia before launching a game.
* Fix race condition with BufferModifiedRangeList, exceptions in tracking actions
* Update buffer set commands
* Some cleanup
* Only use stutter workaround when using opengl renderer non-threaded
* Add host-conditional reservation of counter events
There has always been a possibility that conditional rendering could use a query object just as it is disposed by the counter queue. With this change, when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, since the threaded implementation can reserve them before the backend creates them; without this there would be a short window where the counter queue could dispose the event before a call to reserve it could be made. (A minimal sketch of the reservation idea follows these notes.)
* Address Feedback
* Make counter flush tracked again.
Hopefully does not cause any issues this time.
* Wait for FlushTo on the main queue thread.
Currently assumes only one thread will want to FlushTo (in this case, the GPU thread).
* Add SDL2 headless integration
* Add HLE macro commands.
Co-authored-by: Mary <mary@mary.zone>
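
The "array of delegates as lookup" item above refers to dispatching queued GAL commands through a static table indexed by a command ID, with each command implemented as a struct exposing a static Run method. The snippet below is a minimal sketch of that pattern only; the command name, delegate shape and storage handling are illustrative assumptions, not the actual Ryujinx threaded GAL code.

using System;
using System.Runtime.CompilerServices;

// Hypothetical sketch of struct-based commands with static Run methods,
// dispatched through an array of delegates indexed by a command ID.
public struct SetViewportCountCommand // hypothetical command, not a real GAL command
{
    public int Count;

    // Static Run: the queue never makes a virtual call per command; it just
    // indexes the lookup table below and invokes the matching delegate.
    public static void Run(ref SetViewportCountCommand command)
    {
        // A real command would call into the backend renderer here.
        Console.WriteLine($"SetViewportCount({command.Count})");
    }
}

public static class CommandTableSketch
{
    public delegate void CommandRunner(ref byte storage);

    // One entry per command ID; each entry reinterprets the raw queue storage
    // as the concrete command struct and calls its static Run method.
    public static readonly CommandRunner[] Lookup =
    {
        /* id 0 */ (ref byte storage) =>
            SetViewportCountCommand.Run(ref Unsafe.As<byte, SetViewportCountCommand>(ref storage)),
    };

    public static void Run(int commandId, Span<byte> storage)
    {
        Lookup[commandId](ref storage[0]);
    }
}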
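
The counter event reservation described in the notes amounts to a reference count that stops the counter queue from disposing an event (and its query object) while host conditional rendering may still read it, with the option to create the event already reserved. The sketch below illustrates that idea with simplified, hypothetical types; it is not the real Ryujinx counter queue API.

// Simplified sketch of "reserve before dispose"; these are not the Ryujinx
// types, just an illustration of the race being closed.
public class CounterEventSketch
{
    private readonly object _lock = new object();
    private int _reservations;
    private bool _disposed;

    // The threaded implementation can create the event already reserved,
    // closing the window where the counter queue could dispose it before
    // a reserve call arrives from host conditional rendering.
    public CounterEventSketch(bool startReserved)
    {
        _reservations = startReserved ? 1 : 0;
    }

    // Called before host conditional rendering reads the query object.
    public bool TryReserve()
    {
        lock (_lock)
        {
            if (_disposed)
            {
                return false;
            }

            _reservations++;
            return true;
        }
    }

    // Called once conditional rendering no longer needs the query object.
    public void Release()
    {
        lock (_lock)
        {
            _reservations--;
        }
    }

    // Called by the counter queue; the query object is only deleted when
    // no reservation is outstanding.
    public bool TryDispose()
    {
        lock (_lock)
        {
            if (_reservations > 0)
            {
                return false;
            }

            _disposed = true;
            return true;
        }
    }
}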
273 lines · No EOL · 9.4 KiB · C#
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Gpu.Engine.GPFifo;
using Ryujinx.Graphics.Gpu.Memory;
using Ryujinx.Graphics.Gpu.Shader;
using Ryujinx.Graphics.Gpu.Synchronization;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Threading;

namespace Ryujinx.Graphics.Gpu
{
    /// <summary>
    /// GPU emulation context.
    /// </summary>
    public sealed class GpuContext : IDisposable
    {
        /// <summary>
        /// Event signaled when the host emulation context is ready to be used by the gpu context.
        /// </summary>
        public ManualResetEvent HostInitalized { get; }

        /// <summary>
        /// Host renderer.
        /// </summary>
        public IRenderer Renderer { get; }

        /// <summary>
        /// GPU General Purpose FIFO queue.
        /// </summary>
        public GPFifoDevice GPFifo { get; }

        /// <summary>
        /// GPU synchronization manager.
        /// </summary>
        public SynchronizationManager Synchronization { get; }

        /// <summary>
        /// Presentation window.
        /// </summary>
        public Window Window { get; }

        /// <summary>
        /// Internal sequence number, used to avoid needless resource data updates
        /// in the middle of a command buffer before synchronizations.
        /// </summary>
        internal int SequenceNumber { get; private set; }

        /// <summary>
        /// Internal sync number, used to denote points at which host synchronization can be requested.
        /// </summary>
        internal ulong SyncNumber { get; private set; }

        /// <summary>
        /// Actions to be performed when a CPU waiting sync point is triggered.
        /// If there are more than 0 items when this happens, a host sync object will be generated for the given <see cref="SyncNumber"/>,
        /// and the SyncNumber will be incremented.
        /// </summary>
        internal List<Action> SyncActions { get; }

        /// <summary>
        /// Queue with deferred actions that must run on the render thread.
        /// </summary>
        internal Queue<Action> DeferredActions { get; }

        /// <summary>
        /// Registry with physical memories that can be used with this GPU context, keyed by owner process ID.
        /// </summary>
        internal ConcurrentDictionary<long, PhysicalMemory> PhysicalMemoryRegistry { get; }

        /// <summary>
        /// Host hardware capabilities.
        /// </summary>
        internal Capabilities Capabilities => _caps.Value;

        /// <summary>
        /// Event for signalling shader cache loading progress.
        /// </summary>
        public event Action<ShaderCacheState, int, int> ShaderCacheStateChanged;

        private readonly Lazy<Capabilities> _caps;

        /// <summary>
        /// Creates a new instance of the GPU emulation context.
        /// </summary>
        /// <param name="renderer">Host renderer</param>
        public GpuContext(IRenderer renderer)
        {
            Renderer = renderer;

            GPFifo = new GPFifoDevice(this);

            Synchronization = new SynchronizationManager();

            Window = new Window(this);

            HostInitalized = new ManualResetEvent(false);

            SyncActions = new List<Action>();

            DeferredActions = new Queue<Action>();

            PhysicalMemoryRegistry = new ConcurrentDictionary<long, PhysicalMemory>();

            _caps = new Lazy<Capabilities>(Renderer.GetCapabilities);
        }

        /// <summary>
        /// Creates a new GPU channel.
        /// </summary>
        /// <returns>The GPU channel</returns>
        public GpuChannel CreateChannel()
        {
            return new GpuChannel(this);
        }

        /// <summary>
        /// Creates a new GPU memory manager.
        /// </summary>
        /// <param name="pid">ID of the process that owns the memory manager</param>
        /// <returns>The memory manager</returns>
        /// <exception cref="ArgumentException">Thrown when <paramref name="pid"/> is invalid</exception>
        public MemoryManager CreateMemoryManager(long pid)
        {
            if (!PhysicalMemoryRegistry.TryGetValue(pid, out var physicalMemory))
            {
                throw new ArgumentException("The PID is invalid or the process was not registered", nameof(pid));
            }

            return new MemoryManager(physicalMemory);
        }

        /// <summary>
        /// Registers virtual memory used by a process for GPU memory access, caching and read/write tracking.
        /// </summary>
        /// <param name="pid">ID of the process that owns <paramref name="cpuMemory"/></param>
        /// <param name="cpuMemory">Virtual memory owned by the process</param>
        /// <exception cref="ArgumentException">Thrown if <paramref name="pid"/> was already registered</exception>
        public void RegisterProcess(long pid, Cpu.IVirtualMemoryManagerTracked cpuMemory)
        {
            var physicalMemory = new PhysicalMemory(this, cpuMemory);
            if (!PhysicalMemoryRegistry.TryAdd(pid, physicalMemory))
            {
                throw new ArgumentException("The PID was already registered", nameof(pid));
            }

            physicalMemory.ShaderCache.ShaderCacheStateChanged += ShaderCacheStateUpdate;
        }

        /// <summary>
        /// Unregisters a process, indicating that its memory will no longer be used, and that caches can be freed.
        /// </summary>
        /// <param name="pid">ID of the process</param>
        public void UnregisterProcess(long pid)
        {
            if (PhysicalMemoryRegistry.TryRemove(pid, out var physicalMemory))
            {
                physicalMemory.ShaderCache.ShaderCacheStateChanged -= ShaderCacheStateUpdate;
                physicalMemory.Dispose();
            }
        }

        /// <summary>
        /// Shader cache state update handler.
        /// </summary>
        /// <param name="state">Current state of the shader cache load process</param>
        /// <param name="current">Number of the current shader being processed</param>
        /// <param name="total">Total number of shaders to process</param>
        private void ShaderCacheStateUpdate(ShaderCacheState state, int current, int total)
        {
            ShaderCacheStateChanged?.Invoke(state, current, total);
        }

        /// <summary>
        /// Initialize the GPU shader cache.
        /// </summary>
        public void InitializeShaderCache()
        {
            HostInitalized.WaitOne();

            foreach (var physicalMemory in PhysicalMemoryRegistry.Values)
            {
                physicalMemory.ShaderCache.Initialize();
            }
        }

        /// <summary>
        /// Processes the queue of shaders that must save their binaries to the disk cache.
        /// </summary>
        public void ProcessShaderCacheQueue()
        {
            foreach (var physicalMemory in PhysicalMemoryRegistry.Values)
            {
                physicalMemory.ShaderCache.ProcessShaderCacheQueue();
            }
        }

        /// <summary>
        /// Advances internal sequence number.
        /// This forces the update of any modified GPU resource.
        /// </summary>
        internal void AdvanceSequence()
        {
            SequenceNumber++;
        }

        /// <summary>
        /// Registers an action to be performed the next time a syncpoint is incremented.
        /// This will also ensure a host sync object is created, and <see cref="SyncNumber"/> is incremented.
        /// </summary>
        /// <param name="action">The action to be performed on sync object creation</param>
        public void RegisterSyncAction(Action action)
        {
            SyncActions.Add(action);
        }

        /// <summary>
        /// Creates a host sync object if there are any pending sync actions. The actions will then be called.
        /// If no actions are present, a host sync object is not created.
        /// </summary>
        public void CreateHostSyncIfNeeded()
        {
            if (SyncActions.Count > 0)
            {
                Renderer.CreateSync(SyncNumber);

                SyncNumber++;

                foreach (Action action in SyncActions)
                {
                    action();
                }

                SyncActions.Clear();
            }
        }

        /// <summary>
        /// Performs deferred actions.
        /// This is useful for actions that must run on the render thread, such as resource disposal.
        /// </summary>
        internal void RunDeferredActions()
        {
            while (DeferredActions.TryDequeue(out Action action))
            {
                action();
            }
        }

        /// <summary>
        /// Disposes all GPU resources currently cached.
        /// It's an error to push any GPU commands after disposal.
        /// Additionally, the GPU commands FIFO must be empty for disposal,
        /// and processing of all commands must have finished.
        /// </summary>
        public void Dispose()
        {
            Renderer.Dispose();
            GPFifo.Dispose();
            HostInitalized.Dispose();

            // Has to be disposed before processing deferred actions, as it will produce some.
            foreach (var physicalMemory in PhysicalMemoryRegistry.Values)
            {
                physicalMemory.Dispose();
            }

            PhysicalMemoryRegistry.Clear();

            RunDeferredActions();
        }
    }
}
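
For orientation, here is a minimal sketch of how an embedder might drive this context, using only the members defined in the file above. The renderer and CPU memory objects are placeholders (any IRenderer backend and a tracked virtual memory manager supplied by the embedder), and the binding of the memory manager to the channel is omitted because it happens outside this file.

using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Gpu.Memory;

namespace Ryujinx.Graphics.Gpu
{
    static class GpuContextUsageSketch
    {
        // renderer and cpuMemory are assumed to be provided by the embedder
        // (any IRenderer backend plus the guest process' tracked virtual memory).
        public static GpuChannel Bootstrap(IRenderer renderer, Cpu.IVirtualMemoryManagerTracked cpuMemory, long pid)
        {
            var gpu = new GpuContext(renderer);

            // InitializeShaderCache waits on HostInitalized, so signal it once
            // the host renderer side is ready.
            gpu.HostInitalized.Set();

            // Register the guest process, then create its memory manager and a channel.
            gpu.RegisterProcess(pid, cpuMemory);
            MemoryManager memoryManager = gpu.CreateMemoryManager(pid);
            GpuChannel channel = gpu.CreateChannel();

            // Binding the memory manager to the channel and feeding GPFifo with
            // command buffers happens elsewhere in the emulator and is omitted here.
            _ = memoryManager;

            gpu.InitializeShaderCache();

            return channel;
        }
    }
}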