using Ryujinx.Audio.Common;
using Ryujinx.Audio.Renderer.Common;
using Ryujinx.Audio.Renderer.Parameter;
using Ryujinx.Audio.Renderer.Server.MemoryPool;
using Ryujinx.Common.Memory;
using Ryujinx.Common.Utilities;
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
using static Ryujinx.Audio.Renderer.Common.BehaviourParameter;
using static Ryujinx.Audio.Renderer.Parameter.VoiceInParameter;
namespace Ryujinx.Audio.Renderer.Server.Voice
{
[StructLayout(LayoutKind.Sequential, Pack = Alignment)]
public struct VoiceState
{
public const int Alignment = 0x10;
/// <summary>
/// Set to true if the voice is used.
/// </summary>
[MarshalAs(UnmanagedType.I1)]
public bool InUse;
/// <summary>
/// Set to true if the voice is new.
/// </summary>
[MarshalAs(UnmanagedType.I1)]
public bool IsNew;
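/// <summary>
/// Set to true if the voice was playing previously.
/// </summary>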
[MarshalAs(UnmanagedType.I1)]
public bool WasPlaying;
/// <summary>
/// The <see cref="SampleFormat"/> of the voice.
/// </summary>
public SampleFormat SampleFormat;
/// <summary>
/// The sample rate of the voice.
/// </summary>
public uint SampleRate;
/// <summary>
/// The total channel count used.
/// </summary>
public uint ChannelsCount;
/// <summary>
/// Id of the voice.
/// </summary>
public int Id;
/// <summary>
/// Node id of the voice.
/// </summary>
public int NodeId;
/// <summary>
/// The target mix id of the voice.
/// </summary>
public int MixId;
/// <summary>
/// The current voice <see cref="Types.PlayState"/>.
/// </summary>
public Types.PlayState PlayState;
/// <summary>
/// The previous voice <see cref="Types.PlayState"/>.
/// </summary>
public Types.PlayState PreviousPlayState;
/// <summary>
/// The priority of the voice.
/// </summary>
public uint Priority;
/// <summary>
/// Target sorting position of the voice. (used to sort voices with the same <see cref="Priority"/>)
/// </summary>
public uint SortingOrder;
/// <summary>
/// The pitch used on the voice.
/// </summary>
public float Pitch;
/// <summary>
/// The output volume of the voice.
/// </summary>
public float Volume;
/// <summary>
/// The previous output volume of the voice.
/// </summary>
public float PreviousVolume;
/// <summary>
/// Biquad filters to apply to the output of the voice.
/// </summary>
public Array2<BiquadFilterParameter> BiquadFilters;
/// <summary>
/// Total count of <see cref="WaveBuffer"/> of the voice.
/// </summary>
public uint WaveBuffersCount;
/// <summary>
/// Current playing <see cref="WaveBuffer"/> of the voice.
/// </summary>
public uint WaveBuffersIndex;
/// <summary>
/// Change the behaviour of the voice.
/// </summary>
/// <remarks>This was added on REV5.</remarks>
public DecodingBehaviour DecodingBehaviour;
/// <summary>
/// User state required by the data source.
/// </summary>
/// <remarks>Only used for <see cref="SampleFormat.Adpcm"/> as the GC-ADPCM coefficients.</remarks>
public AddressInfo DataSourceStateAddressInfo;
/// <summary>
/// The wavebuffers of this voice.
/// </summary>
public Array4<WaveBuffer> WaveBuffers;
/// <summary>
/// The channel resource ids associated to the voice.
/// </summary>
public Array6<int> ChannelResourceIds;
/// <summary>
/// The target splitter id of the voice.
/// </summary>
public uint SplitterId;
/// <summary>
/// Change the Sample Rate Conversion (SRC) quality of the voice.
/// </summary>
/// <remarks>This was added on REV8.</remarks>
public SampleRateConversionQuality SrcQuality;
/// <summary>
/// If set to true, the voice was dropped.
/// </summary>
[MarshalAs(UnmanagedType.I1)]
public bool VoiceDropFlag;
/// <summary>
/// Set to true if the data source state work buffer wasn't mapped.
/// </summary>
[MarshalAs(UnmanagedType.I1)]
public bool DataSourceStateUnmapped;
/// <summary>
/// Set to true if any of the <see cref="WaveBuffer"/> work buffers weren't mapped.
/// </summary>
[MarshalAs(UnmanagedType.I1)]
public bool BufferInfoUnmapped;
/// <summary>
/// The biquad filter initialization state storage.
/// </summary>
private BiquadFilterNeedInitializationArrayStruct _biquadFilterNeedInitialization;
/// <summary>
/// Flush the specified amount of wavebuffers. This will result in the wavebuffers being skipped and marked as played.
/// </summary>
/// <remarks>This was added on REV5.</remarks>
public byte FlushWaveBufferCount;
[StructLayout(LayoutKind.Sequential, Size = Constants.VoiceBiquadFilterCount)]
private struct BiquadFilterNeedInitializationArrayStruct { }
/// <summary>
/// The biquad filter initialization state array.
/// </summary>
public Span<bool> BiquadFilterNeedInitialization => SpanHelpers.AsSpan<BiquadFilterNeedInitializationArrayStruct, bool>(ref _biquadFilterNeedInitialization);
/// <summary>
/// Initialize the <see cref="VoiceState"/>.
/// </summary>
public void Initialize()
{
IsNew = false;
VoiceDropFlag = false;
DataSourceStateUnmapped = false;
BufferInfoUnmapped = false;
FlushWaveBufferCount = 0;
PlayState = Types.PlayState.Stopped;
Priority = Constants.VoiceLowestPriority;
Id = 0;
NodeId = 0;
SampleRate = 0;
SampleFormat = SampleFormat.Invalid;
ChannelsCount = 0;
Pitch = 0.0f;
Volume = 0.0f;
PreviousVolume = 0.0f;
BiquadFilters.AsSpan().Fill(new BiquadFilterParameter());
WaveBuffersCount = 0;
WaveBuffersIndex = 0;
MixId = Constants.UnusedMixId;
SplitterId = Constants.UnusedSplitterId;
DataSourceStateAddressInfo.Setup(0, 0);
InitializeWaveBuffers();
}
/// <summary>
/// Initialize the <see cref="WaveBuffer"/> in this <see cref="VoiceState"/>.
/// </summary>
private void InitializeWaveBuffers()
{
for (int i = 0; i < WaveBuffers.Length; i++)
{
WaveBuffers[i].StartSampleOffset = 0;
WaveBuffers[i].EndSampleOffset = 0;
WaveBuffers[i].ShouldLoop = false;
WaveBuffers[i].IsEndOfStream = false;
WaveBuffers[i].BufferAddressInfo.Setup(0, 0);
WaveBuffers[i].ContextAddressInfo.Setup(0, 0);
WaveBuffers[i].IsSendToAudioProcessor = true;
}
}
/// <summary>
/// Check if the voice needs to be skipped.
/// </summary>
/// <returns>Returns true if the voice needs to be skipped.</returns>
public bool ShouldSkip()
{
return !InUse || WaveBuffersCount == 0 || DataSourceStateUnmapped || BufferInfoUnmapped || VoiceDropFlag;
}
/// <summary>
/// Check if the voice has any destination (mix or splitter).
/// </summary>
/// <returns>True if the voice has any destination.</returns>
public bool HasAnyDestination()
{
return MixId != Constants.UnusedMixId || SplitterId != Constants.UnusedSplitterId;
}
/// <summary>
/// Indicate if the server voice information needs to be updated.
/// </summary>
/// <param name="parameter">The user parameter.</param>
/// <returns>Return true if the server voice information needs to be updated.</returns>
private bool ShouldUpdateParameters(ref VoiceInParameter parameter)
{
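// If the data source state address is unchanged, only a size change requires an update.
// Otherwise, any difference (or a previously unmapped state) forces a remap.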
if (DataSourceStateAddressInfo.CpuAddress == parameter.DataSourceStateAddress)
{
return DataSourceStateAddressInfo.Size != parameter.DataSourceStateSize;
}
return DataSourceStateAddressInfo.CpuAddress != parameter.DataSourceStateAddress ||
DataSourceStateAddressInfo.Size != parameter.DataSourceStateSize ||
DataSourceStateUnmapped;
}
/// <summary>
/// Update the internal state from a user parameter.
/// </summary>
/// <param name="outErrorInfo">The possible <see cref="ErrorInfo"/> that was generated.</param>
/// <param name="parameter">The user parameter.</param>
/// <param name="poolMapper">The mapper to use.</param>
/// <param name="behaviourContext">The behaviour context.</param>
public void UpdateParameters(out ErrorInfo outErrorInfo, ref VoiceInParameter parameter, ref PoolMapper poolMapper, ref BehaviourContext behaviourContext)
{
InUse = parameter.InUse;
Id = parameter.Id;
NodeId = parameter.NodeId;
UpdatePlayState(parameter.PlayState);
SrcQuality = parameter.SrcQuality;
Priority = parameter.Priority;
SortingOrder = parameter.SortingOrder;
SampleRate = parameter.SampleRate;
SampleFormat = parameter.SampleFormat;
ChannelsCount = parameter.ChannelCount;
Pitch = parameter.Pitch;
Volume = parameter.Volume;
parameter.BiquadFilters.AsSpan().CopyTo(BiquadFilters.AsSpan());
WaveBuffersCount = parameter.WaveBuffersCount;
WaveBuffersIndex = parameter.WaveBuffersIndex;
if (behaviourContext.IsFlushVoiceWaveBuffersSupported())
{
FlushWaveBufferCount += parameter.FlushWaveBufferCount;
}
MixId = parameter.MixId;
if (behaviourContext.IsSplitterSupported())
{
SplitterId = parameter.SplitterId;
}
else
{
SplitterId = Constants.UnusedSplitterId;
}
parameter.ChannelResourceIds.AsSpan().CopyTo(ChannelResourceIds.AsSpan());
DecodingBehaviour behaviour = DecodingBehaviour.Default;
if (behaviourContext.IsDecodingBehaviourFlagSupported())
{
behaviour = parameter.DecodingBehaviourFlags;
}
DecodingBehaviour = behaviour;
if (parameter.ResetVoiceDropFlag)
{
VoiceDropFlag = false;
}
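// Only remap the data source state buffer when its address, size or mapping status changed.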
if (ShouldUpdateParameters(ref parameter))
{
DataSourceStateUnmapped = !poolMapper.TryAttachBuffer(out outErrorInfo, ref DataSourceStateAddressInfo, parameter.DataSourceStateAddress, parameter.DataSourceStateSize);
}
else
{
outErrorInfo = new ErrorInfo();
}
}
/// <summary>
/// Update the internal play state from user play state.
/// </summary>
/// <param name="userPlayState">The target user play state.</param>
public void UpdatePlayState(PlayState userPlayState)
{
Types.PlayState oldServerPlayState = PlayState;
PreviousPlayState = oldServerPlayState;
Types.PlayState newServerPlayState;
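// Translate the user-facing play state into the server-side play state machine.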
switch (userPlayState)
{
case Common.PlayState.Start:
newServerPlayState = Types.PlayState.Started;
break;
case Common.PlayState.Stop:
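// Stopping a voice that is already stopped is a no-op.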
if (oldServerPlayState == Types.PlayState.Stopped)
{
return;
}
newServerPlayState = Types.PlayState.Stopping;
break;
case Common.PlayState.Pause:
newServerPlayState = Types.PlayState.Paused;
break;
default:
throw new NotImplementedException($"Unhandled PlayState.{userPlayState}");
}
PlayState = newServerPlayState;
}
/// <summary>
/// Write the status of the voice to the given user output.
/// </summary>
/// <param name="outStatus">The given user output.</param>
/// <param name="parameter">The user parameter.</param>
/// <param name="voiceUpdateStates">The voice states associated to the <see cref="VoiceState"/>.</param>
public void WriteOutStatus(ref VoiceOutStatus outStatus, ref VoiceInParameter parameter, ReadOnlySpan<Memory<VoiceUpdateState>> voiceUpdateStates)
{
#if DEBUG
// Sanity check in debug mode of the internal state
if (!parameter.IsNew && !IsNew)
{
for (int i = 1; i < ChannelsCount; i++)
{
ref VoiceUpdateState stateA = ref voiceUpdateStates[i - 1].Span[0];
ref VoiceUpdateState stateB = ref voiceUpdateStates[i].Span[0];
Debug.Assert(stateA.WaveBufferConsumed == stateB.WaveBufferConsumed);
Debug.Assert(stateA.PlayedSampleCount == stateB.PlayedSampleCount);
Debug.Assert(stateA.Offset == stateB.Offset);
Debug.Assert(stateA.WaveBufferIndex == stateB.WaveBufferIndex);
Debug.Assert(stateA.Fraction == stateB.Fraction);
Debug.Assert(stateA.IsWaveBufferValid.SequenceEqual(stateB.IsWaveBufferValid));
}
}
#endif
if (parameter.IsNew || IsNew)
{
IsNew = true;
outStatus.VoiceDropFlag = false;
outStatus.PlayedWaveBuffersCount = 0;
outStatus.PlayedSampleCount = 0;
}
else
{
ref VoiceUpdateState state = ref voiceUpdateStates[0].Span[0];
outStatus.VoiceDropFlag = VoiceDropFlag;
outStatus.PlayedWaveBuffersCount = state.WaveBufferConsumed;
outStatus.PlayedSampleCount = state.PlayedSampleCount;
}
}
/// <summary>
/// Update the internal state of all the <see cref="WaveBuffer"/> of the <see cref="VoiceState"/>.
/// </summary>
/// <param name="errorInfos">An array of <see cref="ErrorInfo"/> used to report errors when mapping any of the <see cref="WaveBuffer"/>.</param>
/// <param name="parameter">The user parameter.</param>
/// <param name="voiceUpdateStates">The voice states associated to the <see cref="VoiceState"/>.</param>
/// <param name="mapper">The mapper to use.</param>
/// <param name="behaviourContext">The behaviour context.</param>
public void UpdateWaveBuffers(out ErrorInfo[] errorInfos, ref VoiceInParameter parameter, ReadOnlySpan<Memory<VoiceUpdateState>> voiceUpdateStates, ref PoolMapper mapper, ref BehaviourContext behaviourContext)
{
errorInfos = new ErrorInfo[Constants.VoiceWaveBufferCount * 2];
if (parameter.IsNew)
{
InitializeWaveBuffers();
for (int i = 0; i < parameter.ChannelCount; i++)
{
voiceUpdateStates[i].Span[0].IsWaveBufferValid.Fill(false);
}
}
ref VoiceUpdateState voiceUpdateState = ref voiceUpdateStates[0].Span[0];
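// Each wavebuffer gets two ErrorInfo slots: one for the buffer mapping and one for the ADPCM context mapping.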
for (int i = 0; i < Constants.VoiceWaveBufferCount; i++)
{
UpdateWaveBuffer(errorInfos.AsSpan(i * 2, 2), ref WaveBuffers[i], ref parameter.WaveBuffers[i], parameter.SampleFormat, voiceUpdateState.IsWaveBufferValid[i], ref mapper, ref behaviourContext);
}
}
/// <summary>
/// Update the internal state of one of the <see cref="WaveBuffer"/> of the <see cref="VoiceState"/>.
/// </summary>
/// <param name="errorInfos">A <see cref="Span{ErrorInfo}"/> used to report errors when mapping the <see cref="WaveBuffer"/>.</param>
/// <param name="waveBuffer">The <see cref="WaveBuffer"/> to update.</param>
/// <param name="inputWaveBuffer">The <see cref="WaveBufferInternal"/> from the user input.</param>
/// <param name="sampleFormat">The <see cref="SampleFormat"/> from the user input.</param>
/// <param name="isValid">If set to true, the server side wavebuffer is considered valid.</param>
/// <param name="mapper">The mapper to use.</param>
/// <param name="behaviourContext">The behaviour context.</param>
private void UpdateWaveBuffer(Span<ErrorInfo> errorInfos, ref WaveBuffer waveBuffer, ref WaveBufferInternal inputWaveBuffer, SampleFormat sampleFormat, bool isValid, ref PoolMapper mapper, ref BehaviourContext behaviourContext)
{
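// Unmap a wavebuffer that was already handed to the audio processor but is no longer valid on the DSP side.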
if (!isValid && waveBuffer.IsSendToAudioProcessor && waveBuffer.BufferAddressInfo.CpuAddress != 0)
{
mapper.ForceUnmap(ref waveBuffer.BufferAddressInfo);
waveBuffer.BufferAddressInfo.Setup(0, 0);
}
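// Only process wavebuffers that the client has not sent yet, or retry ones whose mapping previously failed.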
if (!inputWaveBuffer.SentToServer || BufferInfoUnmapped)
{
if (inputWaveBuffer.IsSampleOffsetValid(sampleFormat))
{
Debug.Assert(waveBuffer.IsSendToAudioProcessor);
waveBuffer.IsSendToAudioProcessor = false;
waveBuffer.StartSampleOffset = inputWaveBuffer.StartSampleOffset;
waveBuffer.EndSampleOffset = inputWaveBuffer.EndSampleOffset;
waveBuffer.ShouldLoop = inputWaveBuffer.ShouldLoop;
waveBuffer.IsEndOfStream = inputWaveBuffer.IsEndOfStream;
waveBuffer.LoopStartSampleOffset = inputWaveBuffer.LoopFirstSampleOffset;
waveBuffer.LoopEndSampleOffset = inputWaveBuffer.LoopLastSampleOffset;
waveBuffer.LoopCount = inputWaveBuffer.LoopCount;
BufferInfoUnmapped = !mapper.TryAttachBuffer(out ErrorInfo bufferInfoError, ref waveBuffer.BufferAddressInfo, inputWaveBuffer.Address, inputWaveBuffer.Size);
errorInfos[0] = bufferInfoError;
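// For ADPCM with the loop context bug fix active, the loop context buffer must be mapped as well.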
if (sampleFormat == SampleFormat.Adpcm && behaviourContext.IsAdpcmLoopContextBugFixed() && inputWaveBuffer.ContextAddress != 0)
{
bool adpcmLoopContextMapped = mapper.TryAttachBuffer(out ErrorInfo adpcmLoopContextInfoError,
ref waveBuffer.ContextAddressInfo,
inputWaveBuffer.ContextAddress,
inputWaveBuffer.ContextSize);
errorInfos[1] = adpcmLoopContextInfoError;
if (adpcmLoopContextMapped)
{
BufferInfoUnmapped = DataSourceStateUnmapped;
}
else
{
BufferInfoUnmapped = true;
}
}
else
{
waveBuffer.ContextAddressInfo.Setup(0, 0);
}
}
else
{
errorInfos[0].ErrorCode = ResultCode.InvalidAddressInfo;
errorInfos[0].ExtraErrorInfo = inputWaveBuffer.Address;
}
}
}
/// <summary>
/// Reset the resources associated to this <see cref="VoiceState"/>.
/// </summary>
/// <param name="context">The voice context.</param>
private void ResetResources(VoiceContext context)
{
for (int i = 0; i < ChannelsCount; i++)
{
int channelResourceId = ChannelResourceIds[i];
ref VoiceChannelResource voiceChannelResource = ref context.GetChannelResource(channelResourceId);
Debug.Assert(voiceChannelResource.IsUsed);
Memory<VoiceUpdateState> dspSharedState = context.GetUpdateStateForDsp(channelResourceId);
MemoryMarshal.Cast<VoiceUpdateState, byte>(dspSharedState.Span).Fill(0);
voiceChannelResource.UpdateState();
}
}
/// <summary>
/// Flush a certain amount of <see cref="WaveBuffer"/>.
/// </summary>
/// <param name="waveBufferCount">The amount of wavebuffers to flush.</param>
/// <param name="voiceUpdateStates">The voice states associated to the <see cref="VoiceState"/>.</param>
/// <param name="channelCount">The channel count from user input.</param>
private void FlushWaveBuffers(uint waveBufferCount, Memory<VoiceUpdateState>[] voiceUpdateStates, uint channelCount)
{
uint waveBufferIndex = WaveBuffersIndex;
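// Mark each flushed wavebuffer as consumed on every channel and advance through the wavebuffer ring.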
for (int i = 0; i < waveBufferCount; i++)
{
WaveBuffers[(int)waveBufferIndex].IsSendToAudioProcessor = true;
for (int j = 0; j < channelCount; j++)
{
ref VoiceUpdateState voiceUpdateState = ref voiceUpdateStates[j].Span[0];
voiceUpdateState.WaveBufferIndex = (voiceUpdateState.WaveBufferIndex + 1) % Constants.VoiceWaveBufferCount;
voiceUpdateState.WaveBufferConsumed++;
voiceUpdateState.IsWaveBufferValid[(int)waveBufferIndex] = false;
}
waveBufferIndex = (waveBufferIndex + 1) % Constants.VoiceWaveBufferCount;
}
}
/// <summary>
/// Update the internal parameters for command generation.
/// </summary>
/// <param name="voiceUpdateStates">The voice states associated to the <see cref="VoiceState"/>.</param>
/// <returns>Return true if this voice should be played.</returns>
public bool UpdateParametersForCommandGeneration(Memory<VoiceUpdateState>[] voiceUpdateStates)
{
if (FlushWaveBufferCount != 0)
{
FlushWaveBuffers(FlushWaveBufferCount, voiceUpdateStates, ChannelsCount);
FlushWaveBufferCount = 0;
}
switch (PlayState)
{
case Types.PlayState.Started:
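// Hand any wavebuffer not yet sent to the audio processor over to it, then keep playing
// only while at least one wavebuffer is still valid.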
for (int i = 0; i < WaveBuffers.Length; i++)
{
ref WaveBuffer wavebuffer = ref WaveBuffers[i];
if (!wavebuffer.IsSendToAudioProcessor)
{
for (int y = 0; y < ChannelsCount; y++)
{
Debug.Assert(!voiceUpdateStates[y].Span[0].IsWaveBufferValid[i]);
voiceUpdateStates[y].Span[0].IsWaveBufferValid[i] = true;
}
wavebuffer.IsSendToAudioProcessor = true;
}
}
WasPlaying = false;
ref VoiceUpdateState primaryVoiceUpdateState = ref voiceUpdateStates[0].Span[0];
for (int i = 0; i < primaryVoiceUpdateState.IsWaveBufferValid.Length; i++)
{
if (primaryVoiceUpdateState.IsWaveBufferValid[i])
{
return true;
}
}
return false;
case Types.PlayState.Stopping:
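// Consume every remaining wavebuffer and reset the per-channel decoding state before marking the voice as stopped.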
for (int i = 0; i < WaveBuffers.Length; i++)
{
ref WaveBuffer wavebuffer = ref WaveBuffers[i];
wavebuffer.IsSendToAudioProcessor = true;
for (int j = 0; j < ChannelsCount; j++)
{
ref VoiceUpdateState voiceUpdateState = ref voiceUpdateStates[j].Span[0];
if (voiceUpdateState.IsWaveBufferValid[i])
{
voiceUpdateState.WaveBufferIndex = (voiceUpdateState.WaveBufferIndex + 1) % Constants.VoiceWaveBufferCount;
voiceUpdateState.WaveBufferConsumed++;
}
voiceUpdateState.IsWaveBufferValid[i] = false;
}
}
for (int i = 0; i < ChannelsCount; i++)
{
ref VoiceUpdateState voiceUpdateState = ref voiceUpdateStates[i].Span[0];
voiceUpdateState.Offset = 0;
voiceUpdateState.PlayedSampleCount = 0;
voiceUpdateState.Pitch.AsSpan().Fill(0);
voiceUpdateState.Fraction = 0;
voiceUpdateState.LoopContext = new Dsp.State.AdpcmLoopContext();
}
PlayState = Types.PlayState.Stopped;
WasPlaying = PreviousPlayState == Types.PlayState.Started;
return WasPlaying;
case Types.PlayState.Stopped:
case Types.PlayState.Paused:
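// Refresh the buffer references so the mappings stay up to date while the voice is idle.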
foreach (ref WaveBuffer wavebuffer in WaveBuffers.AsSpan())
{
wavebuffer.BufferAddressInfo.GetReference(true);
wavebuffer.ContextAddressInfo.GetReference(true);
}
if (SampleFormat == SampleFormat.Adpcm)
{
if (DataSourceStateAddressInfo.CpuAddress != 0)
{
DataSourceStateAddressInfo.GetReference(true);
}
}
WasPlaying = PreviousPlayState == Types.PlayState.Started;
return WasPlaying;
default:
throw new NotImplementedException($"{PlayState}");
}
}
/// <summary>
/// Update the internal state for command generation.
/// </summary>
/// <param name="context">The voice context.</param>
/// <returns>Return true if this voice should be played.</returns>
public bool UpdateForCommandGeneration(VoiceContext context)
{
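// A newly assigned voice resets its channel resources and starts its volume ramp from the current volume.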
if (IsNew)
{
ResetResources(context);
PreviousVolume = Volume;
IsNew = false;
}
Memory<VoiceUpdateState>[] voiceUpdateStates = new Memory<VoiceUpdateState>[Constants.VoiceChannelCountMax];
for (int i = 0; i < ChannelsCount; i++)
{
voiceUpdateStates[i] = context.GetUpdateStateForDsp(ChannelResourceIds[i]);
}
return UpdateParametersForCommandGeneration(voiceUpdateStates);
}
}
}