Mirror of https://github.com/Ryujinx/Ryujinx.git, synced 2024-12-21 07:32:09 +00:00
22b2cb39af

* Turn `MemoryOperand` into a struct
* Remove `IntrinsicOperation`
* Remove `PhiNode`
* Remove `Node`
* Turn `Operand` into a struct
* Turn `Operation` into a struct
* Clean up pool management methods
* Add `Arena` allocator
* Move `OperationHelper` to `Operation.Factory`
* Move `OperandHelper` to `Operand.Factory`
* Optimize `Operation` a bit
* Fix `Arena` initialization
* Rename `NativeList<T>` to `ArenaList<T>`
* Reduce `Operand` size from 88 to 56 bytes
* Reduce `Operation` size from 56 to 40 bytes
* Add optimistic interning of Register & Constant operands
* Optimize `RegisterUsage` pass a bit
* Optimize `RemoveUnusedNodes` pass a bit

  Iterating in reverse order allows killing dependency chains in a single pass.

* Fix PPTC symbols
* Optimize `BasicBlock` a bit

  Reduce allocations from `_successor` & `DominanceFrontiers`.

* Fix `Operation` resize
* Make `Arena` expandable

  Change the arena allocator to be expandable by allocating in pages, with some of them being pooled. Currently 32 pages are pooled. An LRU removal mechanism should probably be added to it.

  Apparently MHR can allocate bitmaps large enough to exceed the 16MB limit for the type.

* Move `Arena` & `ArenaList` to `Common`
* Remove `ThreadStaticPool` & co
* Add `PhiOperation`
* Reduce `Operand` size from 56 to 48 bytes
* Add linear probing to the `Operand` intern table
* Optimize `HybridAllocator` a bit
* Add `Allocators` class
* Tune `ArenaAllocator` sizes
* Add page removal mechanism to `ArenaAllocator`

  Remove pages which have not been used for more than 5s after each reset. I am on the fence about whether this would be better done with a Gen2 callback object like the one in System.Buffers.ArrayPool<T> to trim the pool, because right now if a large translation happens, the pages will be freed only after a reset. That reset may not happen for a while if no new translation is hit, and the arena base sizes are rather small.

* Fix OOM when allocating larger than page size in `ArenaAllocator`

  Tweak the resizing mechanism for `Operand.Uses` and assignments.

* Optimize `Optimizer` a bit
* Optimize `Operand.Add<T>/Remove<T>` a bit
* Clean up `PreAllocator`
* Fix phi insertion order

  Reduces codegen diffs.

* Fix code alignment
* Use new heuristics for degree of parallelism
* Suppress warnings
* Address gdkchan's feedback

  Renamed `GetValue()` to `GetValueUnsafe()` to make it more clear that `Operand.Value` should usually not be modified directly.

* Add fast path to `ArenaAllocator`

  Assembly for `ArenaAllocator.Allocate(ulong)`:

    .L0:
      mov rax, [rcx+0x18]
      lea r8, [rax+rdx]
      cmp r8, [rcx+0x10]
      ja short .L2
    .L1:
      mov rdx, [rcx+8]
      add rax, [rdx+8]
      mov [rcx+0x18], r8
      ret
    .L2:
      jmp ArenaAllocator.AllocateSlow(UInt64)

  A few variables/fields had to be changed to ulong so that RyuJIT avoids emitting zero-extends.

* Implement a new heuristic to free pooled pages

  If an arena is used often, it is more likely that its pages will be needed, so the pages are kept for longer (e.g. during PPTC rebuild or bursts of compilations). If it is not used often, then it is more likely that its pages will not be needed (e.g. after PPTC rebuild or bursts of compilations).

* Address riperiperi's feedback
* Use `EqualityComparer<T>` in `IntrusiveList<T>`

  Avoids a potential GC hole in `Equals(T, T)`.
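The allocator fast path shown in the assembly above is just a bounds check plus a cursor bump, with everything else pushed into a separate slow path. Below is a minimal sketch of that shape in C#; the names (`ArenaAllocatorSketch`, `AppendPage`) are hypothetical, and the real ARMeilleure `ArenaAllocator` differs in its page pooling, trimming heuristics, and unsafe pointer handling:

    using System;
    using System.Collections.Generic;

    // Illustrative bump-pointer arena; not the actual ARMeilleure implementation.
    class ArenaAllocatorSketch
    {
        private readonly int _pageSize;
        private readonly List<byte[]> _pages = new();

        private byte[] _page;   // Current page.
        private ulong _index;   // Bump cursor within the current page.
        private ulong _limit;   // Size of the current page.

        public ArenaAllocatorSketch(int pageSize)
        {
            _pageSize = pageSize;
            AppendPage(pageSize);
        }

        // Fast path: one compare and one cursor bump, mirroring .L0/.L1 above.
        public Span<byte> Allocate(ulong size)
        {
            ulong start = _index;
            ulong end = start + size;

            if (end > _limit)
            {
                return AllocateSlow(size);
            }

            _index = end;

            return _page.AsSpan((int)start, (int)size);
        }

        // Slow path: start a new page; oversized requests get a dedicated page,
        // which avoids failing on larger-than-page allocations.
        private Span<byte> AllocateSlow(ulong size)
        {
            AppendPage(Math.Max(_pageSize, checked((int)size)));

            _index = size;

            return _page.AsSpan(0, (int)size);
        }

        private void AppendPage(int size)
        {
            _page = new byte[size];
            _pages.Add(_page);
            _index = 0;
            _limit = (ulong)size;
        }

        // Resets the arena in one step; a real implementation would pool pages
        // here and trim the rarely used ones, as described above.
        public void Reset()
        {
            _pages.Clear();
            AppendPage(_pageSize);
        }
    }

Keeping the slow path out of `Allocate` is what lets the JIT compile the hot path down to the handful of instructions listed in the commit message.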
174 lines
6.5 KiB
C#
using ARMeilleure.IntermediateRepresentation;
using ARMeilleure.State;
using ARMeilleure.Translation;

using static ARMeilleure.Instructions.InstEmitHelper;
using static ARMeilleure.IntermediateRepresentation.Operand.Factory;

namespace ARMeilleure.Instructions
{
    static class InstEmitMemoryExHelper
    {
        private const int ErgSizeLog2 = 4;

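        // Emits a load-exclusive: loads the value at `address` and, when `exclusive`
        // is set, records the reservation (the masked address plus the loaded value)
        // in the NativeContext so a later store-exclusive can validate it.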
        public static Operand EmitLoadExclusive(ArmEmitterContext context, Operand address, bool exclusive, int size)
        {
            if (exclusive)
            {
                Operand value;

                if (size == 4)
                {
                    // Only 128-bit CAS is guaranteed to have an atomic load.
                    Operand physAddr = InstEmitMemoryHelper.EmitPtPointerLoad(context, address, default, write: false, 4);

                    Operand zero = context.VectorZero();

                    value = context.CompareAndSwap(physAddr, zero, zero);
                }
                else
                {
                    value = InstEmitMemoryHelper.EmitReadIntAligned(context, address, size);
                }

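                // Argument 0 of a translated function is the NativeContext pointer;
                // the exclusive monitor state (address and value) is stored there.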
                Operand arg0 = context.LoadArgument(OperandType.I64, 0);

                Operand exAddrPtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));
                Operand exValuePtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveValueOffset()));

                context.Store(exAddrPtr, context.BitwiseAnd(address, Const(address.Type, GetExclusiveAddressMask())));

                // Make sure the unused higher bits of the value are cleared.
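                // The exclusive-value slot is 16 bytes wide: zero the low 8 bytes when
                // the value is narrower than 64 bits, and the high 8 bytes when it is
                // narrower than 128 bits, so stale data cannot affect the comparison.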
                if (size < 3)
                {
                    context.Store(exValuePtr, Const(0UL));
                }
                if (size < 4)
                {
                    context.Store(context.Add(exValuePtr, Const(exValuePtr.Type, 8L)), Const(0UL));
                }

                // Store the new exclusive value.
                context.Store(exValuePtr, value);

                return value;
            }
            else
            {
                return InstEmitMemoryHelper.EmitReadIntAligned(context, address, size);
            }
        }

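        // Emits a store-exclusive. The size encoding matches the load path:
        // 0 = 8-bit, 1 = 16-bit, 2 = 32-bit, 3 = 64-bit, 4 = 128-bit pair.
        // On the exclusive path, Rs receives 0 on success and 1 on failure.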
        public static void EmitStoreExclusive(
            ArmEmitterContext context,
            Operand address,
            Operand value,
            bool exclusive,
            int size,
            int rs,
            bool a32)
        {
            if (size < 3)
            {
                value = context.ConvertI64ToI32(value);
            }

            if (exclusive)
            {
                // We overwrite one of the registers (Rs), so keep a copy of the
                // values to ensure we are working with the correct ones.
                address = context.Copy(address);
                value = context.Copy(value);

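                // Writes the store-exclusive status to Rs, using the AArch32 or
                // AArch64 register file as appropriate.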
                void SetRs(Operand value)
                {
                    if (a32)
                    {
                        SetIntA32(context, rs, value);
                    }
                    else
                    {
                        SetIntOrZR(context, rs, value);
                    }
                }

                Operand arg0 = context.LoadArgument(OperandType.I64, 0);

                Operand exAddrPtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));
                Operand exAddr = context.Load(address.Type, exAddrPtr);

                // STEP 1: Check if we have exclusive access to this memory region. If not, fail and skip store.
                Operand maskedAddress = context.BitwiseAnd(address, Const(address.Type, GetExclusiveAddressMask()));

                Operand exFailed = context.ICompareNotEqual(exAddr, maskedAddress);

                Operand lblExit = Label();

                SetRs(Const(1));

                context.BranchIfTrue(lblExit, exFailed);

                // STEP 2: We have exclusive access and the address is valid, attempt the store using CAS.
                Operand physAddr = InstEmitMemoryHelper.EmitPtPointerLoad(context, address, default, write: true, size);

                Operand exValuePtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveValueOffset()));
                Operand exValue = size switch
                {
                    0 => context.Load8(exValuePtr),
                    1 => context.Load16(exValuePtr),
                    2 => context.Load(OperandType.I32, exValuePtr),
                    3 => context.Load(OperandType.I64, exValuePtr),
                    _ => context.Load(OperandType.V128, exValuePtr)
                };

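                // 8- and 16-bit sizes need the dedicated CAS variants; for the wider
                // sizes, CompareAndSwap takes its width from the operand types above.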
                Operand currValue = size switch
                {
                    0 => context.CompareAndSwap8(physAddr, exValue, value),
                    1 => context.CompareAndSwap16(physAddr, exValue, value),
                    _ => context.CompareAndSwap(physAddr, exValue, value)
                };

                // STEP 3: Check if we succeeded by comparing expected and in-memory values.
                Operand storeFailed;

                if (size == 4)
                {
                    Operand currValueLow = context.VectorExtract(OperandType.I64, currValue, 0);
                    Operand currValueHigh = context.VectorExtract(OperandType.I64, currValue, 1);

                    Operand exValueLow = context.VectorExtract(OperandType.I64, exValue, 0);
                    Operand exValueHigh = context.VectorExtract(OperandType.I64, exValue, 1);

                    storeFailed = context.BitwiseOr(
                        context.ICompareNotEqual(currValueLow, exValueLow),
                        context.ICompareNotEqual(currValueHigh, exValueHigh));
                }
                else
                {
                    storeFailed = context.ICompareNotEqual(currValue, exValue);
                }

                SetRs(storeFailed);

                context.MarkLabel(lblExit);
            }
            else
            {
                InstEmitMemoryHelper.EmitWriteIntAligned(context, address, value, size);
            }
        }

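        // Clears the exclusive monitor (CLREX semantics), so that any following
        // store-exclusive fails its address check.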
        public static void EmitClearExclusive(ArmEmitterContext context)
        {
            Operand arg0 = context.LoadArgument(OperandType.I64, 0);

            Operand exAddrPtr = context.Add(arg0, Const((long)NativeContext.GetExclusiveAddressOffset()));

            // We store ULONG max to force any exclusive address checks to fail,
            // since this value is not aligned to the ERG mask.
            context.Store(exAddrPtr, Const(ulong.MaxValue));
        }

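        // With ErgSizeLog2 = 4 the reservation granule is 4 << 4 = 64 bytes, so this
        // mask (~63) clears the low 6 bits of the address.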
        private static long GetExclusiveAddressMask() => ~((4L << ErgSizeLog2) - 1);
    }
}