mirror of
https://github.com/Ryujinx/Ryujinx.git
synced 2024-11-14 20:06:42 +00:00
22b2cb39af
* Turn `MemoryOperand` into a struct * Remove `IntrinsicOperation` * Remove `PhiNode` * Remove `Node` * Turn `Operand` into a struct * Turn `Operation` into a struct * Clean up pool management methods * Add `Arena` allocator * Move `OperationHelper` to `Operation.Factory` * Move `OperandHelper` to `Operand.Factory` * Optimize `Operation` a bit * Fix `Arena` initialization * Rename `NativeList<T>` to `ArenaList<T>` * Reduce `Operand` size from 88 to 56 bytes * Reduce `Operation` size from 56 to 40 bytes * Add optimistic interning of Register & Constant operands * Optimize `RegisterUsage` pass a bit * Optimize `RemoveUnusedNodes` pass a bit Iterating in reverse-order allows killing dependency chains in a single pass. * Fix PPTC symbols * Optimize `BasicBlock` a bit Reduce allocations from `_successor` & `DominanceFrontiers` * Fix `Operation` resize * Make `Arena` expandable Change the arena allocator to be expandable by allocating in pages, with some of them being pooled. Currently 32 pages are pooled. An LRU removal mechanism should probably be added to it. Apparently MHR can allocate bitmaps large enough to exceed the 16MB limit for the type. * Move `Arena` & `ArenaList` to `Common` * Remove `ThreadStaticPool` & co * Add `PhiOperation` * Reduce `Operand` size from 56 to 48 bytes * Add linear-probing to `Operand` intern table * Optimize `HybridAllocator` a bit * Add `Allocators` class * Tune `ArenaAllocator` sizes * Add page removal mechanism to `ArenaAllocator` Remove pages which have not been used for more than 5s after each reset. I am on the fence if this would be better using a Gen2 callback object like the one in System.Buffers.ArrayPool<T>, to trim the pool. Because right now if a large translation happens, the pages will be freed only after a reset. This reset may not happen for a while because no new translation is hit, but the arena base sizes are rather small. 
* Fix `OOM` when allocating larger than page size in `ArenaAllocator` Tweak resizing mechanism for Operand.Uses and Assignments. * Optimize `Optimizer` a bit * Optimize `Operand.Add<T>/Remove<T>` a bit * Clean up `PreAllocator` * Fix phi insertion order Reduce codegen diffs. * Fix code alignment * Use new heuristics for degree of parallelism * Suppress warnings * Address gdkchan's feedback Renamed `GetValue()` to `GetValueUnsafe()` to make it more clear that `Operand.Value` should usually not be modified directly. * Add fast path to `ArenaAllocator` * Assembly for `ArenaAllocator.Allocate(ulong)`: .L0: mov rax, [rcx+0x18] lea r8, [rax+rdx] cmp r8, [rcx+0x10] ja short .L2 .L1: mov rdx, [rcx+8] add rax, [rdx+8] mov [rcx+0x18], r8 ret .L2: jmp ArenaAllocator.AllocateSlow(UInt64) A few variables/fields had to be changed to ulong so that RyuJIT avoids emitting zero-extends. * Implement a new heuristic to free pooled pages. If an arena is used often, it is more likely that its pages will be needed, so the pages are kept for longer (e.g: during PPTC rebuild or bursts of compilations). If it is not used often, then it is more likely that its pages will not be needed (e.g: after PPTC rebuild or bursts of compilations). * Address riperiperi's feedback * Use `EqualityComparer<T>` in `IntrusiveList<T>` Avoids a potential GC hole in `Equals(T, T)`.
199 lines
No EOL
6.8 KiB
C#
199 lines
No EOL
6.8 KiB
C#
using ARMeilleure.Common;
|
|
using ARMeilleure.Decoders;
|
|
using ARMeilleure.Diagnostics;
|
|
using ARMeilleure.Instructions;
|
|
using ARMeilleure.IntermediateRepresentation;
|
|
using ARMeilleure.Memory;
|
|
using ARMeilleure.State;
|
|
using ARMeilleure.Translation.PTC;
|
|
using System;
|
|
using System.Collections.Generic;
|
|
using System.Reflection;
|
|
using static ARMeilleure.IntermediateRepresentation.Operand.Factory;
|
|
|
|
namespace ARMeilleure.Translation
{
    /// <summary>
    /// Emitter context used while translating a single guest ARM function into the
    /// intermediate representation. On top of the base <see cref="EmitterContext"/>
    /// it tracks branch-target labels and the operands of the most recent
    /// comparison, so that compare + branch sequences can be folded into a single
    /// IR compare when the flags have not been clobbered in between.
    /// </summary>
    class ArmEmitterContext : EmitterContext
    {
        // Branch target labels, keyed by guest address.
        private readonly Dictionary<ulong, Operand> _labels;

        // Opcode of the last compare-like instruction, and opcode of the last
        // instruction that modified any NZCV flag. The cached comparison operands
        // below are only trusted while these two refer to the same instruction.
        private OpCode _optOpLastCompare;
        private OpCode _optOpLastFlagSet;

        // Copies of the two operands of the last comparison.
        private Operand _optCmpTempN;
        private Operand _optCmpTempM;

        private Block _currBlock;

        /// <summary>
        /// Basic block currently being translated. Assigning a new block discards
        /// the cached comparison state, since it is only valid within one block.
        /// </summary>
        public Block CurrBlock
        {
            get => _currBlock;
            set
            {
                _currBlock = value;

                ResetBlockState();
            }
        }

        /// <summary>Opcode currently being translated.</summary>
        public OpCode CurrOp { get; set; }

        /// <summary>Guest memory manager.</summary>
        public IMemoryManager Memory { get; }

        /// <summary>True when the profiled translation cache is enabled.</summary>
        public bool HasPtc { get; }

        public EntryTable<uint> CountTable { get; }
        public AddressTable<ulong> FunctionTable { get; }
        public TranslatorStubs Stubs { get; }

        public ulong EntryAddress { get; }
        public bool HighCq { get; }
        public Aarch32Mode Mode { get; }

        public ArmEmitterContext(
            IMemoryManager memory,
            EntryTable<uint> countTable,
            AddressTable<ulong> funcTable,
            TranslatorStubs stubs,
            ulong entryAddress,
            bool highCq,
            Aarch32Mode mode)
        {
            // PTC state is sampled once; it determines how calls are emitted below.
            HasPtc = Ptc.State != PtcState.Disabled;
            Memory = memory;
            CountTable = countTable;
            FunctionTable = funcTable;
            Stubs = stubs;
            EntryAddress = entryAddress;
            HighCq = highCq;
            Mode = mode;

            _labels = new Dictionary<ulong, Operand>();
        }

        /// <summary>
        /// Emits a call to a managed method. When PTC is active, the call goes
        /// through the delegate table so the target can be re-resolved on load,
        /// instead of baking in a raw function pointer.
        /// </summary>
        public override Operand Call(MethodInfo info, params Operand[] callArgs)
        {
            if (!HasPtc)
            {
                return base.Call(info, callArgs);
            }

            int index = Delegates.GetDelegateIndex(info);
            IntPtr funcPtr = Delegates.GetDelegateFuncPtrByIndex(index);

            OperandType returnType = GetOperandType(info.ReturnType);

            // Symbol lets the PTC relocate this address on a future load.
            Symbol symbol = new Symbol(SymbolType.DelegateTable, (ulong)index);

            Symbols.Add((ulong)funcPtr.ToInt64(), info.Name);

            return Call(Const(funcPtr.ToInt64(), symbol), returnType, callArgs);
        }

        /// <summary>
        /// Gets (creating on first use) the IR label for the given guest address.
        /// </summary>
        public Operand GetLabel(ulong address)
        {
            if (_labels.TryGetValue(address, out Operand label))
            {
                return label;
            }

            label = Label();

            _labels.Add(address, label);

            return label;
        }

        /// <summary>
        /// Records the operands of the comparison being emitted for the current
        /// opcode, so a following conditional branch may reuse them.
        /// </summary>
        public void MarkComparison(Operand n, Operand m)
        {
            _optOpLastCompare = CurrOp;

            _optCmpTempN = Copy(n);
            _optCmpTempM = Copy(m);
        }

        /// <summary>
        /// Records that the current opcode modified at least one NZCV flag.
        /// </summary>
        public void MarkFlagSet(PState stateFlag)
        {
            // Set this only if any of the NZCV flag bits were modified.
            // This is used to ensure that when emitting a direct IL branch
            // instruction for compare + branch sequences, we're not expecting
            // to use comparison values from an old instruction, when in fact
            // the flags were already overwritten by another instruction further along.
            if (stateFlag >= PState.VFlag)
            {
                _optOpLastFlagSet = CurrOp;
            }
        }

        // Invalidate the cached comparison state; called whenever a new basic
        // block begins, since the cache is only valid within a single block.
        private void ResetBlockState()
        {
            _optOpLastCompare = null;
            _optOpLastFlagSet = null;
        }

        /// <summary>
        /// Attempts to materialize the given condition directly from the operands
        /// of the last comparison, avoiding a read of the emulated flags. Returns
        /// <c>default</c> when the cached comparison cannot be used (no comparison
        /// seen, flags clobbered since, or an unsupported instruction/condition).
        /// </summary>
        public Operand TryGetComparisonResult(Condition condition)
        {
            // The last flag write must be the comparison itself, otherwise the
            // cached operands no longer describe the current flag values.
            if (_optOpLastCompare == null || _optOpLastCompare != _optOpLastFlagSet)
            {
                return default;
            }

            Operand n = _optCmpTempN;
            Operand m = _optCmpTempM;

            InstName cmpName = _optOpLastCompare.Instruction.Name;

            if (cmpName == InstName.Subs)
            {
                return condition switch
                {
                    Condition.Eq => ICompareEqual(n, m),
                    Condition.Ne => ICompareNotEqual(n, m),
                    Condition.GeUn => ICompareGreaterOrEqualUI(n, m),
                    Condition.LtUn => ICompareLessUI(n, m),
                    Condition.GtUn => ICompareGreaterUI(n, m),
                    Condition.LeUn => ICompareLessOrEqualUI(n, m),
                    Condition.Ge => ICompareGreaterOrEqual(n, m),
                    Condition.Lt => ICompareLess(n, m),
                    Condition.Gt => ICompareGreater(n, m),
                    Condition.Le => ICompareLessOrEqual(n, m),
                    _ => default,
                };
            }

            if (cmpName == InstName.Adds && _optOpLastCompare is IOpCodeAluImm op)
            {
                // There are several limitations that need to be taken into account for CMN comparisons:
                // - The unsigned comparisons are not valid, as they depend on the
                // carry flag value, and they will have different values for addition and
                // subtraction. For addition, it's carry, and for subtraction, it's borrow.
                // So, we need to make sure we're not doing an unsigned compare for the CMN case.
                // - We can only do the optimization for the immediate variants,
                // because when the second operand value is exactly INT_MIN, we can't
                // negate the value as there's no positive counterpart.
                // Such invalid values can't be encoded on the immediate encodings.
                m = op.RegisterSize == RegisterSize.Int32
                    ? Const((int)-op.Immediate)
                    : Const(-op.Immediate);

                return condition switch
                {
                    Condition.Eq => ICompareEqual(n, m),
                    Condition.Ne => ICompareNotEqual(n, m),
                    Condition.Ge => ICompareGreaterOrEqual(n, m),
                    Condition.Lt => ICompareLess(n, m),
                    Condition.Gt => ICompareGreater(n, m),
                    Condition.Le => ICompareLessOrEqual(n, m),
                    _ => default,
                };
            }

            return default;
        }
    }
}