diff --git a/src/dynarmic/backend/x64/a32_emit_x64.cpp b/src/dynarmic/backend/x64/a32_emit_x64.cpp
index 056639b6..4ed451c9 100644
--- a/src/dynarmic/backend/x64/a32_emit_x64.cpp
+++ b/src/dynarmic/backend/x64/a32_emit_x64.cpp
@@ -100,7 +100,7 @@ A32EmitX64::BlockDescriptor A32EmitX64::Emit(IR::Block& block) {
     code.EnableWriting();
     SCOPE_EXIT { code.DisableWriting(); };
 
-    static const std::vector<HostLoc> gpr_order = [this] {
+    const std::vector<HostLoc> gpr_order = [this] {
         std::vector<HostLoc> gprs{any_gpr};
         if (conf.page_table) {
             gprs.erase(std::find(gprs.begin(), gprs.end(), HostLoc::R14));
@@ -1033,10 +1033,12 @@ void EmitWriteMemoryMov(BlockOfCode& code, const Xbyak::RegExp& addr, const Xbyak::Reg64& value) {
 }  // anonymous namespace
 
 template<size_t bitsize, auto callback>
-void A32EmitX64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst) {
+void A32EmitX64::EmitMemoryRead(A32EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    const auto fastmem_marker = ShouldFastmem(ctx, inst);
 
-    if (!conf.page_table) {
+    if (!conf.page_table && !fastmem_marker) {
+        // Neither fastmem nor page table: Use callbacks
         ctx.reg_alloc.HostCall(inst, {}, args[0]);
         Devirtualize<callback>(conf.callbacks).EmitCall(code);
         return;
@@ -1047,22 +1049,27 @@ void A32EmitX64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst) {
     const auto wrapped_fn = read_fallbacks[std::make_tuple(bitsize, vaddr.getIdx(), value.getIdx())];
 
-    if (const auto marker = ShouldFastmem(ctx, inst)) {
+    if (fastmem_marker) {
+        // Use fastmem
+        const auto src_ptr = r13 + vaddr;
+
         const auto location = code.getCurr();
-        EmitReadMemoryMov<bitsize>(code, value, r13 + vaddr);
+        EmitReadMemoryMov<bitsize>(code, value, src_ptr);
 
         fastmem_patch_info.emplace(
             Common::BitCast<u64>(location),
             FastmemPatchInfo{
                 Common::BitCast<u64>(code.getCurr()),
                 Common::BitCast<u64>(wrapped_fn),
-                *marker,
+                *fastmem_marker,
             });
 
         ctx.reg_alloc.DefineValue(inst, value);
         return;
     }
 
+    // Use page table
+    ASSERT(conf.page_table);
     Xbyak::Label abort, end;
 
     const auto src_ptr = EmitVAddrLookup(code, ctx, bitsize, abort, vaddr);
@@ -1079,10 +1086,12 @@ void A32EmitX64::ReadMemory(A32EmitContext& ctx, IR::Inst* inst) {
 }
 
 template<size_t bitsize, auto callback>
-void A32EmitX64::WriteMemory(A32EmitContext& ctx, IR::Inst* inst) {
+void A32EmitX64::EmitMemoryWrite(A32EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    const auto fastmem_marker = ShouldFastmem(ctx, inst);
 
-    if (!conf.page_table) {
+    if (!conf.page_table && !fastmem_marker) {
+        // Neither fastmem nor page table: Use callbacks
         ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
         Devirtualize<callback>(conf.callbacks).EmitCall(code);
         return;
@@ -1093,21 +1102,26 @@ void A32EmitX64::WriteMemory(A32EmitContext& ctx, IR::Inst* inst) {
     const auto wrapped_fn = write_fallbacks[std::make_tuple(bitsize, vaddr.getIdx(), value.getIdx())];
 
-    if (const auto marker = ShouldFastmem(ctx, inst)) {
+    if (fastmem_marker) {
+        // Use fastmem
+        const auto dest_ptr = r13 + vaddr;
+
         const auto location = code.getCurr();
-        EmitWriteMemoryMov<bitsize>(code, r13 + vaddr, value);
+        EmitWriteMemoryMov<bitsize>(code, dest_ptr, value);
 
         fastmem_patch_info.emplace(
             Common::BitCast<u64>(location),
             FastmemPatchInfo{
                 Common::BitCast<u64>(code.getCurr()),
                 Common::BitCast<u64>(wrapped_fn),
-                *marker,
+                *fastmem_marker,
             });
 
         return;
     }
 
+    // Use page table
+    ASSERT(conf.page_table);
     Xbyak::Label abort, end;
 
     const auto dest_ptr = EmitVAddrLookup(code, ctx, bitsize, abort, vaddr);
@@ -1122,35 +1136,35 @@ void A32EmitX64::WriteMemory(A32EmitContext& ctx, IR::Inst* inst) {
 }
 
 void A32EmitX64::EmitA32ReadMemory8(A32EmitContext& ctx, IR::Inst* inst) {
-    ReadMemory<8, &A32::UserCallbacks::MemoryRead8>(ctx, inst);
+    EmitMemoryRead<8, &A32::UserCallbacks::MemoryRead8>(ctx, inst);
 }
 
 void A32EmitX64::EmitA32ReadMemory16(A32EmitContext& ctx, IR::Inst* inst) {
-    ReadMemory<16, &A32::UserCallbacks::MemoryRead16>(ctx, inst);
+    EmitMemoryRead<16, &A32::UserCallbacks::MemoryRead16>(ctx, inst);
 }
 
 void A32EmitX64::EmitA32ReadMemory32(A32EmitContext& ctx, IR::Inst* inst) {
-    ReadMemory<32, &A32::UserCallbacks::MemoryRead32>(ctx, inst);
+    EmitMemoryRead<32, &A32::UserCallbacks::MemoryRead32>(ctx, inst);
 }
 
 void A32EmitX64::EmitA32ReadMemory64(A32EmitContext& ctx, IR::Inst* inst) {
-    ReadMemory<64, &A32::UserCallbacks::MemoryRead64>(ctx, inst);
+    EmitMemoryRead<64, &A32::UserCallbacks::MemoryRead64>(ctx, inst);
 }
 
 void A32EmitX64::EmitA32WriteMemory8(A32EmitContext& ctx, IR::Inst* inst) {
-    WriteMemory<8, &A32::UserCallbacks::MemoryWrite8>(ctx, inst);
+    EmitMemoryWrite<8, &A32::UserCallbacks::MemoryWrite8>(ctx, inst);
 }
 
 void A32EmitX64::EmitA32WriteMemory16(A32EmitContext& ctx, IR::Inst* inst) {
-    WriteMemory<16, &A32::UserCallbacks::MemoryWrite16>(ctx, inst);
+    EmitMemoryWrite<16, &A32::UserCallbacks::MemoryWrite16>(ctx, inst);
 }
 
 void A32EmitX64::EmitA32WriteMemory32(A32EmitContext& ctx, IR::Inst* inst) {
-    WriteMemory<32, &A32::UserCallbacks::MemoryWrite32>(ctx, inst);
+    EmitMemoryWrite<32, &A32::UserCallbacks::MemoryWrite32>(ctx, inst);
 }
 
 void A32EmitX64::EmitA32WriteMemory64(A32EmitContext& ctx, IR::Inst* inst) {
-    WriteMemory<64, &A32::UserCallbacks::MemoryWrite64>(ctx, inst);
+    EmitMemoryWrite<64, &A32::UserCallbacks::MemoryWrite64>(ctx, inst);
 }
 
 template<size_t bitsize, auto callback>
diff --git a/src/dynarmic/backend/x64/a32_emit_x64.h b/src/dynarmic/backend/x64/a32_emit_x64.h
index f9836c63..93ab4b87 100644
--- a/src/dynarmic/backend/x64/a32_emit_x64.h
+++ b/src/dynarmic/backend/x64/a32_emit_x64.h
@@ -106,9 +106,9 @@ protected:
 
     // Memory access helpers
     template<size_t bitsize, auto callback>
-    void ReadMemory(A32EmitContext& ctx, IR::Inst* inst);
+    void EmitMemoryRead(A32EmitContext& ctx, IR::Inst* inst);
     template<size_t bitsize, auto callback>
-    void WriteMemory(A32EmitContext& ctx, IR::Inst* inst);
+    void EmitMemoryWrite(A32EmitContext& ctx, IR::Inst* inst);
     template<size_t bitsize, auto callback>
     void ExclusiveReadMemory(A32EmitContext& ctx, IR::Inst* inst);
     template<size_t bitsize, auto callback>
diff --git a/src/dynarmic/backend/x64/a64_emit_x64.cpp b/src/dynarmic/backend/x64/a64_emit_x64.cpp
index a1078524..e4706f38 100644
--- a/src/dynarmic/backend/x64/a64_emit_x64.cpp
+++ b/src/dynarmic/backend/x64/a64_emit_x64.cpp
@@ -23,6 +23,7 @@
 #include "dynarmic/common/bit_util.h"
 #include "dynarmic/common/common_types.h"
 #include "dynarmic/common/scope_exit.h"
+#include "dynarmic/common/x64_disassemble.h"
 #include "dynarmic/frontend/A64/location_descriptor.h"
 #include "dynarmic/frontend/A64/types.h"
 #include "dynarmic/interface/exclusive_monitor.h"
@@ -60,6 +61,10 @@ A64EmitX64::A64EmitX64(BlockOfCode& code, A64::UserConfig conf, A64::Jit* jit_interface)
     GenTerminalHandlers();
     code.PreludeComplete();
     ClearFastDispatchTable();
+
+    exception_handler.SetFastmemCallback([this](u64 rip_){
+        return FastmemCallback(rip_);
+    });
 }
 
 A64EmitX64::~A64EmitX64() = default;
@@ -68,11 +73,14 @@ A64EmitX64::BlockDescriptor A64EmitX64::Emit(IR::Block& block) {
     code.EnableWriting();
     SCOPE_EXIT { code.DisableWriting(); };
 
-    static const std::vector<HostLoc> gpr_order = [this] {
+    const std::vector<HostLoc> gpr_order = [this] {
         std::vector<HostLoc> gprs{any_gpr};
         if (conf.page_table) {
             gprs.erase(std::find(gprs.begin(), gprs.end(), HostLoc::R14));
         }
+        if (conf.fastmem_pointer) {
+            gprs.erase(std::find(gprs.begin(), gprs.end(), HostLoc::R13));
+        }
         return gprs;
     }();
 
@@ -737,6 +745,40 @@ void A64EmitX64::EmitA64ClearExclusive(A64EmitContext&, IR::Inst*) {
     code.mov(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(0));
 }
 
+std::optional<A64EmitX64::DoNotFastmemMarker> A64EmitX64::ShouldFastmem(A64EmitContext& ctx, IR::Inst* inst) const {
+    if (!conf.fastmem_pointer || !exception_handler.SupportsFastmem()) {
+        return std::nullopt;
+    }
+
+    const auto marker = std::make_tuple(ctx.Location(), ctx.GetInstOffset(inst));
+    if (do_not_fastmem.count(marker) > 0) {
+        return std::nullopt;
+    }
+    return marker;
+}
+
+FakeCall A64EmitX64::FastmemCallback(u64 rip_) {
+    const auto iter = fastmem_patch_info.find(rip_);
+
+    if (iter == fastmem_patch_info.end()) {
+        fmt::print("dynarmic: Segfault happened within JITted code at rip = {:016x}\n", rip_);
+        fmt::print("Segfault wasn't at a fastmem patch location!\n");
+        fmt::print("Now dumping code.......\n\n");
+        Common::DumpDisassembledX64((void*)(rip_ & ~u64(0xFFF)), 0x1000);
+        ASSERT_FALSE("iter != fastmem_patch_info.end()");
+    }
+
+    if (conf.recompile_on_fastmem_failure) {
+        const auto marker = iter->second.marker;
+        do_not_fastmem.emplace(marker);
+        InvalidateBasicBlocks({std::get<0>(marker)});
+    }
+    FakeCall ret;
+    ret.call_rip = iter->second.callback;
+    ret.ret_rip = iter->second.resume_rip;
+    return ret;
+}
+
 namespace {
 
 constexpr size_t page_bits = 12;
@@ -838,6 +880,39 @@ Xbyak::RegExp EmitVAddrLookup(BlockOfCode& code, A64EmitContext& ctx, size_t bitsize, Xbyak::Label& abort, Xbyak::Reg64 vaddr) {
     return page + tmp;
 }
 
+Xbyak::RegExp EmitFastmemVAddr(BlockOfCode& code, A64EmitContext& ctx, Xbyak::Label& abort, Xbyak::Reg64 vaddr) {
+    const size_t unused_top_bits = 64 - ctx.conf.page_table_address_space_bits;
+
+    if (unused_top_bits == 0) {
+        return r13 + vaddr;
+    } else if (ctx.conf.silently_mirror_page_table) {
+        Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();
+        if (unused_top_bits < 32) {
+            code.mov(tmp, vaddr);
+            code.shl(tmp, int(unused_top_bits));
+            code.shr(tmp, int(unused_top_bits));
+        } else if (unused_top_bits == 32) {
+            code.mov(tmp.cvt32(), vaddr.cvt32());
+        } else {
+            code.mov(tmp.cvt32(), vaddr.cvt32());
+            code.and_(tmp, u32((1 << ctx.conf.page_table_address_space_bits) - 1));
+        }
+        return r13 + tmp;
+    } else {
+        if (ctx.conf.page_table_address_space_bits < 32) {
+            code.test(vaddr, u32(-(1 << ctx.conf.page_table_address_space_bits)));
+            code.jnz(abort, code.T_NEAR);
+        } else {
+            // TODO: Consider having TEST as above but coalesce 64-bit constant in register allocator
+            Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();
+            code.mov(tmp, vaddr);
+            code.shr(tmp, int(ctx.conf.page_table_address_space_bits));
+            code.jnz(abort, code.T_NEAR);
+        }
+        return r13 + vaddr;
+    }
+}
+
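As an illustration (not part of the patch): when silently_mirror_page_table is true, EmitFastmemVAddr simply discards the unused top bits of the guest address before adding it to r13, the fastmem base register. A host-side model of that computation, with hypothetical names:

// Models the silently_mirror_page_table path of EmitFastmemVAddr above.
#include <cstddef>
#include <cstdint>

std::uint64_t MirroredFastmemOffset(std::uint64_t vaddr, std::size_t address_space_bits) {
    const std::size_t unused_top_bits = 64 - address_space_bits;
    if (unused_top_bits == 0) {
        return vaddr;  // full 64-bit address space: nothing to discard
    }
    // Equivalent to the emitted "shl tmp, unused_top_bits; shr tmp, unused_top_bits".
    return (vaddr << unused_top_bits) >> unused_top_bits;
}

// e.g. MirroredFastmemOffset(0xFFFF'0001'2345'6789, 36) == 0x1'2345'6789, so the access
// lands at fastmem_pointer + 0x1'2345'6789 instead of faulting on an unrelated address.
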
 template<size_t bitsize>
 void EmitReadMemoryMov(BlockOfCode& code, const Xbyak::Reg64& value, const Xbyak::RegExp& addr) {
     switch (bitsize) {
@@ -880,9 +955,17 @@ void EmitWriteMemoryMov(BlockOfCode& code, const Xbyak::RegExp& addr, const Xbyak::Reg64& value) {
 }  // namespace
 
-template<size_t bitsize>
-void A64EmitX64::EmitDirectPageTableMemoryRead(A64EmitContext& ctx, IR::Inst* inst) {
+template<size_t bitsize, auto callback>
+void A64EmitX64::EmitMemoryRead(A64EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    const auto fastmem_marker = ShouldFastmem(ctx, inst);
+
+    if (!conf.page_table && !fastmem_marker) {
+        // Neither fastmem nor page table: Use callbacks
+        ctx.reg_alloc.HostCall(inst, {}, args[0]);
+        Devirtualize<callback>(conf.callbacks).EmitCall(code);
+        return;
+    }
 
     const Xbyak::Reg64 vaddr = ctx.reg_alloc.UseGpr(args[0]);
     const Xbyak::Reg64 value = ctx.reg_alloc.ScratchGpr();
@@ -891,8 +974,27 @@ void A64EmitX64::EmitDirectPageTableMemoryRead(A64EmitContext& ctx, IR::Inst* inst) {
     Xbyak::Label abort, end;
 
-    const auto src_ptr = EmitVAddrLookup(code, ctx, bitsize, abort, vaddr);
-    EmitReadMemoryMov<bitsize>(code, value, src_ptr);
+    if (fastmem_marker) {
+        // Use fastmem
+        const auto src_ptr = EmitFastmemVAddr(code, ctx, abort, vaddr);
+
+        const auto location = code.getCurr();
+        EmitReadMemoryMov<bitsize>(code, value, src_ptr);
+
+        fastmem_patch_info.emplace(
+            Common::BitCast<u64>(location),
+            FastmemPatchInfo{
+                Common::BitCast<u64>(code.getCurr()),
+                Common::BitCast<u64>(wrapped_fn),
+                *fastmem_marker,
+            }
+        );
+    } else {
+        // Use page table
+        ASSERT(conf.page_table);
+        const auto src_ptr = EmitVAddrLookup(code, ctx, bitsize, abort, vaddr);
+        EmitReadMemoryMov<bitsize>(code, value, src_ptr);
+    }
 
     code.L(end);
     code.SwitchToFarCode();
@@ -904,9 +1006,17 @@ void A64EmitX64::EmitDirectPageTableMemoryRead(A64EmitContext& ctx, IR::Inst* inst) {
     ctx.reg_alloc.DefineValue(inst, value);
 }
 
-template<size_t bitsize>
-void A64EmitX64::EmitDirectPageTableMemoryWrite(A64EmitContext& ctx, IR::Inst* inst) {
+template<size_t bitsize, auto callback>
+void A64EmitX64::EmitMemoryWrite(A64EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    const auto fastmem_marker = ShouldFastmem(ctx, inst);
+
+    if (!conf.page_table && !fastmem_marker) {
+        // Neither fastmem nor page table: Use callbacks
+        ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
+        Devirtualize<callback>(conf.callbacks).EmitCall(code);
+        return;
+    }
 
     const Xbyak::Reg64 vaddr = ctx.reg_alloc.UseGpr(args[0]);
     const Xbyak::Reg64 value = ctx.reg_alloc.UseGpr(args[1]);
@@ -915,8 +1025,27 @@ void A64EmitX64::EmitDirectPageTableMemoryWrite(A64EmitContext& ctx, IR::Inst* inst) {
     Xbyak::Label abort, end;
 
-    const auto dest_ptr = EmitVAddrLookup(code, ctx, bitsize, abort, vaddr);
-    EmitWriteMemoryMov<bitsize>(code, dest_ptr, value);
+    if (fastmem_marker) {
+        // Use fastmem
+        const auto dest_ptr = EmitFastmemVAddr(code, ctx, abort, vaddr);
+
+        const auto location = code.getCurr();
+        EmitWriteMemoryMov<bitsize>(code, dest_ptr, value);
+
+        fastmem_patch_info.emplace(
+            Common::BitCast<u64>(location),
+            FastmemPatchInfo{
+                Common::BitCast<u64>(code.getCurr()),
+                Common::BitCast<u64>(wrapped_fn),
+                *fastmem_marker,
+            }
+        );
+    } else {
+        // Use page table
+        ASSERT(conf.page_table);
+        const auto dest_ptr = EmitVAddrLookup(code, ctx, bitsize, abort, vaddr);
+        EmitWriteMemoryMov<bitsize>(code, dest_ptr, value);
+    }
 
     code.L(end);
     code.SwitchToFarCode();
@@ -927,47 +1056,19 @@ void A64EmitX64::EmitDirectPageTableMemoryWrite(A64EmitContext& ctx, IR::Inst* inst) {
 }
 
 void A64EmitX64::EmitA64ReadMemory8(A64EmitContext& ctx, IR::Inst* inst) {
-    if (conf.page_table) {
-        EmitDirectPageTableMemoryRead<8>(ctx, inst);
-        return;
-    }
-
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.HostCall(inst, {}, args[0]);
-    Devirtualize<&A64::UserCallbacks::MemoryRead8>(conf.callbacks).EmitCall(code);
+    EmitMemoryRead<8, &A64::UserCallbacks::MemoryRead8>(ctx, inst);
 }
 
 void A64EmitX64::EmitA64ReadMemory16(A64EmitContext& ctx, IR::Inst* inst) {
-    if (conf.page_table) {
-        EmitDirectPageTableMemoryRead<16>(ctx, inst);
-        return;
-    }
-
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.HostCall(inst, {}, args[0]);
-    Devirtualize<&A64::UserCallbacks::MemoryRead16>(conf.callbacks).EmitCall(code);
+    EmitMemoryRead<16, &A64::UserCallbacks::MemoryRead16>(ctx, inst);
 }
 
 void A64EmitX64::EmitA64ReadMemory32(A64EmitContext& ctx, IR::Inst* inst) {
-    if (conf.page_table) {
-        EmitDirectPageTableMemoryRead<32>(ctx, inst);
-        return;
-    }
-
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.HostCall(inst, {}, args[0]);
-    Devirtualize<&A64::UserCallbacks::MemoryRead32>(conf.callbacks).EmitCall(code);
+    EmitMemoryRead<32, &A64::UserCallbacks::MemoryRead32>(ctx, inst);
 }
 
 void A64EmitX64::EmitA64ReadMemory64(A64EmitContext& ctx, IR::Inst* inst) {
-    if (conf.page_table) {
-        EmitDirectPageTableMemoryRead<64>(ctx, inst);
-        return;
-    }
-
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.HostCall(inst, {}, args[0]);
-    Devirtualize<&A64::UserCallbacks::MemoryRead64>(conf.callbacks).EmitCall(code);
+    EmitMemoryRead<64, &A64::UserCallbacks::MemoryRead64>(ctx, inst);
 }
 
 void A64EmitX64::EmitA64ReadMemory128(A64EmitContext& ctx, IR::Inst* inst) {
@@ -999,47 +1100,19 @@ void A64EmitX64::EmitA64ReadMemory128(A64EmitContext& ctx, IR::Inst* inst) {
 }
 
 void A64EmitX64::EmitA64WriteMemory8(A64EmitContext& ctx, IR::Inst* inst) {
-    if (conf.page_table) {
-        EmitDirectPageTableMemoryWrite<8>(ctx, inst);
-        return;
-    }
-
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
-    Devirtualize<&A64::UserCallbacks::MemoryWrite8>(conf.callbacks).EmitCall(code);
+    EmitMemoryWrite<8, &A64::UserCallbacks::MemoryWrite8>(ctx, inst);
 }
 
 void A64EmitX64::EmitA64WriteMemory16(A64EmitContext& ctx, IR::Inst* inst) {
-    if (conf.page_table) {
-        EmitDirectPageTableMemoryWrite<16>(ctx, inst);
-        return;
-    }
-
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
-    Devirtualize<&A64::UserCallbacks::MemoryWrite16>(conf.callbacks).EmitCall(code);
+    EmitMemoryWrite<16, &A64::UserCallbacks::MemoryWrite16>(ctx, inst);
 }
 
 void A64EmitX64::EmitA64WriteMemory32(A64EmitContext& ctx, IR::Inst* inst) {
-    if (conf.page_table) {
-        EmitDirectPageTableMemoryWrite<32>(ctx, inst);
-        return;
-    }
-
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
-    Devirtualize<&A64::UserCallbacks::MemoryWrite32>(conf.callbacks).EmitCall(code);
+    EmitMemoryWrite<32, &A64::UserCallbacks::MemoryWrite32>(ctx, inst);
 }
 
 void A64EmitX64::EmitA64WriteMemory64(A64EmitContext& ctx, IR::Inst* inst) {
-    if (conf.page_table) {
-        EmitDirectPageTableMemoryWrite<64>(ctx, inst);
-        return;
-    }
-
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
-    Devirtualize<&A64::UserCallbacks::MemoryWrite64>(conf.callbacks).EmitCall(code);
+    EmitMemoryWrite<64, &A64::UserCallbacks::MemoryWrite64>(ctx, inst);
 }
 
 void A64EmitX64::EmitA64WriteMemory128(A64EmitContext& ctx, IR::Inst* inst) {
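For context (sketch, not part of the patch): the FastmemPatchInfo recorded by the emitters above is consumed when the host faults inside the patched mov. FastmemCallback maps the faulting RIP to a FakeCall whose call_rip/ret_rip fields name the wrapped fallback and the resume point; the platform exception handler then diverts the thread as if the mov had been a call to that fallback. The helper below is a simplified assumption; only the FakeCall field names come from this patch.

#include <cstdint>

// Same shape as the FakeCall filled in by A64EmitX64::FastmemCallback.
struct FakeCall {
    std::uint64_t call_rip;  // wrapped MemoryRead*/MemoryWrite* fallback thunk to run
    std::uint64_t ret_rip;   // address just past the patched mov, where execution resumes
};

// Hypothetical helper: what the exception handler conceptually does with a FakeCall.
void DivertToFallback(std::uint64_t& rip, std::uint64_t& rsp, const FakeCall& fc) {
    rsp -= sizeof(std::uint64_t);                         // simulate a CALL instruction:
    *reinterpret_cast<std::uint64_t*>(rsp) = fc.ret_rip;  //   push the resume address,
    rip = fc.call_rip;                                    //   then jump to the fallback
}
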
diff --git a/src/dynarmic/backend/x64/a64_emit_x64.h b/src/dynarmic/backend/x64/a64_emit_x64.h
index 8d886038..32c14a6b 100644
--- a/src/dynarmic/backend/x64/a64_emit_x64.h
+++ b/src/dynarmic/backend/x64/a64_emit_x64.h
@@ -78,15 +78,6 @@ protected:
     FastDispatchEntry& (*fast_dispatch_table_lookup)(u64) = nullptr;
     void GenTerminalHandlers();
 
-    template<size_t bitsize>
-    void EmitDirectPageTableMemoryRead(A64EmitContext& ctx, IR::Inst* inst);
-    template<size_t bitsize>
-    void EmitDirectPageTableMemoryWrite(A64EmitContext& ctx, IR::Inst* inst);
-    template<size_t bitsize, auto callback>
-    void EmitExclusiveReadMemory(A64EmitContext& ctx, IR::Inst* inst);
-    template<size_t bitsize, auto callback>
-    void EmitExclusiveWriteMemory(A64EmitContext& ctx, IR::Inst* inst);
-
     // Microinstruction emitters
     void EmitPushRSB(EmitContext& ctx, IR::Inst* inst);
 #define OPCODE(...)
@@ -100,6 +91,28 @@ protected:
     // Helpers
     std::string LocationDescriptorToFriendlyName(const IR::LocationDescriptor&) const override;
 
+    // Fastmem information
+    using DoNotFastmemMarker = std::tuple<IR::LocationDescriptor, std::size_t>;
+    struct FastmemPatchInfo {
+        u64 resume_rip;
+        u64 callback;
+        DoNotFastmemMarker marker;
+    };
+    tsl::robin_map<u64, FastmemPatchInfo> fastmem_patch_info;
+    std::set<DoNotFastmemMarker> do_not_fastmem;
+    std::optional<DoNotFastmemMarker> ShouldFastmem(A64EmitContext& ctx, IR::Inst* inst) const;
+    FakeCall FastmemCallback(u64 rip);
+
+    // Memory access helpers
+    template<size_t bitsize, auto callback>
+    void EmitMemoryRead(A64EmitContext& ctx, IR::Inst* inst);
+    template<size_t bitsize, auto callback>
+    void EmitMemoryWrite(A64EmitContext& ctx, IR::Inst* inst);
+    template<size_t bitsize, auto callback>
+    void EmitExclusiveReadMemory(A64EmitContext& ctx, IR::Inst* inst);
+    template<size_t bitsize, auto callback>
+    void EmitExclusiveWriteMemory(A64EmitContext& ctx, IR::Inst* inst);
+
     // Terminal instruction emitters
     void EmitTerminalImpl(IR::Term::Interpret terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
     void EmitTerminalImpl(IR::Term::ReturnToDispatch terminal, IR::LocationDescriptor initial_location, bool is_single_step) override;
diff --git a/src/dynarmic/backend/x64/a64_interface.cpp b/src/dynarmic/backend/x64/a64_interface.cpp
index a758672a..940797d8 100644
--- a/src/dynarmic/backend/x64/a64_interface.cpp
+++ b/src/dynarmic/backend/x64/a64_interface.cpp
@@ -38,6 +38,9 @@ static std::function<void(BlockOfCode&)> GenRCP(const A64::UserConfig& conf) {
         if (conf.page_table) {
             code.mov(code.r14, Common::BitCast<u64>(conf.page_table));
         }
+        if (conf.fastmem_pointer) {
+            code.mov(code.r13, Common::BitCast<u64>(conf.fastmem_pointer));
+        }
     };
 }
 
diff --git a/src/dynarmic/interface/A32/config.h b/src/dynarmic/interface/A32/config.h
index 1839eb1a..dbf64c6b 100644
--- a/src/dynarmic/interface/A32/config.h
+++ b/src/dynarmic/interface/A32/config.h
@@ -173,6 +173,8 @@ struct UserConfig {
     void* fastmem_pointer = nullptr;
     /// Determines if instructions that pagefault should cause recompilation of that block
     /// with fastmem disabled.
+    /// Recompiled code will use the page_table if it is available; otherwise memory
+    /// accesses will hit the memory callbacks.
     bool recompile_on_fastmem_failure = true;
 
     // Coprocessors
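To make the comment above concrete (illustration, not part of the patch; MakeFastmemConfig is hypothetical and the include path follows this tree's layout):

#include <dynarmic/interface/A32/config.h>

// With both fastmem_pointer and recompile_on_fastmem_failure set, a block that faults
// under fastmem is recompiled to use the page table if one is configured, and the
// memory callbacks otherwise.
Dynarmic::A32::UserConfig MakeFastmemConfig(Dynarmic::A32::UserCallbacks* env, void* fastmem_base) {
    Dynarmic::A32::UserConfig config;
    config.callbacks = env;
    config.fastmem_pointer = fastmem_base;  // base of the host mapping backing guest memory
    config.recompile_on_fastmem_failure = true;
    return config;
}
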
diff --git a/src/dynarmic/interface/A64/config.h b/src/dynarmic/interface/A64/config.h
index 5535580b..baa08577 100644
--- a/src/dynarmic/interface/A64/config.h
+++ b/src/dynarmic/interface/A64/config.h
@@ -202,7 +202,7 @@ struct UserConfig {
     void** page_table = nullptr;
     /// Declares how many valid address bits are there in virtual addresses.
     /// Determines the size of page_table. Valid values are between 12 and 64 inclusive.
-    /// This is only used if page_table is not nullptr.
+    /// This is only used if page_table or fastmem_pointer is not nullptr.
     size_t page_table_address_space_bits = 36;
     /// Masks out the first N bits in host pointers from the page table.
     /// The intention behind this is to allow users of Dynarmic to pack attributes in the
@@ -213,7 +213,7 @@ struct UserConfig {
     /// page table. If true, Dynarmic will silently mirror page_table's address space. If
     /// false, accessing memory outside of page_table bounds will result in a call to the
     /// relevant memory callback.
-    /// This is only used if page_table is not nullptr.
+    /// This is only used if page_table or fastmem_pointer is not nullptr.
     bool silently_mirror_page_table = true;
     /// Determines if the pointer in the page_table shall be offseted locally or globally.
     /// 'false' will access page_table[addr >> bits][addr & mask]
@@ -232,6 +232,18 @@ struct UserConfig {
     /// page boundary.
     bool only_detect_misalignment_via_page_table_on_page_boundary = false;
 
+    /// Fastmem Pointer
+    /// This should point to the beginning of a 2^page_table_address_space_bits bytes
+    /// address space which is arranged just like what you wish for emulated memory to
+    /// be. If the host page faults on an address, the JIT will fall back to calling the
+    /// MemoryRead*/MemoryWrite* callbacks.
+    void* fastmem_pointer = nullptr;
+    /// Determines if instructions that pagefault should cause recompilation of that block
+    /// with fastmem disabled.
+    /// Recompiled code will use the page_table if it is available; otherwise memory
+    /// accesses will hit the memory callbacks.
+    bool recompile_on_fastmem_failure = true;
+
     /// This option relates to translation. Generally when we run into an unpredictable
     /// instruction the ExceptionRaised callback is called. If this is true, we define
     /// definite behaviour for some unpredictable instructions.
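Putting the new A64 options together, a usage sketch (not part of the patch; the reservation scheme, MakeJit, and include paths are assumptions): the embedder reserves a 2^page_table_address_space_bits byte window of host address space, maps guest-backed pages into it, and passes its base as fastmem_pointer. Anything left unmapped faults, is serviced through the MemoryRead*/MemoryWrite* callbacks, and, with recompile_on_fastmem_failure, the offending block is then recompiled without fastmem.

#include <cstddef>
#include <memory>
#include <sys/mman.h>

#include <dynarmic/interface/A64/a64.h>
#include <dynarmic/interface/A64/config.h>

// Hypothetical embedder helper; assumes a POSIX host.
std::unique_ptr<Dynarmic::A64::Jit> MakeJit(Dynarmic::A64::UserCallbacks& env) {
    constexpr std::size_t address_space_bits = 36;

    // Reserve 2^36 bytes of host address space. Guest RAM is later mapped into this
    // window by the embedder; everything else stays PROT_NONE and faults back to the
    // memory callbacks.
    void* const reservation = mmap(nullptr, std::size_t{1} << address_space_bits, PROT_NONE,
                                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

    Dynarmic::A64::UserConfig config;
    config.callbacks = &env;
    config.fastmem_pointer = reservation;
    config.page_table_address_space_bits = address_space_bits;
    config.silently_mirror_page_table = true;    // wrap out-of-range guest addresses into the window
    config.recompile_on_fastmem_failure = true;  // faulting blocks permanently fall back
    return std::make_unique<Dynarmic::A64::Jit>(config);
}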