mirror of https://github.com/Atmosphere-NX/Atmosphere.git
synced 2024-12-23 02:42:09 +00:00

kern: fuck the KPolice^H^H^H^H^H^HPageGroups

parent dc7862882f
commit 96937a611d

8 changed files with 493 additions and 231 deletions
@@ -36,6 +36,10 @@ namespace ams::kern::arch::arm64 {
             void Finalize() { m_page_table.Finalize(); }
 
+            ALWAYS_INLINE KScopedLightLock AcquireDeviceMapLock() {
+                return m_page_table.AcquireDeviceMapLock();
+            }
+
             Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
                 return m_page_table.SetMemoryPermission(addr, size, perm);
             }
@@ -148,22 +152,30 @@ namespace ams::kern::arch::arm64 {
                 return m_page_table.WriteDebugIoMemory(address, buffer, size);
             }
 
-            Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
-                return m_page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
+            Result LockForMapDeviceAddressSpace(KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
+                return m_page_table.LockForMapDeviceAddressSpace(address, size, perm, is_aligned);
+            }
+
+            Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size) {
+                return m_page_table.LockForUnmapDeviceAddressSpace(address, size);
             }
 
             Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
                 return m_page_table.UnlockForDeviceAddressSpace(address, size);
             }
 
-            Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
-                return m_page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
-            }
-
             Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
                 return m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
             }
 
+            Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
+                return m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm, is_aligned);
+            }
+
+            Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size) {
+                return m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size);
+            }
+
             Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
                 return m_page_table.LockForIpcUserBuffer(out, address, size);
             }
@@ -188,6 +200,10 @@ namespace ams::kern::arch::arm64 {
                 return m_page_table.UnlockForCodeMemory(address, size, pg);
             }
 
+            Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size) {
+                return m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size);
+            }
+
             Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
                 return m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
             }
@@ -240,6 +256,10 @@ namespace ams::kern::arch::arm64 {
                 return m_page_table.UnmapPhysicalMemoryUnsafe(address, size);
             }
 
+            Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KProcessPageTable &src_page_table, KProcessAddress src_address) {
+                return m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table, src_address);
+            }
+
             void DumpMemoryBlocks() const {
                 return m_page_table.DumpMemoryBlocks();
             }
@@ -69,8 +69,8 @@ namespace ams::kern::board::nintendo::nx {
             Result Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size);
             Result Detach(ams::svc::DeviceName device_name);
 
-            Result Map(size_t *out_mapped_size, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings);
-            Result Unmap(const KPageGroup &pg, KDeviceVirtualAddress device_address);
+            Result Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings);
+            Result Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address);
 
             void Unmap(KDeviceVirtualAddress device_address, size_t size) {
                 return this->UnmapImpl(device_address, size, false);
@@ -78,12 +78,11 @@ namespace ams::kern::board::nintendo::nx {
         private:
             Result MapDevicePage(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm);
 
-            Result MapImpl(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm);
+            Result MapImpl(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned);
             void UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force);
 
             bool IsFree(KDeviceVirtualAddress address, u64 size) const;
-            Result MakePageGroup(KPageGroup *out, KDeviceVirtualAddress address, u64 size) const;
-            bool Compare(const KPageGroup &pg, KDeviceVirtualAddress device_address) const;
+            bool Compare(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) const;
 
         public:
             static void Initialize();
@@ -55,6 +55,13 @@ namespace ams::kern {
         public:
             using TraversalEntry = KPageTableImpl::TraversalEntry;
             using TraversalContext = KPageTableImpl::TraversalContext;
 
+            struct MemoryRange {
+                KVirtualAddress address;
+                size_t size;
+
+                void Close();
+            };
+
         protected:
             enum MemoryFillValue {
                 MemoryFillValue_Zero = 0,
@@ -155,6 +162,7 @@
             size_t m_mapped_ipc_server_memory{};
             mutable KLightLock m_general_lock{};
             mutable KLightLock m_map_physical_memory_lock{};
+            KLightLock m_device_map_lock{};
             KPageTableImpl m_impl{};
             KMemoryBlockManager m_memory_block_manager{};
             u32 m_allocate_option{};
@@ -199,6 +207,10 @@
                 return this->CanContain(addr, size, KMemoryState_AliasCode);
             }
 
+            ALWAYS_INLINE KScopedLightLock AcquireDeviceMapLock() {
+                return KScopedLightLock(m_device_map_lock);
+            }
+
             KProcessAddress GetRegionAddress(KMemoryState state) const;
             size_t GetRegionSize(KMemoryState state) const;
             bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
@@ -290,6 +302,8 @@
             Result MakePageGroup(KPageGroup &pg, KProcessAddress addr, size_t num_pages);
             bool IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages);
 
+            Result GetContiguousMemoryRangeWithState(MemoryRange *out, KProcessAddress address, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr);
+
             NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
 
             Result MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
@@ -367,12 +381,15 @@
             Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size);
             Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size);
 
-            Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned);
-            Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
-            Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size);
+            Result LockForMapDeviceAddressSpace(KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned);
+            Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size);
+            Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
             Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size);
 
+            Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned);
+            Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange *out, KProcessAddress address, size_t size);
+
             Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size);
             Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
@@ -381,6 +398,8 @@
             Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size);
             Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg);
 
+            Result OpenMemoryRangeForProcessCacheOperation(MemoryRange *out, KProcessAddress address, size_t size);
+
             Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr);
             Result CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr);
             Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr);
@@ -398,6 +417,8 @@
             Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
             Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
 
+            Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase &src_pt, KProcessAddress src_address);
+
             void DumpMemoryBlocksLocked() const {
                 MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
                 m_memory_block_manager.DumpBlocks();
@@ -216,6 +216,12 @@ namespace ams::kern::board::nintendo::nx {
                     return (m_value & (1u << n));
                 }
 
+                template<Bit... Bits>
+                constexpr ALWAYS_INLINE u32 SelectBits() const {
+                    constexpr u32 Mask = ((1u << Bits) | ...);
+                    return m_value & Mask;
+                }
+
                 constexpr ALWAYS_INLINE bool GetBit(Bit n) const {
                     return this->SelectBit(n) != 0;
                 }
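A note on the new SelectBits helper (an illustrative aside, not part of the commit): the fold expression builds a single compile-time mask out of the template arguments. A minimal standalone sketch of the same pattern, using hypothetical bit positions 30 and 31:

    #include <cstdint>

    template<std::uint32_t... Bits>
    constexpr std::uint32_t SelectBits(std::uint32_t value) {
        /* C++17 unary right fold over '|': (1u << B0) | (1u << B1) | ... */
        constexpr std::uint32_t Mask = ((1u << Bits) | ...);
        return value & Mask;
    }

    /* (1u << 30) | (1u << 31) == 0xC0000000, so only those two bits survive the AND. */
    static_assert(SelectBits<30, 31>(0xFFFFFFFFu) == 0xC0000000u);

This is why IsValid() and GetAttributes() below can test several attribute bits with one constant-folded AND instead of OR-ing individual SelectBit() calls together.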
@@ -242,12 +248,14 @@
                 constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBit(Bit_NonSecure); }
                 constexpr ALWAYS_INLINE bool IsWriteable() const { return this->GetBit(Bit_Writeable); }
                 constexpr ALWAYS_INLINE bool IsReadable() const { return this->GetBit(Bit_Readable); }
-                constexpr ALWAYS_INLINE bool IsValid() const { return this->IsWriteable() || this->IsReadable(); }
+                constexpr ALWAYS_INLINE bool IsValid() const { return this->SelectBits<Bit_Readable, Bit_Writeable>(); }
 
-                constexpr ALWAYS_INLINE u32 GetAttributes() const { return this->SelectBit(Bit_NonSecure) | this->SelectBit(Bit_Writeable) | this->SelectBit(Bit_Readable); }
+                constexpr ALWAYS_INLINE u32 GetAttributes() const { return this->SelectBits<Bit_Readable, Bit_Writeable, Bit_NonSecure>(); }
 
                 constexpr ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const { return (static_cast<u64>(m_value) << DevicePageBits) & PhysicalAddressMask; }
 
+                ALWAYS_INLINE void InvalidateAttributes() { this->SetValue(m_value & ~(0xCu << 28)); }
+
                 ALWAYS_INLINE void Invalidate() { this->SetValue(0); }
             };
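On the constant in InvalidateAttributes() (the arithmetic is from the diff; the bit positions are an inference from IsValid()/GetAttributes() above, not stated explicitly): 0xCu << 28 == 0xC0000000, i.e. bits 31 and 30. Assuming Bit_Readable = 31 and Bit_Writeable = 30 (the usual Tegra SMMU PTE layout), InvalidateAttributes() clears exactly the readable/writeable bits, so IsValid(), now SelectBits<Bit_Readable, Bit_Writeable>(), turns false while the physical-address bits are preserved. The rewritten UnmapImpl below relies on that split: it first invalidates attributes and synchronizes, then reads GetPhysicalAddress() back out of the half-invalidated entries to close the memory manager's references, and only then fully invalidates them.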
@@ -847,7 +855,7 @@
         }
 
         /* Forcibly unmap all pages. */
-        this->UnmapImpl(0, (1ul << DeviceVirtualAddressBits), true);
+        this->UnmapImpl(0, (1ul << DeviceVirtualAddressBits), false);
 
         /* Release all asids. */
         for (size_t i = 0; i < TableCount; ++i) {
@@ -1117,12 +1125,11 @@
         return ResultSuccess();
     }
 
-    Result KDevicePageTable::MapImpl(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm) {
+    Result KDevicePageTable::MapImpl(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
         /* Clear the output size. */
         *out_mapped_size = 0;
 
         /* Get the size, and validate the address. */
-        const u64 size = pg.GetNumPages() * PageSize;
         MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0);
         MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
@@ -1130,28 +1137,33 @@
         R_UNLESS(this->IsFree(device_address, size), svc::ResultInvalidCurrentMemory());
 
         /* Ensure that if we fail, we unmap anything we mapped. */
-        auto unmap_guard = SCOPE_GUARD { this->UnmapImpl(device_address, size, true); };
+        auto unmap_guard = SCOPE_GUARD { this->UnmapImpl(device_address, size, false); };
 
         /* Iterate, mapping device pages. */
         KDeviceVirtualAddress cur_addr = device_address;
-        for (auto it = pg.begin(); it != pg.end(); ++it) {
-            /* Require that we be able to map the device page. */
-            R_UNLESS(IsHeapVirtualAddress(it->GetAddress()), svc::ResultInvalidCurrentMemory());
+        while (true) {
+            /* Get the current contiguous range. */
+            KPageTableBase::MemoryRange contig_range = {};
+            R_TRY(page_table->OpenMemoryRangeForMapDeviceAddressSpace(std::addressof(contig_range), process_address + *out_mapped_size, size - *out_mapped_size, ConvertToKMemoryPermission(device_perm), is_aligned));
 
-            /* Get the physical address for the page. */
-            const KPhysicalAddress phys_addr = GetHeapPhysicalAddress(it->GetAddress());
+            /* Ensure we close the range when we're done. */
+            ON_SCOPE_EXIT { contig_range.Close(); };
 
             /* Map the device page. */
-            const u64 block_size = it->GetSize();
             size_t mapped_size = 0;
-            R_TRY(this->MapDevicePage(std::addressof(mapped_size), num_pt, max_pt, phys_addr, block_size, cur_addr, device_perm));
+            R_TRY(this->MapDevicePage(std::addressof(mapped_size), num_pt, max_pt, GetHeapPhysicalAddress(contig_range.address), contig_range.size, cur_addr, device_perm));
 
             /* Advance. */
-            cur_addr += block_size;
+            cur_addr += contig_range.size;
             *out_mapped_size += mapped_size;
 
             /* If we didn't map as much as we wanted, break. */
-            if (mapped_size < block_size) {
+            if (mapped_size < contig_range.size) {
+                break;
+            }
+
+            /* Similarly, if we're done, break. */
+            if (*out_mapped_size >= size) {
                 break;
             }
         }
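A reading of the rewritten mapping loop above (interpretation, not text from the commit): each iteration reopens the range at process_address + *out_mapped_size for the remaining size - *out_mapped_size bytes, so the process mapping is consumed one physically contiguous extent at a time. With illustrative numbers, if size = 0x30000 and the first extent of 0x10000 bytes maps in full, the second iteration opens at process_address + 0x10000 for 0x20000 bytes. The loop terminates either when *out_mapped_size reaches size, or early when MapDevicePage() maps less than the extent, e.g. because the max_pt budget of newly allocated page tables runs out (Map() further below passes max_pt = 1 when refresh_mappings is set); the resulting partial mapping is what UnlockForDeviceAddressSpacePartialMap() exists to unwind.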
@@ -1186,8 +1198,6 @@
 
             /* Check if there's nothing mapped at l1. */
             if (l1 == nullptr || !l1[l1_index].IsValid()) {
-                MESOSPHERE_ASSERT(force);
-
                 const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
                 const size_t map_count = std::min<size_t>(remaining_in_entry, remaining / DevicePageSize);
@@ -1201,30 +1211,12 @@
                 const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
                 const size_t map_count = std::min<size_t>(remaining_in_entry, remaining / DevicePageSize);
                 size_t num_closed = 0;
-                bool invalidated_tlb = false;
 
+                /* Invalidate the attributes of all entries. */
                 for (size_t i = 0; i < map_count; ++i) {
                     if (l2[l2_index + i].IsValid()) {
-                        /* Get the physical address. */
-                        const KPhysicalAddress phys_addr = l2[l2_index + i].GetPhysicalAddress();
-                        MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr));
-
-                        /* Invalidate the entry. */
-                        l2[l2_index + i].Invalidate();
+                        l2[l2_index + i].InvalidateAttributes();
                         ++num_closed;
-
-                        /* Try to add the page to the group. */
-                        if (R_FAILED(pg.AddBlock(GetHeapVirtualAddress(phys_addr), DevicePageSize / PageSize))) {
-                            /* If we can't add it for deferred close, close it now. */
-                            cpu::StoreDataCache(std::addressof(l2[l2_index + i]), sizeof(PageTableEntry));
-                            InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l2[l2_index + i]))));
-                            SmmuSynchronizationBarrier();
-
-                            /* Close the page's reference. */
-                            mm.Close(GetHeapVirtualAddress(phys_addr), 1);
-                        }
-                    } else {
-                        MESOSPHERE_ASSERT(force);
                     }
                 }
                 cpu::StoreDataCache(std::addressof(l2[l2_index]), map_count * sizeof(PageTableEntry));
@@ -1235,6 +1227,38 @@
                 }
                 SmmuSynchronizationBarrier();
 
+                /* Close the memory manager's references to the pages. */
+                {
+                    KPhysicalAddress contig_phys_addr = Null<KPhysicalAddress>;
+                    size_t contig_count = 0;
+                    for (size_t i = 0; i < map_count; ++i) {
+                        /* Get the physical address. */
+                        const KPhysicalAddress phys_addr = l2[l2_index + i].GetPhysicalAddress();
+                        MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr));
+
+                        /* Fully invalidate the entry. */
+                        l2[l2_index + i].Invalidate();
+
+                        if (contig_count == 0) {
+                            /* Ensure that our address/count is valid. */
+                            contig_phys_addr = phys_addr;
+                            contig_count = contig_phys_addr != Null<KPhysicalAddress> ? 1 : 0;
+                        } else if (phys_addr == Null<KPhysicalAddress> || phys_addr != (contig_phys_addr + (contig_count * DevicePageSize))) {
+                            /* If we're no longer contiguous, close the range we've been building. */
+                            mm.Close(GetHeapVirtualAddress(contig_phys_addr), (contig_count * DevicePageSize) / PageSize);
+
+                            contig_phys_addr = phys_addr;
+                            contig_count = contig_phys_addr != Null<KPhysicalAddress> ? 1 : 0;
+                        } else {
+                            ++contig_count;
+                        }
+                    }
+
+                    if (contig_count > 0) {
+                        mm.Close(GetHeapVirtualAddress(contig_phys_addr), (contig_count * DevicePageSize) / PageSize);
+                    }
+                }
+
                 /* Close the pages. */
                 if (ptm.Close(KVirtualAddress(l2), num_closed)) {
                     /* Invalidate the l1 entry. */
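A walk-through of the new batched close loop (hypothetical addresses; DevicePageSize = 0x1000 is an assumption about the Tegra SMMU geometry): instead of closing one device page at a time, references are dropped over physically contiguous runs. If four valid entries hold physical addresses 0x10000, 0x11000, 0x12000, 0x80000, the loop issues mm.Close(GetHeapVirtualAddress(0x10000), (3 * 0x1000) / PageSize) when it hits the discontinuity at 0x80000, and the flush after the loop issues mm.Close(GetHeapVirtualAddress(0x80000), (1 * 0x1000) / PageSize) for the trailing run.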
@@ -1243,22 +1267,12 @@
 
                     /* Synchronize. */
                     InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
-                    InvalidateTlbSection(m_table_asids[l0_index], address);
                     SmmuSynchronizationBarrier();
 
-                    /* We invalidated the tlb. */
-                    invalidated_tlb = true;
-
                     /* Free the l2 page. */
                     ptm.Free(KVirtualAddress(l2));
                 }
 
-                /* Invalidate the tlb if we haven't already. */
-                if (!invalidated_tlb) {
-                    InvalidateTlbSection(m_table_asids[l0_index], address);
-                    SmmuSynchronizationBarrier();
-                }
-
                 /* Advance. */
                 address += map_count * DevicePageSize;
                 remaining -= map_count * DevicePageSize;
@@ -1287,114 +1301,158 @@
                 remaining -= DeviceLargePageSize;
             }
         }
-
-        /* Close references to the pages in the group. */
-        pg.Close();
     }
 
-    Result KDevicePageTable::MakePageGroup(KPageGroup *out, KDeviceVirtualAddress address, u64 size) const {
-        MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0);
-        MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0);
+    bool KDevicePageTable::Compare(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) const {
+        MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0);
+        MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
 
+        /* We need to traverse the ranges that make up our mapping, to make sure they're all good. Start by getting a contiguous range. */
+        KPageTableBase::MemoryRange contig_range = {};
+        if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), process_address, size))) {
+            return false;
+        }
+
+        /* Ensure that we close the range when we're done. */
+        bool range_open = true;
+        ON_SCOPE_EXIT { if (range_open) { contig_range.Close(); } };
+
         /* Walk the directory. */
-        u64 remaining = size;
-        bool first = true;
-        u32 attr = 0;
-        while (remaining > 0) {
-            const size_t l0_index = (address / DeviceRegionSize);
-            const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize;
-            const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize;
+        KProcessAddress cur_process_address = process_address;
+        size_t remaining_size = size;
+        KPhysicalAddress cur_phys_address = GetHeapPhysicalAddress(contig_range.address);
+        size_t remaining_in_range = contig_range.size;
+        bool first = true;
+        u32 first_attr = 0;
+        while (remaining_size > 0) {
+            /* Convert the device address to a series of indices. */
+            const size_t l0_index = (device_address / DeviceRegionSize);
+            const size_t l1_index = (device_address % DeviceRegionSize) / DeviceLargePageSize;
+            const size_t l2_index = (device_address % DeviceLargePageSize) / DevicePageSize;
+
             /* Get and validate l1. */
             const PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(m_tables[l0_index]);
-            R_UNLESS(l1 != nullptr, svc::ResultInvalidCurrentMemory());
-            R_UNLESS(l1[l1_index].IsValid(), svc::ResultInvalidCurrentMemory());
+            if (!(l1 != nullptr && l1[l1_index].IsValid())) {
+                return false;
+            }
 
             if (l1[l1_index].IsTable()) {
                 /* We're acting on an l2 entry. */
                 const PageTableEntry *l2 = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(l1[l1_index].GetPhysicalAddress()));
 
+                /* Determine the number of pages to check. */
                 const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
-                const size_t map_count = std::min<size_t>(remaining_in_entry, remaining / DevicePageSize);
+                const size_t map_count = std::min<size_t>(remaining_in_entry, remaining_size / DevicePageSize);
 
+                /* Check each page. */
                 for (size_t i = 0; i < map_count; ++i) {
                     /* Ensure the l2 entry is valid. */
-                    R_UNLESS(l2[l2_index + i].IsValid(), svc::ResultInvalidCurrentMemory());
-
-                    /* Get the physical address. */
-                    const KPhysicalAddress phys_addr = l2[l2_index + i].GetPhysicalAddress();
-                    MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr));
-
-                    /* Add to the group. */
-                    R_TRY(out->AddBlock(GetHeapVirtualAddress(phys_addr), DevicePageSize / PageSize));
-
-                    /* If this is our first entry, get the attribute. */
-                    if (first) {
-                        attr = l2[l2_index + i].GetAttributes();
-                        first = false;
-                    } else {
-                        /* Validate the attributes match the first entry. */
-                        R_UNLESS(l2[l2_index + i].GetAttributes() == attr, svc::ResultInvalidCurrentMemory());
+                    if (!l2[l2_index + i].IsValid()) {
+                        return false;
                     }
+
+                    /* Check that the attributes match the first attributes we encountered. */
+                    const u32 cur_attr = l2[l2_index + i].GetAttributes();
+                    if (!first && cur_attr != first_attr) {
+                        return false;
+                    }
+
+                    /* If there's nothing remaining in the range, refresh the range. */
+                    if (remaining_in_range == 0) {
+                        contig_range.Close();
+
+                        range_open = false;
+                        if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), cur_process_address, remaining_size))) {
+                            return false;
+                        }
+                        range_open = true;
+
+                        cur_phys_address = GetHeapPhysicalAddress(contig_range.address);
+                        remaining_in_range = contig_range.size;
+                    }
+
+                    /* Check that the physical address is expected. */
+                    if (l2[l2_index + i].GetPhysicalAddress() != cur_phys_address) {
+                        return false;
+                    }
+
+                    /* Advance. */
+                    cur_phys_address += DevicePageSize;
+                    cur_process_address += DevicePageSize;
+                    remaining_size -= DevicePageSize;
+                    remaining_in_range -= DevicePageSize;
+
+                    first = false;
+                    first_attr = cur_attr;
                 }
 
-                /* Advance. */
-                address += DevicePageSize * map_count;
-                remaining -= DevicePageSize * map_count;
+                /* Advance the device address. */
+                device_address += map_count * DevicePageSize;
             } else {
                 /* We're acting on an l1 entry. */
-                R_UNLESS(l2_index == 0, svc::ResultInvalidCurrentMemory());
-                R_UNLESS(remaining >= DeviceLargePageSize, svc::ResultInvalidCurrentMemory());
+                if (!(l2_index == 0 && remaining_size >= DeviceLargePageSize)) {
+                    return false;
+                }
 
-                /* Get the physical address. */
-                const KPhysicalAddress phys_addr = l1[l1_index].GetPhysicalAddress();
-                MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr));
+                /* Check that the attributes match the first attributes we encountered. */
+                const u32 cur_attr = l1[l1_index].GetAttributes();
+                if (!first && cur_attr != first_attr) {
+                    return false;
+                }
 
-                /* Add to the group. */
-                R_TRY(out->AddBlock(GetHeapVirtualAddress(phys_addr), DeviceLargePageSize / PageSize));
-
-                /* If this is our first entry, get the attribute. */
-                if (first) {
-                    attr = l1[l1_index].GetAttributes();
-                    first = false;
-                } else {
-                    /* Validate the attributes match the first entry. */
-                    R_UNLESS(l1[l1_index].GetAttributes() == attr, svc::ResultInvalidCurrentMemory());
+                /* If there's nothing remaining in the range, refresh the range. */
+                if (remaining_in_range == 0) {
+                    contig_range.Close();
+
+                    range_open = false;
+                    if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), cur_process_address, remaining_size))) {
+                        return false;
+                    }
+                    range_open = true;
+
+                    cur_phys_address = GetHeapPhysicalAddress(contig_range.address);
+                    remaining_in_range = contig_range.size;
+                }
+
+                /* Check that the physical address is expected, and there's enough in the range. */
+                if (remaining_in_range < DeviceLargePageSize || l1[l1_index].GetPhysicalAddress() != cur_phys_address) {
+                    return false;
                 }
 
                 /* Advance. */
-                address += DeviceLargePageSize;
-                remaining -= DeviceLargePageSize;
+                cur_phys_address += DeviceLargePageSize;
+                cur_process_address += DeviceLargePageSize;
+                remaining_size -= DeviceLargePageSize;
+                remaining_in_range -= DeviceLargePageSize;
+
+                first = false;
+                first_attr = cur_attr;
+
+                /* Advance the device address. */
+                device_address += DeviceLargePageSize;
             }
         }
 
-        return ResultSuccess();
+        /* The range is valid! */
+        return true;
     }
 
-    bool KDevicePageTable::Compare(const KPageGroup &compare_pg, KDeviceVirtualAddress device_address) const {
-        /* Check whether the page group we expect for the virtual address matches the page group we're validating. */
-        KPageGroup calc_pg(std::addressof(Kernel::GetBlockInfoManager()));
-        return (R_SUCCEEDED(this->MakePageGroup(std::addressof(calc_pg), device_address, compare_pg.GetNumPages() * PageSize))) &&
-               calc_pg.IsEquivalentTo(compare_pg);
-    }
-
-    Result KDevicePageTable::Map(size_t *out_mapped_size, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) {
+    Result KDevicePageTable::Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) {
         /* Clear the output size. */
         *out_mapped_size = 0;
 
         /* Map the pages. */
         s32 num_pt = 0;
-        return this->MapImpl(out_mapped_size, num_pt, refresh_mappings ? 1 : std::numeric_limits<s32>::max(), pg, device_address, device_perm);
+        return this->MapImpl(out_mapped_size, num_pt, refresh_mappings ? 1 : std::numeric_limits<s32>::max(), page_table, process_address, size, device_address, device_perm, refresh_mappings);
     }
 
-    Result KDevicePageTable::Unmap(const KPageGroup &pg, KDeviceVirtualAddress device_address) {
+    Result KDevicePageTable::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) {
         /* Validate address/size. */
-        const size_t size = pg.GetNumPages() * PageSize;
         MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0);
         MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
 
         /* Ensure the page group is correct. */
-        R_UNLESS(this->Compare(pg, device_address), svc::ResultInvalidCurrentMemory());
+        R_UNLESS(this->Compare(page_table, process_address, size, device_address), svc::ResultInvalidCurrentMemory());
 
         /* Unmap the pages. */
         this->UnmapImpl(device_address, size, false);
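For reading Compare() above, the device-address decomposition in the index computations (the geometry constants are assumptions based on the Tegra SMMU's usual layout: DevicePageSize = 4KB, DeviceLargePageSize = 4MB, DeviceRegionSize = 4GB): l0_index selects the 4GB region (one table/ASID each), l1_index the 4MB large page within that region, and l2_index the 4KB page within the large page. Worked example: device_address = 0x100403000 gives l0_index = 0x100403000 / 0x100000000 = 1, l1_index = 0x403000 / 0x400000 = 1, and l2_index = 0x3000 / 0x1000 = 3.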
@@ -71,12 +71,11 @@ namespace ams::kern {
        /* Lock the address space. */
        KScopedLightLock lk(m_lock);
 
-       /* Lock the pages. */
-       KPageGroup pg(page_table->GetBlockInfoManager());
-       R_TRY(page_table->LockForDeviceAddressSpace(std::addressof(pg), process_address, size, ConvertToKMemoryPermission(device_perm), is_aligned));
+       /* Lock the page table to prevent concurrent device mapping operations. */
+       KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
 
-       /* Close the pages we opened when we're done with them. */
-       ON_SCOPE_EXIT { pg.Close(); };
+       /* Lock the pages. */
+       R_TRY(page_table->LockForMapDeviceAddressSpace(process_address, size, ConvertToKMemoryPermission(device_perm), is_aligned));
 
        /* Ensure that if we fail, we don't keep unmapped pages locked. */
        auto unlock_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size)); };
@@ -87,7 +86,7 @@
        auto mapped_size_guard = SCOPE_GUARD { *out_mapped_size = 0; };
 
        /* Perform the mapping. */
-       R_TRY(m_table.Map(out_mapped_size, pg, device_address, device_perm, refresh_mappings));
+       R_TRY(m_table.Map(out_mapped_size, page_table, process_address, size, device_address, device_perm, refresh_mappings));
 
        /* Ensure that we unmap the pages if we fail to update the protections. */
        /* NOTE: Nintendo does not check the result of this unmap call. */
@@ -113,19 +112,18 @@
        /* Lock the address space. */
        KScopedLightLock lk(m_lock);
 
-       /* Make and open a page group for the unmapped region. */
-       KPageGroup pg(page_table->GetBlockInfoManager());
-       R_TRY(page_table->MakePageGroupForUnmapDeviceAddressSpace(std::addressof(pg), process_address, size));
+       /* Lock the page table to prevent concurrent device mapping operations. */
+       KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
 
-       /* Ensure the page group is closed on scope exit. */
-       ON_SCOPE_EXIT { pg.Close(); };
+       /* Lock the pages. */
+       R_TRY(page_table->LockForUnmapDeviceAddressSpace(process_address, size));
 
        /* If we fail to unmap, we want to do a partial unlock. */
        {
            auto unlock_guard = SCOPE_GUARD { page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size, size); };
 
            /* Unmap. */
-           R_TRY(m_table.Unmap(pg, device_address));
+           R_TRY(m_table.Unmap(page_table, process_address, size, device_address));
 
            unlock_guard.Cancel();
        }
@@ -74,6 +74,10 @@
    }
 
+   void KPageTableBase::MemoryRange::Close() {
+       Kernel::GetMemoryManager().Close(address, size / PageSize);
+   }
+
    Result KPageTableBase::InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end) {
        /* Initialize our members. */
        m_address_space_width = (is_64_bit) ? BITSIZEOF(u64) : BITSIZEOF(u32);
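MemoryRange::Close() drops the KMemoryManager references that the OpenMemoryRangeFor* helpers take on success (each of them calls Kernel::GetMemoryManager().Open(out->address, out->size / PageSize), as added further below). A minimal sketch of the open/close discipline, mirroring the call sites introduced by this commit:

    KPageTableBase::MemoryRange contig_range = {};
    R_TRY(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), address, size));
    ON_SCOPE_EXIT { contig_range.Close(); };  /* reference dropped even on early-return paths */
    /* ... use GetHeapPhysicalAddress(contig_range.address) and contig_range.size ... */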
@@ -1391,6 +1395,49 @@
        return cur_block_address == GetHeapVirtualAddress(cur_addr) && cur_block_pages == (cur_size / PageSize);
    }
 
+   Result KPageTableBase::GetContiguousMemoryRangeWithState(MemoryRange *out, KProcessAddress address, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
+       MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+       auto &impl = this->GetImpl();
+
+       /* Begin a traversal. */
+       TraversalContext context;
+       TraversalEntry cur_entry = {};
+       R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), svc::ResultInvalidCurrentMemory());
+
+       /* The region we're traversing has to be heap. */
+       const KPhysicalAddress phys_address = cur_entry.phys_addr;
+       R_UNLESS(this->IsHeapPhysicalAddress(phys_address), svc::ResultInvalidCurrentMemory());
+
+       /* Traverse until we have enough size or we aren't contiguous any more. */
+       size_t contig_size;
+       for (contig_size = cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); contig_size < size; contig_size += cur_entry.block_size) {
+           if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
+               break;
+           }
+           if (cur_entry.phys_addr != phys_address + contig_size) {
+               break;
+           }
+       }
+
+       /* Take the minimum size for our region. */
+       size = std::min(size, contig_size);
+
+       /* Check that the memory is contiguous. */
+       R_TRY(this->CheckMemoryStateContiguous(address, size,
+                                              state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted,
+                                              perm_mask, perm,
+                                              attr_mask, attr));
+
+       /* The memory is contiguous, so set the output range. */
+       *out = {
+           .address = GetLinearMappedVirtualAddress(phys_address),
+           .size = size,
+       };
+
+       return ResultSuccess();
+   }
+
    Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
        const size_t num_pages = size / PageSize;
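On the first term of the traversal loop in GetContiguousMemoryRangeWithState() (the worked numbers are illustrative): block_size - (phys_address & (block_size - 1)) is the distance from phys_address to the end of the mapping block containing it. With cur_entry.block_size = 0x200000 and phys_address = 0x40123000, the in-block offset is 0x123000, so the first iteration contributes 0x200000 - 0x123000 = 0xDD000 bytes. Traversal then extends contig_size while each successive entry begins exactly at phys_address + contig_size, and the requested size is finally clamped to the contiguous run that was found.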
@@ -2578,7 +2625,7 @@
        return ResultSuccess();
    }
 
-   Result KPageTableBase::LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
+   Result KPageTableBase::LockForMapDeviceAddressSpace(KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
        /* Lightly validate the range before doing anything else. */
        const size_t num_pages = size / PageSize;
        R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
@@ -2591,11 +2638,6 @@
        size_t num_allocator_blocks;
        R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, test_state, perm, perm, KMemoryAttribute_IpcLocked | KMemoryAttribute_Locked, KMemoryAttribute_None, KMemoryAttribute_DeviceShared));
 
-       /* Make the page group, if we should. */
-       if (out != nullptr) {
-           R_TRY(this->MakePageGroup(*out, address, num_pages));
-       }
-
        /* Create an update allocator. */
        Result allocator_result;
        KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
@@ -2604,10 +2646,33 @@
        /* Update the memory blocks. */
        m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None);
 
-       /* Open the page group. */
-       if (out != nullptr) {
-           out->Open();
-       }
+       return ResultSuccess();
+   }
+
+   Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size) {
+       /* Lightly validate the range before doing anything else. */
+       const size_t num_pages = size / PageSize;
+       R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
+
+       /* Lock the table. */
+       KScopedLightLock lk(m_general_lock);
+
+       /* Check the memory state. */
+       size_t num_allocator_blocks;
+       R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_allocator_blocks),
+                                              address, size,
+                                              KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap, KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap,
+                                              KMemoryPermission_None, KMemoryPermission_None,
+                                              KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+
+       /* Create an update allocator. */
+       Result allocator_result;
+       KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+       R_TRY(allocator_result);
+
+       /* Update the memory blocks. */
+       const KMemoryBlockManager::MemoryBlockLockFunction lock_func = m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
+       m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None);
 
        return ResultSuccess();
    }
@@ -2639,40 +2704,6 @@
        return ResultSuccess();
    }
 
-   Result KPageTableBase::MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
-       /* Lightly validate the range before doing anything else. */
-       const size_t num_pages = size / PageSize;
-       R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
-
-       /* Lock the table. */
-       KScopedLightLock lk(m_general_lock);
-
-       /* Check the memory state. */
-       size_t num_allocator_blocks;
-       R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_allocator_blocks),
-                                              address, size,
-                                              KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap, KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDeviceMap,
-                                              KMemoryPermission_None, KMemoryPermission_None,
-                                              KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
-
-       /* Create an update allocator. */
-       Result allocator_result;
-       KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
-       R_TRY(allocator_result);
-
-       /* Make the page group. */
-       R_TRY(this->MakePageGroup(*out, address, num_pages));
-
-       /* Update the memory blocks. */
-       const KMemoryBlockManager::MemoryBlockLockFunction lock_func = m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
-       m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None);
-
-       /* Open a reference to the pages in the page group. */
-       out->Open();
-
-       return ResultSuccess();
-   }
-
    Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
        /* Lightly validate the range before doing anything else. */
        const size_t num_pages = size / PageSize;
@@ -2689,23 +2720,23 @@
        size_t allocator_num_blocks = 0, unmapped_allocator_num_blocks = 0;
        if (unmapped_size) {
            if (m_enable_device_address_space_merge) {
-               R_TRY(this->CheckMemoryState(std::addressof(allocator_num_blocks),
+               R_TRY(this->CheckMemoryStateContiguous(std::addressof(allocator_num_blocks),
                                             address, size,
                                             KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
                                             KMemoryPermission_None, KMemoryPermission_None,
                                             KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
            }
-           R_TRY(this->CheckMemoryState(std::addressof(unmapped_allocator_num_blocks),
+           R_TRY(this->CheckMemoryStateContiguous(std::addressof(unmapped_allocator_num_blocks),
                                         mapped_end_address, unmapped_size,
                                         KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
                                         KMemoryPermission_None, KMemoryPermission_None,
                                         KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
        } else {
-           R_TRY(this->CheckMemoryState(std::addressof(allocator_num_blocks),
+           R_TRY(this->CheckMemoryStateContiguous(std::addressof(allocator_num_blocks),
                                         address, size,
                                         KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
                                         KMemoryPermission_None, KMemoryPermission_None,
                                         KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
        }
 
        /* Create an update allocator for the region. */
@@ -2750,6 +2781,41 @@ namespace ams::kern {
             return ResultSuccess();
         }
+
+        Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
+            /* Lock the table. */
+            KScopedLightLock lk(m_general_lock);
+
+            /* Get the range. */
+            const u32 test_state = KMemoryState_FlagReferenceCounted | (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap);
+            R_TRY(this->GetContiguousMemoryRangeWithState(out,
+                                                          address, size,
+                                                          test_state, test_state,
+                                                          perm, perm,
+                                                          KMemoryAttribute_IpcLocked | KMemoryAttribute_Locked, KMemoryAttribute_None));
+
+            /* We got the range, so open it. */
+            Kernel::GetMemoryManager().Open(out->address, out->size / PageSize);
+
+            return ResultSuccess();
+        }
+
+        Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange *out, KProcessAddress address, size_t size) {
+            /* Lock the table. */
+            KScopedLightLock lk(m_general_lock);
+
+            /* Get the range. */
+            R_TRY(this->GetContiguousMemoryRangeWithState(out,
+                                                          address, size,
+                                                          KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
+                                                          KMemoryPermission_None, KMemoryPermission_None,
+                                                          KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
+
+            /* We got the range, so open it. */
+            Kernel::GetMemoryManager().Open(out->address, out->size / PageSize);
+
+            return ResultSuccess();
+        }
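Both new functions return their result through the KPageTableBase::MemoryRange out-parameter, the commit's lightweight replacement for a KPageGroup in the common case where exactly one physically contiguous extent is needed. The struct's definition is not visible in this part of the diff; inferred purely from the Open()/Close() pairing used here and by the cache-operation caller further down, its shape is plausibly something like:

/* Inferred sketch (definition not shown in this diff): a single physically
 * contiguous extent. Close() would release the reference that the
 * OpenMemoryRangeFor*() functions take via Kernel::GetMemoryManager().Open(). */
struct MemoryRange {
    KPhysicalAddress address;
    size_t size;

    void Close() {
        Kernel::GetMemoryManager().Close(this->address, this->size / PageSize);
    }
};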
 
         Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
             return this->LockMemoryAndOpen(nullptr, out, address, size,
                                            KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer,
@@ -2804,6 +2870,23 @@ namespace ams::kern {
                                            KMemoryAttribute_Locked, std::addressof(pg));
         }
+
+        Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange *out, KProcessAddress address, size_t size) {
+            /* Lock the table. */
+            KScopedLightLock lk(m_general_lock);
+
+            /* Get the range. */
+            R_TRY(this->GetContiguousMemoryRangeWithState(out,
+                                                          address, size,
+                                                          KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
+                                                          KMemoryPermission_UserRead, KMemoryPermission_UserRead,
+                                                          KMemoryAttribute_Uncached, KMemoryAttribute_None));
+
+            /* We got the range, so open it. */
+            Kernel::GetMemoryManager().Open(out->address, out->size / PageSize);
+
+            return ResultSuccess();
+        }
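As with the device-address-space variants above, every successful call must be balanced by a Close() on the returned range, or the reference taken on the pages leaks. The real caller in the svc cache-operation hunk below does this with ON_SCOPE_EXIT; in isolation, the expected discipline looks like the following sketch (hypothetical caller, with `page_table`, `cur_address`, and `remaining_size` standing in for whatever the caller has in scope, and error paths elided):

/* Hypothetical caller sketch: open a contiguous range, guarantee the
 * balancing Close() even on early return, then operate on it. */
KPageTableBase::MemoryRange range = {};
R_TRY(page_table.OpenMemoryRangeForProcessCacheOperation(std::addressof(range), cur_address, remaining_size));
ON_SCOPE_EXIT { range.Close(); }; /* drop the reference taken by Open */

/* ... perform the cache maintenance on [range.address, range.address + range.size) ... */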
 
         Result KPageTableBase::CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
             /* Lightly validate the range before doing anything else. */
             R_UNLESS(this->Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
@@ -4553,4 +4636,108 @@ namespace ams::kern {
             return ResultSuccess();
         }
+
+        Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase &src_page_table, KProcessAddress src_address) {
+            /* We need to lock both this table, and the current process's table, so set up an alias. */
+            KPageTableBase &dst_page_table = *this;
+
+            /* Acquire the table locks. */
+            KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
+
+            /* Check that the memory is mapped in the destination process. */
+            size_t num_allocator_blocks;
+            R_TRY(dst_page_table.CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_SharedCode, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));
+
+            /* Check that the memory is mapped in the source process. */
+            R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState_FlagCanMapProcess, KMemoryState_FlagCanMapProcess, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
+
+            /* Validate that the memory ranges are compatible. */
+            {
+                /* Define a helper type. */
+                struct ContiguousRangeInfo {
+                    public:
+                        KPageTableBase &m_pt;
+                        TraversalContext m_context;
+                        TraversalEntry m_entry;
+                        KPhysicalAddress m_phys_addr;
+                        size_t m_cur_size;
+                        size_t m_remaining_size;
+                    public:
+                        ContiguousRangeInfo(KPageTableBase &pt, KProcessAddress address, size_t size) : m_pt(pt), m_remaining_size(size) {
+                            /* Begin a traversal. */
+                            MESOSPHERE_ABORT_UNLESS(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry), std::addressof(m_context), address));
+
+                            /* Setup tracking fields. */
+                            m_phys_addr = m_entry.phys_addr;
+                            m_cur_size  = std::min<size_t>(m_remaining_size, m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1)));
+
+                            /* Consume the whole contiguous block. */
+                            this->DetermineContiguousBlockExtents();
+                        }
+
+                        void ContinueTraversal() {
+                            /* Update our remaining size. */
+                            m_remaining_size = m_remaining_size - m_cur_size;
+
+                            /* Update our tracking fields. */
+                            if (m_remaining_size > 0) {
+                                m_phys_addr = m_entry.phys_addr;
+                                m_cur_size  = std::min<size_t>(m_remaining_size, m_entry.block_size);
+
+                                /* Consume the whole contiguous block. */
+                                this->DetermineContiguousBlockExtents();
+                            }
+                        }
+                    private:
+                        void DetermineContiguousBlockExtents() {
+                            /* Continue traversing until we're not contiguous, or we have enough. */
+                            while (m_cur_size < m_remaining_size) {
+                                MESOSPHERE_ABORT_UNLESS(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry), std::addressof(m_context)));
+
+                                /* If we're not contiguous, we're done. */
+                                if (m_entry.phys_addr != m_phys_addr + m_cur_size) {
+                                    break;
+                                }
+
+                                /* Update our current size. */
+                                m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size);
+                            }
+                        }
+                };
+
+                /* Create ranges for both tables. */
+                ContiguousRangeInfo src_range(src_page_table, src_address, size);
+                ContiguousRangeInfo dst_range(dst_page_table, dst_address, size);
+
+                /* Validate the ranges. */
+                while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) {
+                    R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, svc::ResultInvalidMemoryRegion());
+                    R_UNLESS(src_range.m_cur_size  == dst_range.m_cur_size,  svc::ResultInvalidMemoryRegion());
+
+                    src_range.ContinueTraversal();
+                    dst_range.ContinueTraversal();
+                }
+            }
+
+            /* We no longer need to hold our lock on the source page table. */
+            lk.TryUnlockHalf(src_page_table.m_general_lock);
+
+            /* Create an update allocator. */
+            Result allocator_result;
+            KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
+            R_TRY(allocator_result);
+
+            /* We're going to perform an update, so create a helper. */
+            KScopedPageTableUpdater updater(this);
+
+            /* Unmap the memory. */
+            const size_t num_pages = size / PageSize;
+            const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
+            R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
+
+            /* Apply the memory block update. */
+            m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
+
+            return ResultSuccess();
+        }
 }
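The ContiguousRangeInfo helper above coalesces page-table traversal entries into maximal physically contiguous runs; walking one instance per table in lockstep and requiring the runs to match pairwise proves that the source and destination virtual ranges are backed by the same physical pages, which is what previously fell out of building and comparing a KPageGroup. A standalone model of the same comparison, with a plain block list standing in for the page-table walk:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

/* Stand-in for a page-table traversal: the physical blocks backing a
 * virtual range, in virtual-address order. */
using PhysBlock = std::pair<uintptr_t, size_t>; /* (phys_addr, size) */

/* Coalesce adjacent blocks into maximal physically contiguous runs,
 * mirroring what DetermineContiguousBlockExtents does incrementally. */
static std::vector<PhysBlock> CoalesceRuns(const std::vector<PhysBlock> &blocks) {
    std::vector<PhysBlock> runs;
    for (const auto &[addr, size] : blocks) {
        if (!runs.empty() && runs.back().first + runs.back().second == addr) {
            runs.back().second += size; /* contiguous: extend the current run */
        } else {
            runs.emplace_back(addr, size);
        }
    }
    return runs;
}

/* Two ranges alias the same memory iff their runs agree exactly -- the
 * lockstep check UnmapProcessMemory performs with two ContiguousRangeInfos. */
static bool RangesAlias(const std::vector<PhysBlock> &a, const std::vector<PhysBlock> &b) {
    return CoalesceRuns(a) == CoalesceRuns(b);
}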
@@ -30,32 +30,24 @@ namespace ams::kern::svc {
         /* Determine aligned extents. */
         const uintptr_t aligned_start = util::AlignDown(address, PageSize);
         const uintptr_t aligned_end = util::AlignUp(address + size, PageSize);
-        const size_t num_pages = (aligned_end - aligned_start) / PageSize;
-
-        /* Create a page group for the process's memory. */
-        KPageGroup pg(page_table.GetBlockInfoManager());
-
-        /* Make and open the page group. */
-        R_TRY(page_table.MakeAndOpenPageGroup(std::addressof(pg),
-                                              aligned_start, num_pages,
-                                              KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
-                                              KMemoryPermission_UserRead, KMemoryPermission_UserRead,
-                                              KMemoryAttribute_Uncached, KMemoryAttribute_None));
-
-        /* Ensure we don't leak references to the pages we're operating on. */
-        ON_SCOPE_EXIT { pg.Close(); };
-
-        /* Operate on all the blocks. */
+        /* Iterate over and operate on contiguous ranges. */
         uintptr_t cur_address = aligned_start;
         size_t remaining = size;
-        for (const auto &block : pg) {
-            /* Get the block extents. */
-            KVirtualAddress operate_address = block.GetAddress();
-            size_t operate_size = block.GetSize();
+        while (remaining > 0) {
+            /* Get a contiguous range to operate on. */
+            KPageTableBase::MemoryRange contig_range = {};
+            R_TRY(page_table.OpenMemoryRangeForProcessCacheOperation(std::addressof(contig_range), cur_address, aligned_end - cur_address));
+
+            /* Close the range when we're done operating on it. */
+            ON_SCOPE_EXIT { contig_range.Close(); };
 
             /* Adjust to remain within range. */
+            KVirtualAddress operate_address = contig_range.address;
+            size_t operate_size = contig_range.size;
             if (cur_address < address) {
                 operate_address += (address - cur_address);
+                operate_size -= (address - cur_address);
             }
             if (operate_size > remaining) {
                 operate_size = remaining;
@@ -65,7 +57,7 @@ namespace ams::kern::svc {
             operation.Operate(GetVoidPointer(operate_address), operate_size);
 
             /* Advance. */
-            cur_address += block.GetSize();
+            cur_address += contig_range.size;
             remaining -= operate_size;
         }
         MESOSPHERE_ASSERT(remaining == 0);
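The rewritten loop still opens whole-page ranges but must clip each one back to the caller's exact [address, address + size) window; that is what the AlignDown/AlignUp extents and the `cur_address < address` adjustment accomplish. A small worked example of the arithmetic, self-contained with local reimplementations of the alignment helpers (the constants are illustrative, not from the commit):

#include <cassert>
#include <cstdint>

constexpr uintptr_t PageSize = 0x1000;
constexpr uintptr_t AlignDown(uintptr_t v, uintptr_t a) { return v & ~(a - 1); }
constexpr uintptr_t AlignUp(uintptr_t v, uintptr_t a)   { return AlignDown(v + a - 1, a); }

int main() {
    /* A 0x1800-byte operation starting mid-page. */
    const uintptr_t address = 0x10800, size = 0x1800;
    const uintptr_t aligned_start = AlignDown(address, PageSize);      /* 0x10000 */
    const uintptr_t aligned_end   = AlignUp(address + size, PageSize); /* 0x12000 */

    /* Suppose the first contiguous range covers all of [aligned_start, aligned_end). */
    uintptr_t operate_address = aligned_start;
    uintptr_t operate_size    = aligned_end - aligned_start; /* 0x2000 */

    /* Clip the front back to the user's window, as the loop does. */
    if (aligned_start < address) {
        operate_address += address - aligned_start; /* -> 0x10800 */
        operate_size    -= address - aligned_start; /* -> 0x1800 */
    }
    assert(operate_address == address && operate_size == size);
    return 0;
}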
@@ -122,21 +122,8 @@ namespace ams::kern::svc {
         R_UNLESS(src_pt.Contains(src_address, size), svc::ResultInvalidCurrentMemory());
         R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState_SharedCode), svc::ResultInvalidMemoryRegion());
 
-        /* Create a new page group. */
-        KPageGroup pg(dst_pt.GetBlockInfoManager());
-
-        /* Make the page group. */
-        R_TRY(src_pt.MakeAndOpenPageGroup(std::addressof(pg),
-                                          src_address, size / PageSize,
-                                          KMemoryState_FlagCanMapProcess, KMemoryState_FlagCanMapProcess,
-                                          KMemoryPermission_None, KMemoryPermission_None,
-                                          KMemoryAttribute_All, KMemoryAttribute_None));
-
-        /* Close the page group when we're done. */
-        ON_SCOPE_EXIT { pg.Close(); };
-
-        /* Unmap the group. */
-        R_TRY(dst_pt.UnmapPageGroup(dst_address, pg, KMemoryState_SharedCode));
+        /* Unmap the memory. */
+        R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
 
         return ResultSuccess();
     }