mirror of https://github.com/Atmosphere-NX/Atmosphere.git

commit 154422562a (parent 25b0baae59)

kern: implement page group unmapping

11 changed files with 654 additions and 12 deletions
@@ -192,6 +192,12 @@ namespace ams::kern::arch::arm64::cpu {
         DataSynchronizationBarrier();
     }
 
+    ALWAYS_INLINE void InvalidateTlbByVaDataOnly(KProcessAddress virt_addr) {
+        const u64 value = ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul);
+        __asm__ __volatile__("tlbi vaae1is, %[value]" :: [value]"r"(value) : "memory");
+        DataSynchronizationBarrier();
+    }
+
     ALWAYS_INLINE uintptr_t GetCoreLocalRegionAddress() {
         register uintptr_t x18 asm("x18");
         __asm__ __volatile__("" : [x18]"=r"(x18));
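The new InvalidateTlbByVaDataOnly helper issues a broadcast by-VA invalidate (`tlbi vaae1is`), whose operand is the 4 KiB page number of the address, i.e. bits [55:12], kept in the low 44 bits of the register. A stand-alone sketch of that operand computation (illustrative only, not part of the commit; plain integers):

    #include <cstdint>

    /* Sketch: mirrors ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul) above. */
    constexpr std::uint64_t TlbiVaaOperand(std::uint64_t virt_addr) {
        return (virt_addr >> 12) & 0xFFFFFFFFFFFull;   /* keep the 44-bit page number */
    }

    static_assert(TlbiVaaOperand(0xFFFFFF8000123456ull) == 0xFFFF8000123ull);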
@@ -25,6 +25,9 @@ namespace ams::kern::arch::arm64 {
     class KPageTable : public KPageTableBase {
         NON_COPYABLE(KPageTable);
         NON_MOVEABLE(KPageTable);
+        public:
+            using TraversalEntry   = KPageTableImpl::TraversalEntry;
+            using TraversalContext = KPageTableImpl::TraversalContext;
         private:
             KPageTableManager *manager;
             u64 ttbr;
@@ -93,8 +96,7 @@ namespace ams::kern::arch::arm64 {
             virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup *page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
             virtual void FinalizeUpdate(PageLinkedList *page_list) override;
 
-            KPageTableManager &GetPageTableManager() { return *this->manager; }
-            const KPageTableManager &GetPageTableManager() const { return *this->manager; }
+            KPageTableManager &GetPageTableManager() const { return *this->manager; }
         private:
             constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
                 /* Set basic attributes. */
@@ -197,6 +199,9 @@ namespace ams::kern::arch::arm64 {
 
             bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);
 
+            ALWAYS_INLINE Result SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
+            Result SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
+
             static void PteDataSynchronizationBarrier() {
                 cpu::DataSynchronizationBarrierInnerShareable();
             }
@@ -213,6 +218,10 @@ namespace ams::kern::arch::arm64 {
                 cpu::InvalidateEntireTlbDataOnly();
             }
 
+            void OnKernelTableSinglePageUpdated(KProcessAddress virt_addr) const {
+                cpu::InvalidateTlbByVaDataOnly(virt_addr);
+            }
+
             void NoteUpdated() const {
                 cpu::DataSynchronizationBarrier();
 
@@ -223,7 +232,14 @@ namespace ams::kern::arch::arm64 {
                 }
             }
 
-            KVirtualAddress AllocatePageTable(PageLinkedList *page_list, bool reuse_ll) {
+            void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
+                MESOSPHERE_ASSERT(this->IsKernel());
+
+                cpu::DataSynchronizationBarrier();
+                this->OnKernelTableSinglePageUpdated(virt_addr);
+            }
+
+            KVirtualAddress AllocatePageTable(PageLinkedList *page_list, bool reuse_ll) const {
                 KVirtualAddress table = this->GetPageTableManager().Allocate();
 
                 if (table == Null<KVirtualAddress>) {
@@ -21,6 +21,7 @@
 namespace ams::kern::arch::arm64 {
 
     constexpr size_t L1BlockSize           = 1_GB;
+    constexpr size_t L1ContiguousBlockSize = 0x10 * L1BlockSize;
     constexpr size_t L2BlockSize           = 2_MB;
     constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;
     constexpr size_t L3BlockSize           = PageSize;
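Each contiguous size is 16x the corresponding block size (the 0x10 factor, matching the ARMv8 contiguous-hint grouping of adjacent entries). A quick stand-alone check of the resulting sizes (illustrative only; assumes 4 KiB pages, as the kernel uses):

    #include <cstddef>

    /* Sizes implied by the constants above, assuming PageSize == 4 KiB. */
    constexpr std::size_t PageSize              = 0x1000;
    constexpr std::size_t L3BlockSize           = PageSize;
    constexpr std::size_t L2BlockSize           = 2 * 1024 * 1024;
    constexpr std::size_t L1BlockSize           = 1024 * 1024 * 1024;
    constexpr std::size_t L3ContiguousBlockSize = 0x10 * L3BlockSize;   /*  64 KiB */
    constexpr std::size_t L2ContiguousBlockSize = 0x10 * L2BlockSize;   /*  32 MiB */
    constexpr std::size_t L1ContiguousBlockSize = 0x10 * L1BlockSize;   /*  16 GiB */

    static_assert(L3ContiguousBlockSize ==            64 * 1024);
    static_assert(L2ContiguousBlockSize ==     32 * 1024 * 1024);
    static_assert(L1ContiguousBlockSize == 16ull * 1024 * 1024 * 1024);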
@@ -22,12 +22,20 @@
 
 namespace ams::kern::arch::arm64 {
 
-    /* TODO: This seems worse than KInitialPageTable. Can we fulfill Nintendo's API using KInitialPageTable? */
-    /* KInitialPageTable is significantly nicer, but doesn't have KPageTableImpl's traversal semantics. */
-    /* Perhaps we could implement those on top of it? */
     class KPageTableImpl {
         NON_COPYABLE(KPageTableImpl);
         NON_MOVEABLE(KPageTableImpl);
+        public:
+            struct TraversalEntry {
+                KPhysicalAddress phys_addr;
+                size_t block_size;
+            };
+
+            struct TraversalContext {
+                const L1PageTableEntry *l1_entry;
+                const L2PageTableEntry *l2_entry;
+                const L3PageTableEntry *l3_entry;
+            };
         private:
             static constexpr size_t PageBits  = __builtin_ctzll(PageSize);
             static constexpr size_t NumLevels = 3;
@@ -55,6 +63,14 @@ namespace ams::kern::arch::arm64 {
             static constexpr ALWAYS_INLINE uintptr_t GetContiguousL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1) + 4>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetContiguousL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2) + 4>(GetInteger(addr)); }
             static constexpr ALWAYS_INLINE uintptr_t GetContiguousL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3) + 4>(GetInteger(addr)); }
+
+            static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
+                return KMemoryLayout::GetLinearVirtualAddress(addr);
+            }
+
+            ALWAYS_INLINE bool ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const;
+            ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
+            ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
         private:
             L1PageTableEntry *table;
             bool is_kernel;
@@ -89,6 +105,9 @@ namespace ams::kern::arch::arm64 {
             NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
             L1PageTableEntry *Finalize();
 
+            bool BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const;
+            bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;
+
             bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;
     };
 
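BeginTraversal/ContinueTraversal give callers a resumable walk over whatever blocks currently map a range, without restarting the walk from L1 for every page; KPageTable::Unmap later in this diff drives the API exactly this way. A hedged usage sketch follows (illustrative only, not part of the commit; assumes it is compiled inside libmesosphere where these types, PageSize, and GetInteger are visible):

    /* Illustrative only: visit every block covering [virt_addr, virt_addr + num_pages * PageSize). */
    void WalkRangeSketch(const KPageTableImpl &impl, KProcessAddress virt_addr, size_t num_pages) {
        KPageTableImpl::TraversalContext context;
        KPageTableImpl::TraversalEntry   entry;

        size_t remaining = num_pages * PageSize;
        bool valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr);
        while (remaining > 0) {
            /* A block may begin before virt_addr; only count the part of it we actually cover. */
            const size_t cur_size = std::min(entry.block_size - (GetInteger(virt_addr) & (entry.block_size - 1)), remaining);
            if (valid) {
                /* entry.phys_addr / entry.block_size describe the mapping here. */
            }
            virt_addr += cur_size;
            remaining -= cur_size;
            valid      = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
        }
    }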
@@ -169,6 +169,8 @@ namespace ams::kern {
             KPageTableImpl &GetImpl() { return this->impl; }
             const KPageTableImpl &GetImpl() const { return this->impl; }
 
+            KBlockInfoManager *GetBlockInfoManager() const { return this->block_info_manager; }
+
             bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); }
 
             bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
@@ -212,6 +214,8 @@ namespace ams::kern {
             Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties);
             Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);
 
+            bool IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages) const;
+
             NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
         public:
             bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const {
@@ -176,7 +176,165 @@ namespace ams::kern::arch::arm64 {
     }
 
     Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        auto &impl = this->GetImpl();
+
+        /* If we're not forcing an unmap, separate pages immediately. */
+        if (!force) {
+            const size_t size = num_pages * PageSize;
+            R_TRY(this->SeparatePages(virt_addr, std::min(GetInteger(virt_addr) & -GetInteger(virt_addr), size), page_list, reuse_ll));
+            if (num_pages > 1) {
+                const auto end_page  = virt_addr + size;
+                const auto last_page = end_page - PageSize;
+
+                auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
+                R_TRY(this->SeparatePages(last_page, std::min(GetInteger(end_page) & -GetInteger(end_page), size), page_list, reuse_ll));
+                merge_guard.Cancel();
+            }
+        }
+
+        /* Cache initial addresses for use on cleanup. */
+        const KProcessAddress orig_virt_addr = virt_addr;
+        size_t remaining_pages = num_pages;
+
+        /* Ensure that any pages we track close on exit. */
+        KPageGroup pages_to_close(this->GetBlockInfoManager());
+        KScopedPageGroup spg(pages_to_close);
+
+        /* Begin traversal. */
+        TraversalContext context;
+        TraversalEntry   next_entry;
+        bool next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
+
+        while (remaining_pages > 0) {
+            /* Handle the case where we're not valid. */
+            if (!next_valid) {
+                MESOSPHERE_ABORT_UNLESS(force);
+                const size_t cur_size = std::min(next_entry.block_size - (GetInteger(virt_addr) & (next_entry.block_size - 1)), remaining_pages * PageSize);
+                remaining_pages -= cur_size / PageSize;
+                virt_addr += cur_size;
+                continue;
+            }
+
+            /* Handle the case where the block is bigger than it should be. */
+            if (next_entry.block_size > remaining_pages * PageSize) {
+                MESOSPHERE_ABORT_UNLESS(force);
+                MESOSPHERE_R_ABORT_UNLESS(this->SeparatePages(virt_addr, remaining_pages * PageSize, page_list, reuse_ll));
+                next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
+                MESOSPHERE_ASSERT(next_valid);
+            }
+            //MESOSPHERE_LOG("Unmap: Acting on %08zx %08zx (%p %p %p)\n", GetInteger(next_entry.phys_addr), next_entry.block_size, context.l1_entry, context.l2_entry, context.l3_entry);
+
+            /* Check that our state is coherent. */
+            MESOSPHERE_ASSERT((next_entry.block_size / PageSize) <= remaining_pages);
+            MESOSPHERE_ASSERT(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
+
+            /* Unmap the block. */
+            L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
+            switch (next_entry.block_size) {
+                case L1BlockSize:
+                    {
+                        /* Clear the entry. */
+                        *l1_entry = InvalidL1PageTableEntry;
+                    }
+                    break;
+                case L2ContiguousBlockSize:
+                case L2BlockSize:
+                    {
+                        /* Get the number of L2 blocks. */
+                        const size_t num_l2_blocks = next_entry.block_size / L2BlockSize;
+
+                        /* Get the L2 entry. */
+                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
+                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
+                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
+
+                        /* Clear the entry. */
+                        for (size_t i = 0; i < num_l2_blocks; i++) {
+                            *impl.GetL2EntryFromTable(l2_virt, virt_addr + L2BlockSize * i) = InvalidL2PageTableEntry;
+                        }
+                        PteDataSynchronizationBarrier();
+
+                        /* Close references to the L2 table. */
+                        if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
+                            if (this->GetPageTableManager().Close(l2_virt, num_l2_blocks)) {
+                                *l1_entry = InvalidL1PageTableEntry;
+                                this->NoteUpdated();
+                                this->FreePageTable(page_list, l2_virt);
+                            }
+                        }
+                    }
+                    break;
+                case L3ContiguousBlockSize:
+                case L3BlockSize:
+                    {
+                        /* Get the number of L3 blocks. */
+                        const size_t num_l3_blocks = next_entry.block_size / L3BlockSize;
+
+                        /* Get the L2 entry. */
+                        KPhysicalAddress l2_phys = Null<KPhysicalAddress>;
+                        MESOSPHERE_ABORT_UNLESS(l1_entry->GetTable(l2_phys));
+                        const KVirtualAddress l2_virt = GetPageTableVirtualAddress(l2_phys);
+                        L2PageTableEntry *l2_entry = impl.GetL2EntryFromTable(l2_virt, virt_addr);
+
+                        /* Get the L3 entry. */
+                        KPhysicalAddress l3_phys = Null<KPhysicalAddress>;
+                        MESOSPHERE_ABORT_UNLESS(l2_entry->GetTable(l3_phys));
+                        const KVirtualAddress l3_virt = GetPageTableVirtualAddress(l3_phys);
+
+                        /* Clear the entry. */
+                        for (size_t i = 0; i < num_l3_blocks; i++) {
+                            *impl.GetL3EntryFromTable(l3_virt, virt_addr + L3BlockSize * i) = InvalidL3PageTableEntry;
+                        }
+                        PteDataSynchronizationBarrier();
+
+                        /* Close references to the L3 table. */
+                        if (this->GetPageTableManager().IsInPageTableHeap(l3_virt)) {
+                            if (this->GetPageTableManager().Close(l3_virt, num_l3_blocks)) {
+                                *l2_entry = InvalidL2PageTableEntry;
+                                this->NoteUpdated();
+
+                                /* Close reference to the L2 table. */
+                                if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) {
+                                    if (this->GetPageTableManager().Close(l2_virt, 1)) {
+                                        *l1_entry = InvalidL1PageTableEntry;
+                                        this->NoteUpdated();
+                                        this->FreePageTable(page_list, l2_virt);
+                                    }
+                                }
+
+                                this->FreePageTable(page_list, l3_virt);
+                            }
+                        }
+                    }
+                    break;
+            }
+
+            /* Close the blocks. */
+            if (!force && IsHeapPhysicalAddress(next_entry.phys_addr)) {
+                const KVirtualAddress block_virt_addr = GetHeapVirtualAddress(next_entry.phys_addr);
+                const size_t block_num_pages = next_entry.block_size / PageSize;
+                if (R_FAILED(pages_to_close.AddBlock(block_virt_addr, block_num_pages))) {
+                    this->NoteUpdated();
+                    Kernel::GetMemoryManager().Close(block_virt_addr, block_num_pages);
+                }
+            }
+
+            /* Advance. */
+            virt_addr += next_entry.block_size;
+            remaining_pages -= next_entry.block_size / PageSize;
+            next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+        }
+
+        /* Ensure we remain coherent. */
+        if (this->IsKernel() && num_pages == 1) {
+            this->NoteSingleKernelPageUpdated(orig_virt_addr);
+        } else {
+            this->NoteUpdated();
+        }
+
+        return ResultSuccess();
     }
 
     Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) {
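Worth noting: `GetInteger(virt_addr) & -GetInteger(virt_addr)` in the SeparatePages calls above isolates the lowest set bit of the address, i.e. the largest power-of-two alignment the start (or end) address already has, so the range boundaries are only split down to the granularity they actually require. A stand-alone illustration (plain integers; values invented):

    #include <cstdint>
    #include <algorithm>

    /* x & -x isolates the lowest set bit, i.e. the largest power of two dividing x;
       for an address this is its largest natural alignment. */
    constexpr std::uint64_t LargestAlignment(std::uint64_t addr) {
        return addr & (0 - addr);
    }

    static_assert(LargestAlignment(0x40200000) == 0x200000);  /* start is 2 MiB aligned */
    static_assert(std::min<std::uint64_t>(LargestAlignment(0x40200000), 0x10000) == 0x10000);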
@@ -381,6 +539,120 @@ namespace ams::kern::arch::arm64 {
         return merged;
     }
 
+    Result KPageTable::SeparatePagesImpl(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        auto &impl = this->GetImpl();
+
+        /* First, try to separate an L1 block into contiguous L2 blocks. */
+        L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr);
+        if (l1_entry->IsBlock()) {
+            /* If our block size is too big, don't bother. */
+            R_UNLESS(block_size < L1BlockSize, ResultSuccess());
+
+            /* Get the addresses we're working with. */
+            const KProcessAddress block_virt_addr  = util::AlignDown(GetInteger(virt_addr), L1BlockSize);
+            const KPhysicalAddress block_phys_addr = l1_entry->GetBlock();
+
+            /* Allocate a new page for the L2 table. */
+            const KVirtualAddress l2_table = this->AllocatePageTable(page_list, reuse_ll);
+            R_UNLESS(l2_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
+            const KPhysicalAddress l2_phys = GetPageTablePhysicalAddress(l2_table);
+
+            /* Set the entries in the L2 table. */
+            const u64 entry_template = l1_entry->GetEntryTemplate();
+            for (size_t i = 0; i < L1BlockSize / L2BlockSize; i++) {
+                *(impl.GetL2EntryFromTable(l2_table, block_virt_addr + L2BlockSize * i)) = L2PageTableEntry(block_phys_addr + L2BlockSize * i, entry_template, true);
+            }
+
+            /* Open references to the L2 table. */
+            Kernel::GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize);
+
+            /* Replace the L1 entry with one to the new table. */
+            PteDataSynchronizationBarrier();
+            *l1_entry = L1PageTableEntry(l2_phys, this->IsKernel(), true);
+            this->NoteUpdated();
+        }
+
+        /* If we don't have an L1 table, we're done. */
+        R_UNLESS(l1_entry->IsTable(), ResultSuccess());
+
+        /* We want to separate L2 contiguous blocks into L2 blocks, so check that our size permits that. */
+        R_UNLESS(block_size < L2ContiguousBlockSize, ResultSuccess());
+
+        L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, virt_addr);
+        if (l2_entry->IsBlock()) {
+            /* If we're contiguous, try to separate. */
+            if (l2_entry->IsContiguous()) {
+                const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L2ContiguousBlockSize);
+
+                /* Mark the entries as non-contiguous. */
+                for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
+                    impl.GetL2Entry(l1_entry, block_virt_addr + L2BlockSize * i)->SetContiguous(false);
+                }
+                this->NoteUpdated();
+            }
+
+            /* We want to separate L2 blocks into L3 contiguous blocks, so check that our size permits that. */
+            R_UNLESS(block_size < L2BlockSize, ResultSuccess());
+
+            /* Get the addresses we're working with. */
+            const KProcessAddress block_virt_addr  = util::AlignDown(GetInteger(virt_addr), L2BlockSize);
+            const KPhysicalAddress block_phys_addr = l2_entry->GetBlock();
+
+            /* Allocate a new page for the L3 table. */
+            const KVirtualAddress l3_table = this->AllocatePageTable(page_list, reuse_ll);
+            R_UNLESS(l3_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
+            const KPhysicalAddress l3_phys = GetPageTablePhysicalAddress(l3_table);
+
+            /* Set the entries in the L3 table. */
+            const u64 entry_template = l2_entry->GetEntryTemplate();
+            for (size_t i = 0; i < L2BlockSize / L3BlockSize; i++) {
+                *(impl.GetL3EntryFromTable(l3_table, block_virt_addr + L3BlockSize * i)) = L3PageTableEntry(block_phys_addr + L3BlockSize * i, entry_template, true);
+            }
+
+            /* Open references to the L3 table. */
+            Kernel::GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize);
+
+            /* Replace the L2 entry with one to the new table. */
+            PteDataSynchronizationBarrier();
+            *l2_entry = L2PageTableEntry(l3_phys, this->IsKernel(), true);
+            this->NoteUpdated();
+        }
+
+        /* If we don't have an L3 table, we're done. */
+        R_UNLESS(l2_entry->IsTable(), ResultSuccess());
+
+        /* We want to separate L3 contiguous blocks into L3 blocks, so check that our size permits that. */
+        R_UNLESS(block_size < L3ContiguousBlockSize, ResultSuccess());
+
+        /* If we're contiguous, try to separate. */
+        L3PageTableEntry *l3_entry = impl.GetL3Entry(l2_entry, virt_addr);
+        if (l3_entry->IsBlock() && l3_entry->IsContiguous()) {
+            const KProcessAddress block_virt_addr = util::AlignDown(GetInteger(virt_addr), L3ContiguousBlockSize);
+
+            /* Mark the entries as non-contiguous. */
+            for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
+                impl.GetL3Entry(l2_entry, block_virt_addr + L3BlockSize * i)->SetContiguous(false);
+            }
+            this->NoteUpdated();
+        }
+
+        /* We're done! */
+        return ResultSuccess();
+    }
+
+    Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
+        MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
+
+        /* Try to separate pages, re-merging if we fail. */
+        auto guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
+        R_TRY(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
+        guard.Cancel();
+
+        return ResultSuccess();
+    }
+
     void KPageTable::FinalizeUpdate(PageLinkedList *page_list) {
         while (page_list->Peek()) {
             KVirtualAddress page = KVirtualAddress(page_list->Pop());
@@ -27,6 +27,221 @@ namespace ams::kern::arch::arm64 {
         return this->table;
     }
 
+    bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const {
+        /* Set the L3 entry. */
+        out_context->l3_entry = l3_entry;
+
+        if (l3_entry->IsBlock()) {
+            /* Set the output entry. */
+            out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1));
+            if (l3_entry->IsContiguous()) {
+                out_entry->block_size = L3ContiguousBlockSize;
+            } else {
+                out_entry->block_size = L3BlockSize;
+            }
+
+            return true;
+        } else {
+            out_entry->phys_addr  = Null<KPhysicalAddress>;
+            out_entry->block_size = L3BlockSize;
+            return false;
+        }
+    }
+
+    bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const {
+        /* Set the L2 entry. */
+        out_context->l2_entry = l2_entry;
+
+        if (l2_entry->IsBlock()) {
+            /* Set the output entry. */
+            out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1));
+            if (l2_entry->IsContiguous()) {
+                out_entry->block_size = L2ContiguousBlockSize;
+            } else {
+                out_entry->block_size = L2BlockSize;
+            }
+            /* Set the output context. */
+            out_context->l3_entry = nullptr;
+            return true;
+        } else if (l2_entry->IsTable()) {
+            return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr);
+        } else {
+            out_entry->phys_addr  = Null<KPhysicalAddress>;
+            out_entry->block_size = L2BlockSize;
+            out_context->l3_entry = nullptr;
+            return false;
+        }
+    }
+
+    bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const {
+        /* Set the L1 entry. */
+        out_context->l1_entry = l1_entry;
+
+        if (l1_entry->IsBlock()) {
+            /* Set the output entry. */
+            out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1));
+            if (l1_entry->IsContiguous()) {
+                out_entry->block_size = L1ContiguousBlockSize;
+            } else {
+                out_entry->block_size = L1BlockSize;
+            }
+            /* Set the output context. */
+            out_context->l2_entry = nullptr;
+            out_context->l3_entry = nullptr;
+            return true;
+        } else if (l1_entry->IsTable()) {
+            return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr);
+        } else {
+            out_entry->phys_addr  = Null<KPhysicalAddress>;
+            out_entry->block_size = L1BlockSize;
+            out_context->l2_entry = nullptr;
+            out_context->l3_entry = nullptr;
+            return false;
+        }
+    }
+
+    bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const {
+        /* Setup invalid defaults. */
+        out_entry->phys_addr  = Null<KPhysicalAddress>;
+        out_entry->block_size = L1BlockSize;
+        out_context->l1_entry = this->table + this->num_entries;
+        out_context->l2_entry = nullptr;
+        out_context->l3_entry = nullptr;
+
+        /* Validate that we can read the actual entry. */
+        const size_t l0_index = GetL0Index(address);
+        const size_t l1_index = GetL1Index(address);
+        if (this->is_kernel) {
+            /* Kernel entries must be accessed via TTBR1. */
+            if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) {
+                return false;
+            }
+        } else {
+            /* User entries must be accessed with TTBR0. */
+            if ((l0_index != 0) || l1_index >= this->num_entries) {
+                return false;
+            }
+        }
+
+        /* Extract the entry. */
+        const bool valid = this->ExtractL1Entry(out_entry, out_context, this->GetL1Entry(address), address);
+
+        /* Update the context for next traversal. */
+        switch (out_entry->block_size) {
+            case L1ContiguousBlockSize:
+                out_context->l1_entry += (L1ContiguousBlockSize / L1BlockSize) - GetContiguousL1Offset(address) / L1BlockSize;
+                break;
+            case L1BlockSize:
+                out_context->l1_entry += 1;
+                break;
+            case L2ContiguousBlockSize:
+                out_context->l1_entry += 1;
+                out_context->l2_entry += (L2ContiguousBlockSize / L2BlockSize) - GetContiguousL2Offset(address) / L2BlockSize;
+                break;
+            case L2BlockSize:
+                out_context->l1_entry += 1;
+                out_context->l2_entry += 1;
+                break;
+            case L3ContiguousBlockSize:
+                out_context->l1_entry += 1;
+                out_context->l2_entry += 1;
+                out_context->l3_entry += (L3ContiguousBlockSize / L3BlockSize) - GetContiguousL3Offset(address) / L3BlockSize;
+                break;
+            case L3BlockSize:
+                out_context->l1_entry += 1;
+                out_context->l2_entry += 1;
+                out_context->l3_entry += 1;
+                break;
+            MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+        }
+
+        return valid;
+    }
+
+    bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const {
+        bool valid = false;
+
+        /* Check if we're not at the end of an L3 table. */
+        if (!util::IsAligned(reinterpret_cast<uintptr_t>(context->l3_entry), PageSize)) {
+            valid = this->ExtractL3Entry(out_entry, context, context->l3_entry, Null<KProcessAddress>);
+
+            switch (out_entry->block_size) {
+                case L3ContiguousBlockSize:
+                    context->l3_entry += (L3ContiguousBlockSize / L3BlockSize);
+                    break;
+                case L3BlockSize:
+                    context->l3_entry += 1;
+                    break;
+                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+            }
+        } else if (!util::IsAligned(reinterpret_cast<uintptr_t>(context->l2_entry), PageSize)) {
+            /* We're not at the end of an L2 table. */
+            valid = this->ExtractL2Entry(out_entry, context, context->l2_entry, Null<KProcessAddress>);
+
+            switch (out_entry->block_size) {
+                case L2ContiguousBlockSize:
+                    context->l2_entry += (L2ContiguousBlockSize / L2BlockSize);
+                    break;
+                case L2BlockSize:
+                    context->l2_entry += 1;
+                    break;
+                case L3ContiguousBlockSize:
+                    context->l2_entry += 1;
+                    context->l3_entry += (L3ContiguousBlockSize / L3BlockSize);
+                    break;
+                case L3BlockSize:
+                    context->l2_entry += 1;
+                    context->l3_entry += 1;
+                    break;
+                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+            }
+        } else {
+            /* We need to update the l1 entry. */
+            const size_t l1_index = context->l1_entry - this->table;
+            if (l1_index < this->num_entries) {
+                valid = this->ExtractL1Entry(out_entry, context, context->l1_entry, Null<KProcessAddress>);
+            } else {
+                /* Invalid, end traversal. */
+                out_entry->phys_addr  = Null<KPhysicalAddress>;
+                out_entry->block_size = L1BlockSize;
+                context->l1_entry     = this->table + this->num_entries;
+                context->l2_entry     = nullptr;
+                context->l3_entry     = nullptr;
+                return false;
+            }
+
+            switch (out_entry->block_size) {
+                case L1ContiguousBlockSize:
+                    context->l1_entry += (L1ContiguousBlockSize / L1BlockSize);
+                    break;
+                case L1BlockSize:
+                    context->l1_entry += 1;
+                    break;
+                case L2ContiguousBlockSize:
+                    context->l1_entry += 1;
+                    context->l2_entry += (L2ContiguousBlockSize / L2BlockSize);
+                    break;
+                case L2BlockSize:
+                    context->l1_entry += 1;
+                    context->l2_entry += 1;
+                    break;
+                case L3ContiguousBlockSize:
+                    context->l1_entry += 1;
+                    context->l2_entry += 1;
+                    context->l3_entry += (L3ContiguousBlockSize / L3BlockSize);
+                    break;
+                case L3BlockSize:
+                    context->l1_entry += 1;
+                    context->l2_entry += 1;
+                    context->l3_entry += 1;
+                    break;
+                MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
+            }
+        }
+
+        return valid;
+    }
+
     bool KPageTableImpl::GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
         /* Validate that we can read the actual entry. */
         const size_t l0_index = GetL0Index(address);
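The page-alignment tests in ContinueTraversal work because every L2/L3 table occupies exactly one 4 KiB page of 8-byte descriptors: a cursor that has stepped past the 512th entry lands precisely on the next page boundary, which signals "end of this table, move up a level". A one-line stand-alone check of that arithmetic (illustrative only):

    /* 512 descriptors x 8 bytes == one 4 KiB table page, so a pointer just past
       the last entry is page-aligned. (Assumes 4 KiB pages and 8-byte descriptors.) */
    static_assert(512 * 8 == 0x1000, "one translation table spans exactly one 4 KiB page");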
@@ -145,7 +145,7 @@ namespace ams::kern {
 
     Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params) const {
         /* Clear memory at the address. */
-        std::memset(GetVoidPointer(address), 0, params.code_num_pages);
+        std::memset(GetVoidPointer(address), 0, params.code_num_pages * PageSize);
 
         /* Prepare to layout the data. */
         const KProcessAddress rx_address = address + this->kip_header->GetRxAddress();
@@ -17,6 +17,75 @@
 
 namespace ams::kern {
 
+    namespace {
+
+        constexpr std::tuple<KMemoryState, const char *> MemoryStateNames[] = {
+            {KMemoryState_Free             , "----- Free -----"},
+            {KMemoryState_Io               , "Io              "},
+            {KMemoryState_Static           , "Static          "},
+            {KMemoryState_Code             , "Code            "},
+            {KMemoryState_CodeData         , "CodeData        "},
+            {KMemoryState_Normal           , "Normal          "},
+            {KMemoryState_Shared           , "Shared          "},
+            {KMemoryState_AliasCode        , "AliasCode       "},
+            {KMemoryState_AliasCodeData    , "AliasCodeData   "},
+            {KMemoryState_Ipc              , "Ipc             "},
+            {KMemoryState_Stack            , "Stack           "},
+            {KMemoryState_ThreadLocal      , "ThreadLocal     "},
+            {KMemoryState_Transfered       , "Transfered      "},
+            {KMemoryState_SharedTransfered , "SharedTransfered"},
+            {KMemoryState_SharedCode       , "SharedCode      "},
+            {KMemoryState_Inaccessible     , "Inaccessible    "},
+            {KMemoryState_NonSecureIpc     , "NonSecureIpc    "},
+            {KMemoryState_NonDeviceIpc     , "NonDeviceIpc    "},
+            {KMemoryState_Kernel           , "Kernel          "},
+            {KMemoryState_GeneratedCode    , "GeneratedCode   "},
+            {KMemoryState_CodeOut          , "CodeOut         "},
+        };
+
+        constexpr const char *GetMemoryStateName(KMemoryState state) {
+            for (size_t i = 0; i < util::size(MemoryStateNames); i++) {
+                if (std::get<0>(MemoryStateNames[i]) == state) {
+                    return std::get<1>(MemoryStateNames[i]);
+                }
+            }
+            return "Unknown         ";
+        }
+
+        constexpr const char *GetMemoryPermissionString(const KMemoryInfo &info) {
+            if (info.state == KMemoryState_Free) {
+                return "   ";
+            } else {
+                switch (info.perm) {
+                    case KMemoryPermission_UserReadExecute:
+                        return "r-x";
+                    case KMemoryPermission_UserRead:
+                        return "r--";
+                    case KMemoryPermission_UserReadWrite:
+                        return "rw-";
+                    default:
+                        return "---";
+                }
+            }
+        }
+
+        void DumpMemoryInfo(const KMemoryInfo &info) {
+            const char *state = GetMemoryStateName(info.state);
+            const char *perm  = GetMemoryPermissionString(info);
+            const void *start = reinterpret_cast<void *>(info.GetAddress());
+            const void *end   = reinterpret_cast<void *>(info.GetLastAddress());
+            const size_t kb   = info.GetSize() / 1_KB;
+
+            const char l = (info.attribute & KMemoryAttribute_Locked)       ? 'L' : '-';
+            const char i = (info.attribute & KMemoryAttribute_IpcLocked)    ? 'I' : '-';
+            const char d = (info.attribute & KMemoryAttribute_DeviceShared) ? 'D' : '-';
+            const char u = (info.attribute & KMemoryAttribute_Uncached)     ? 'U' : '-';
+
+            MESOSPHERE_LOG("%p - %p (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, info.ipc_lock_count, info.device_use_count);
+        }
+
+    }
+
     Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd, KMemoryBlockSlabManager *slab_manager) {
         /* Allocate a block to encapsulate the address space, insert it into the tree. */
         KMemoryBlock *start_block = slab_manager->Allocate();
@@ -214,6 +283,9 @@ namespace ams::kern {
     }
 
     void KMemoryBlockManager::DumpBlocks() const {
-        MESOSPHERE_TODO("Dump useful debugging information");
+        /* Dump each block. */
+        for (const auto &block : this->memory_block_tree) {
+            DumpMemoryInfo(block.GetMemoryInfo());
+        }
     }
 }
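With the MESOSPHERE_LOG format string above, each block dumps as a single line; a hypothetical example of the output (all addresses and counts invented for illustration, exact pointer formatting depends on the logger):

    0x0000000008000000 - 0x0000000008ffffff (    16384 KB) r-x Code             ---- [0, 0]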
@@ -337,6 +337,11 @@ namespace ams::kern {
         return ResultSuccess();
     }
 
+    bool KPageTableBase::IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages) const {
+        /* TODO */
+        return true;
+    }
+
     Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
         MESOSPHERE_ASSERT(util::IsAligned(alignment, PageSize) && alignment >= PageSize);
 
@@ -382,6 +387,8 @@ namespace ams::kern {
     }
 
     Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
+        MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
+
         /* Ensure this is a valid map request. */
         const size_t num_pages = pg.GetNumPages();
         R_UNLESS(this->Contains(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
@@ -416,7 +423,37 @@ namespace ams::kern {
     }
 
     Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
-        MESOSPHERE_TODO_IMPLEMENT();
+        MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
+
+        /* Ensure this is a valid unmap request. */
+        const size_t num_pages = pg.GetNumPages();
+        const size_t size = num_pages * PageSize;
+        R_UNLESS(this->Contains(address, size, state), svc::ResultInvalidCurrentMemory());
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Check if state allows us to unmap. */
+        R_TRY(this->CheckMemoryState(address, size, KMemoryState_All, state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
+
+        /* Check that the page group is valid. */
+        R_UNLESS(this->IsValidPageGroup(pg, address, size), svc::ResultInvalidCurrentMemory());
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Perform unmapping operation. */
+        const KPageProperties properties = { KMemoryPermission_None, false, false, false };
+        R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_Unmap, false));
+
+        /* Update the blocks. */
+        this->memory_block_manager.Update(&allocator, address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
+
+        return ResultSuccess();
     }
 
 }
@@ -125,7 +125,7 @@ namespace ams::util {
         }
 
         static constexpr inline IntrusiveRedBlackTreeNode *GetPrev(IntrusiveRedBlackTreeNode *node) {
-            return RB_NEXT(IntrusiveRedBlackTreeRoot, nullptr, node);
+            return RB_PREV(IntrusiveRedBlackTreeRoot, nullptr, node);
         }
 
         static constexpr inline IntrusiveRedBlackTreeNode const *GetPrev(IntrusiveRedBlackTreeNode const *node) {
@@ -146,7 +146,7 @@ namespace ams::util {
         }
 
         IntrusiveRedBlackTreeNode *GetMaxImpl() const {
-            return RB_MIN(IntrusiveRedBlackTreeRoot, const_cast<IntrusiveRedBlackTreeRoot *>(&this->root));
+            return RB_MAX(IntrusiveRedBlackTreeRoot, const_cast<IntrusiveRedBlackTreeRoot *>(&this->root));
         }
 
         IntrusiveRedBlackTreeNode *InsertImpl(IntrusiveRedBlackTreeNode *node) {