Atmosphere-NX/Atmosphere (mirror of https://github.com/Atmosphere-NX/Atmosphere.git)

kern: update KInitialPageTable/KInitialPageAllocator

parent 962cf97150
commit 2fb258ca7e

4 changed files with 225 additions and 79 deletions
@@ -38,37 +38,67 @@ namespace ams::kern::arch::arm64::init {
         public:
             class IPageAllocator {
                 public:
-                    virtual KPhysicalAddress Allocate() { return Null<KPhysicalAddress>; }
-                    virtual void Free(KPhysicalAddress phys_addr) { /* Nothing to do here. */ (void)(phys_addr); }
+                    virtual KPhysicalAddress Allocate(size_t size) = 0;
+                    virtual void Free(KPhysicalAddress phys_addr, size_t size) = 0;
             };
-
-            struct NoClear{};
         private:
-            KPhysicalAddress m_l1_table;
+            KPhysicalAddress m_l1_tables[2];
+            u32 m_num_entries[2];
+
         public:
-            constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : m_l1_table(l1) { /* ... */ }
-
-            constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) {
-                ClearNewPageTable(m_l1_table);
-            }
-
-            constexpr ALWAYS_INLINE uintptr_t GetL1TableAddress() const {
-                return GetInteger(m_l1_table);
-            }
+            KInitialPageTable(KVirtualAddress start_address, KVirtualAddress end_address, IPageAllocator &allocator) {
+                /* Set tables. */
+                m_l1_tables[0] = AllocateNewPageTable(allocator);
+                m_l1_tables[1] = AllocateNewPageTable(allocator);
+
+                /* Set counts. */
+                m_num_entries[0] = MaxPageTableEntries;
+                m_num_entries[1] = ((end_address / L1BlockSize) & (MaxPageTableEntries - 1)) - ((start_address / L1BlockSize) & (MaxPageTableEntries - 1)) + 1;
+            }
+
+            KInitialPageTable() {
+                /* Set tables. */
+                m_l1_tables[0] = util::AlignDown(cpu::GetTtbr0El1(), PageSize);
+                m_l1_tables[1] = util::AlignDown(cpu::GetTtbr1El1(), PageSize);
+
+                /* Set counts. */
+                cpu::TranslationControlRegisterAccessor tcr;
+                m_num_entries[0] = tcr.GetT0Size() / L1BlockSize;
+                m_num_entries[1] = tcr.GetT1Size() / L1BlockSize;
+
+                /* Check counts. */
+                MESOSPHERE_INIT_ABORT_UNLESS(0 < m_num_entries[0] && m_num_entries[0] <= MaxPageTableEntries);
+                MESOSPHERE_INIT_ABORT_UNLESS(0 < m_num_entries[1] && m_num_entries[1] <= MaxPageTableEntries);
+            }
+
+            constexpr ALWAYS_INLINE uintptr_t GetTtbr0L1TableAddress() const {
+                return GetInteger(m_l1_tables[0]);
+            }
+
+            constexpr ALWAYS_INLINE uintptr_t GetTtbr1L1TableAddress() const {
+                return GetInteger(m_l1_tables[1]);
+            }
         private:
-            static constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KPhysicalAddress _l1_table, KVirtualAddress address) {
-                L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(_l1_table));
-                return l1_table + ((GetInteger(address) >> 30) & (MaxPageTableEntries - 1));
+            constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KVirtualAddress address) const {
+                const size_t index = (GetInteger(address) >> (BITSIZEOF(address) - 1)) & 1;
+                L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[index]));
+                return l1_table + ((GetInteger(address) / L1BlockSize) & (m_num_entries[index] - 1));
             }
 
             static constexpr ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KVirtualAddress address) {
                 L2PageTableEntry *l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(entry->GetTable()));
-                return l2_table + ((GetInteger(address) >> 21) & (MaxPageTableEntries - 1));
+                return l2_table + ((GetInteger(address) / L2BlockSize) & (MaxPageTableEntries - 1));
             }
 
             static constexpr ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KVirtualAddress address) {
                 L3PageTableEntry *l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(entry->GetTable()));
-                return l3_table + ((GetInteger(address) >> 12) & (MaxPageTableEntries - 1));
+                return l3_table + ((GetInteger(address) / L3BlockSize) & (MaxPageTableEntries - 1));
             }
 
+            static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(IPageAllocator &allocator) {
+                auto address = allocator.Allocate(PageSize);
+                ClearNewPageTable(address);
+                return address;
+            }
+
             static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address) {
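
The rewritten KInitialPageTable above tracks two L1 tables, one for the TTBR0 half and one for the TTBR1 half of the address space, and the new GetL1Entry selects between them with the top bit of the virtual address before wrapping the 1 GB slot index to the table's entry count. The standalone sketch below is not part of the commit; it just replays that index arithmetic with illustrative values (4 KB granule, 1 GB L1 blocks, power-of-two entry counts, hypothetical table addresses).

#include <cstdint>
#include <cstddef>
#include <cstdio>

// Illustrative constants: a 4 KB granule gives 1 GB L1 blocks and at most 512 L1 entries.
constexpr uint64_t L1BlockSize         = uint64_t(1) << 30;
constexpr uint32_t MaxPageTableEntries = 512;

struct TwoTableSketch {
    uint64_t l1_tables[2];   // [0] = TTBR0 (low half), [1] = TTBR1 (high half)
    uint32_t num_entries[2]; // L1 entries each table actually covers (power of two)

    // Mirrors the arithmetic of the new GetL1Entry: bit 63 selects the table,
    // then the 1 GB slot index is wrapped to that table's entry count.
    uint64_t GetL1SlotAddress(uint64_t address) const {
        const size_t   index = (address >> 63) & 1;
        const uint64_t slot  = (address / L1BlockSize) & (num_entries[index] - 1);
        return l1_tables[index] + slot * 8; // 8 bytes per descriptor
    }
};

int main() {
    const TwoTableSketch t = { { 0x80000000, 0x80001000 }, { MaxPageTableEntries, MaxPageTableEntries } };
    std::printf("%llx\n", static_cast<unsigned long long>(t.GetL1SlotAddress(0xFFFFFF8000000000ULL)));
    return 0;
}
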
@@ -83,7 +113,7 @@ namespace ams::kern::arch::arm64::init {
                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 size_t count = 0;
                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
 
                     /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
                     if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
@@ -137,7 +167,7 @@ namespace ams::kern::arch::arm64::init {
                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 size_t count = 0;
                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
 
                     /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
                     if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
@@ -194,7 +224,7 @@ namespace ams::kern::arch::arm64::init {
             }
 
             PageTableEntry *GetMappingEntry(KVirtualAddress virt_addr, size_t block_size) {
-                L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
+                L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
 
                 if (l1_entry->IsBlock()) {
                     MESOSPHERE_INIT_ABORT_UNLESS(block_size == L1BlockSize);
@@ -301,7 +331,7 @@ namespace ams::kern::arch::arm64::init {
 
                 /* Iteratively map pages until the requested region is mapped. */
                 while (size > 0) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
 
                     /* Can we make an L1 block? */
                     if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
@@ -316,7 +346,7 @@ namespace ams::kern::arch::arm64::init {
 
                     /* If we don't already have an L2 table, we need to make a new one. */
                     if (!l1_entry->IsTable()) {
-                        KPhysicalAddress new_table = allocator.Allocate();
+                        KPhysicalAddress new_table = AllocateNewPageTable(allocator);
                         ClearNewPageTable(new_table);
                         *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
                         cpu::DataSynchronizationBarrierInnerShareable();
@@ -350,7 +380,7 @@ namespace ams::kern::arch::arm64::init {
 
                     /* If we don't already have an L3 table, we need to make a new one. */
                     if (!l2_entry->IsTable()) {
-                        KPhysicalAddress new_table = allocator.Allocate();
+                        KPhysicalAddress new_table = AllocateNewPageTable(allocator);
                         ClearNewPageTable(new_table);
                         *l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
                         cpu::DataSynchronizationBarrierInnerShareable();
@@ -382,7 +412,7 @@ namespace ams::kern::arch::arm64::init {
 
             KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
                 /* Get the L1 entry. */
-                const L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
+                const L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
 
                 if (l1_entry->IsBlock()) {
                     return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
@@ -444,7 +474,7 @@ namespace ams::kern::arch::arm64::init {
                 };
 
                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
 
                     /* If an L1 block is mapped, update. */
                     if (l1_entry->IsBlock()) {
@@ -485,7 +515,7 @@ namespace ams::kern::arch::arm64::init {
 
                 const KVirtualAddress end_virt_addr = virt_addr + size;
                 while (virt_addr < end_virt_addr) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
 
                     /* If an L1 block is mapped, the address isn't free. */
                     if (l1_entry->IsBlock()) {
@@ -534,7 +564,7 @@ namespace ams::kern::arch::arm64::init {
 
                 /* Iteratively reprotect pages until the requested region is reprotected. */
                 while (size > 0) {
-                    L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
+                    L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
 
                     /* Check if an L1 block is present. */
                     if (l1_entry->IsBlock()) {
@@ -673,11 +703,18 @@ namespace ams::kern::arch::arm64::init {
     };
 
-    class KInitialPageAllocator : public KInitialPageTable::IPageAllocator {
+    class KInitialPageAllocator final : public KInitialPageTable::IPageAllocator {
+        private:
+            static constexpr inline size_t FreeUnitSize = BITSIZEOF(u64) * PageSize;
+            struct FreeListEntry {
+                FreeListEntry *next;
+                size_t size;
+            };
         public:
             struct State {
-                uintptr_t next_address;
-                uintptr_t free_bitmap;
+                uintptr_t start_address;
+                uintptr_t end_address;
+                FreeListEntry *free_head;
             };
         private:
            State m_state;
@@ -685,8 +722,8 @@ namespace ams::kern::arch::arm64::init {
            constexpr ALWAYS_INLINE KInitialPageAllocator() : m_state{} { /* ... */ }
 
            ALWAYS_INLINE void Initialize(uintptr_t address) {
-                m_state.next_address = address + BITSIZEOF(m_state.free_bitmap) * PageSize;
-                m_state.free_bitmap = ~uintptr_t();
+                m_state.start_address = address;
+                m_state.end_address = address;
            }
 
            ALWAYS_INLINE void InitializeFromState(uintptr_t state_val) {
@@ -697,28 +734,134 @@ namespace ams::kern::arch::arm64::init {
                *out = m_state;
                m_state = {};
            }
-        public:
-            virtual KPhysicalAddress Allocate() override {
-                MESOSPHERE_INIT_ABORT_UNLESS(m_state.next_address != Null<uintptr_t>);
-                uintptr_t allocated = m_state.next_address;
-                if (m_state.free_bitmap != 0) {
-                    u64 index;
-                    uintptr_t mask;
-                    do {
-                        index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(m_state.free_bitmap) - 1);
-                        mask = (static_cast<uintptr_t>(1) << index);
-                    } while ((m_state.free_bitmap & mask) == 0);
-                    m_state.free_bitmap &= ~mask;
-                    allocated = m_state.next_address - ((BITSIZEOF(m_state.free_bitmap) - index) * PageSize);
-                } else {
-                    m_state.next_address += PageSize;
-                }
-
-                ClearPhysicalMemory(allocated, PageSize);
-                return allocated;
-            }
-
-            /* No need to override free. The default does nothing, and so would we. */
+        private:
+            bool CanAllocate(size_t align, size_t size) const {
+                for (auto *cur = m_state.free_head; cur != nullptr; cur = cur->next) {
+                    const uintptr_t cur_last = reinterpret_cast<uintptr_t>(cur) + cur->size - 1;
+                    const uintptr_t alloc_last = util::AlignUp(reinterpret_cast<uintptr_t>(cur), align) + size - 1;
+                    if (alloc_last <= cur_last) {
+                        return true;
+                    }
+                }
+                return false;
+            }
+
+            bool TryAllocate(uintptr_t address, size_t size) {
+                /* Try to allocate the region. */
+                auto **prev_next = std::addressof(m_state.free_head);
+                for (auto *cur = m_state.free_head; cur != nullptr; prev_next = std::addressof(cur->next), cur = cur->next) {
+                    const uintptr_t cur_start = reinterpret_cast<uintptr_t>(cur);
+                    const uintptr_t cur_last = cur_start + cur->size - 1;
+                    if (cur_start <= address && address + size - 1 <= cur_last) {
+                        auto *alloc = reinterpret_cast<FreeListEntry *>(address);
+
+                        /* Perform fragmentation at front. */
+                        if (cur != alloc) {
+                            prev_next = std::addressof(cur->next);
+                            *alloc = {
+                                .next = cur->next,
+                                .size = cur_start + cur->size - address,
+                            };
+                            *cur = {
+                                .next = alloc,
+                                .size = address - cur_start,
+                            };
+                        }
+
+                        /* Perform fragmentation at tail. */
+                        if (alloc->size != size) {
+                            auto *next = reinterpret_cast<FreeListEntry *>(address + size);
+                            *next = {
+                                .next = alloc->next,
+                                .size = alloc->size - size,
+                            };
+                            *alloc = {
+                                .next = next,
+                                .size = size,
+                            };
+                        }
+
+                        *prev_next = alloc->next;
+                        return true;
+                    }
+                }
+
+                return false;
+            }
+        public:
+            KPhysicalAddress Allocate(size_t align, size_t size) {
+                /* Ensure that the free list is non-empty. */
+                while (!this->CanAllocate(align, size)) {
+                    this->Free(m_state.end_address, FreeUnitSize);
+                    m_state.end_address += FreeUnitSize;
+                }
+
+                /* Allocate a random address. */
+                const uintptr_t aligned_start = util::AlignUp(m_state.start_address, align);
+                const uintptr_t aligned_end = util::AlignDown(m_state.end_address, align);
+                const size_t ind_max = ((aligned_end - aligned_start) / align) - 1;
+                while (true) {
+                    if (const uintptr_t random_address = aligned_start + (KSystemControl::Init::GenerateRandomRange(0, ind_max) * align); this->TryAllocate(random_address, size)) {
+                        return random_address;
+                    }
+                }
+            }
+
+            virtual KPhysicalAddress Allocate(size_t size) override {
+                return this->Allocate(size, size);
+            }
+
+            virtual void Free(KPhysicalAddress phys_addr, size_t size) override {
+                auto **prev_next = std::addressof(m_state.free_head);
+                auto *new_chunk = reinterpret_cast<FreeListEntry *>(GetInteger(phys_addr));
+                if (auto *cur = m_state.free_head; cur != nullptr) {
+                    const uintptr_t new_start = reinterpret_cast<uintptr_t>(new_chunk);
+                    const uintptr_t new_end = GetInteger(phys_addr) + size;
+                    while (true) {
+                        /* Attempt coalescing. */
+                        const uintptr_t cur_start = reinterpret_cast<uintptr_t>(cur);
+                        const uintptr_t cur_end = cur_start + cur->size;
+                        if (new_start < new_end) {
+                            if (new_end < cur_start) {
+                                *new_chunk = {
+                                    .next = cur,
+                                    .size = size,
+                                };
+                                break;
+                            } else if (new_end == cur_start) {
+                                *new_chunk = {
+                                    .next = cur->next,
+                                    .size = cur->size + size,
+                                };
+                                break;
+                            }
+                        } else if (cur_end == new_start) {
+                            cur->size += size;
+                            return;
+                        }
+
+                        prev_next = std::addressof(cur->next);
+                        if (cur->next != nullptr) {
+                            cur = cur->next;
+                        } else {
+                            *new_chunk = {
+                                .next = nullptr,
+                                .size = size,
+                            };
+                            cur->next = new_chunk;
+                            return;
+                        }
+                    }
+
+                } else {
+                    *new_chunk = {
+                        .next = nullptr,
+                        .size = size,
+                    };
+                }
+
+                *prev_next = new_chunk;
+            }
    };
 
 }
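
The allocator rewrite above drops the 64-page bitmap in favor of an intrusive free list with randomized placement: Allocate(align, size) keeps growing the managed range by FreeUnitSize until CanAllocate sees a chunk large enough, then retries random aligned candidates until TryAllocate finds a chunk that wholly contains one and splits it. The sketch below is a compressed illustration of that strategy, not the commit's code: std::vector and std::mt19937_64 stand in for the intrusive FreeListEntry list and KSystemControl::Init::GenerateRandomRange, and coalescing on Free is omitted.

#include <cstdint>
#include <cstddef>
#include <random>
#include <vector>

struct Chunk { uintptr_t start; size_t size; }; // one free region

class SketchAllocator {
    private:
        std::vector<Chunk> m_free;     // stands in for the intrusive FreeListEntry list
        std::mt19937_64 m_rng{0x1234}; // stands in for the kernel's RNG
    public:
        void Free(uintptr_t addr, size_t size) {
            m_free.push_back({addr, size}); // the real code also coalesces adjacent chunks
        }

        bool TryAllocate(uintptr_t addr, size_t size) {
            for (size_t i = 0; i < m_free.size(); ++i) {
                const Chunk c = m_free[i];
                if (c.start <= addr && addr + size <= c.start + c.size) {
                    // Split the containing chunk into the pieces before and after the allocation.
                    m_free.erase(m_free.begin() + i);
                    if (addr != c.start) {
                        m_free.push_back({c.start, addr - c.start});
                    }
                    if (addr + size != c.start + c.size) {
                        m_free.push_back({addr + size, (c.start + c.size) - (addr + size)});
                    }
                    return true;
                }
            }
            return false;
        }

        uintptr_t Allocate(uintptr_t range_start, uintptr_t range_end, size_t align, size_t size) {
            // Keep trying random aligned candidates until one lands wholly inside a free chunk.
            const uintptr_t slots = (range_end - range_start) / align;
            while (true) {
                const uintptr_t candidate = range_start + (m_rng() % slots) * align;
                if (this->TryAllocate(candidate, size)) {
                    return candidate;
                }
            }
        }
};

int main() {
    SketchAllocator allocator;
    allocator.Free(0x100000, 0x40000); // seed the sketch with a 256 KB pool
    const uintptr_t page = allocator.Allocate(0x100000, 0x140000, 0x1000, 0x1000);
    return page != 0 ? 0 : 1;
}
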
@@ -197,6 +197,11 @@ namespace ams::kern::arch::arm64::cpu {
        public:
            MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(TranslationControl, tcr_el1)
 
+            constexpr ALWAYS_INLINE size_t GetT0Size() const {
+                const size_t shift_value = this->GetBits(0, 6);
+                return size_t(1) << (size_t(64) - shift_value);
+            }
+
            constexpr ALWAYS_INLINE size_t GetT1Size() const {
                const size_t shift_value = this->GetBits(16, 6);
                return size_t(1) << (size_t(64) - shift_value);
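
The new GetT0Size (and the existing GetT1Size) decode the TCR_EL1 T0SZ/T1SZ fields, which store 64 minus the bit width of the translated region. A small self-contained check of that arithmetic, using the 39-bit kernel half and 1 GB L1 blocks implied elsewhere in this diff (illustrative, not part of the commit):

#include <cstddef>

constexpr size_t L1BlockSize         = size_t(1) << 30; // 1 GB blocks (4 KB granule)
constexpr size_t MaxPageTableEntries = 512;

// Same computation as GetT0Size/GetT1Size: region size = 2^(64 - TnSZ).
constexpr size_t GetSizeFromTnSz(size_t tnsz) {
    return size_t(1) << (size_t(64) - tnsz);
}

// A 39-bit address half (e.g. the kernel range starting at 0xFFFFFF8000000000)
// corresponds to TnSZ = 25: a 512 GB region, i.e. exactly 512 one-gigabyte L1 entries,
// which is what the MaxPageTableEntries assertion in the init code expects.
static_assert(GetSizeFromTnSz(25) == (size_t(1) << 39));
static_assert(GetSizeFromTnSz(25) / L1BlockSize == MaxPageTableEntries);

int main() { return 0; }
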
@@ -50,7 +50,7 @@ namespace ams::kern::init {
            constexpr size_t StackSize = PageSize;
            constexpr size_t StackAlign = PageSize;
            const KVirtualAddress stack_start_virt = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(StackSize, StackAlign, KMemoryRegionType_KernelMisc, PageSize);
-            const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate();
+            const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate(PageSize);
            MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id));
 
            page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator);
@@ -135,7 +135,7 @@ namespace ams::kern::init {
        MESOSPHERE_INIT_ABORT_UNLESS((cpu::TranslationControlRegisterAccessor().GetT1Size() / arch::arm64::L1BlockSize) == arch::arm64::MaxPageTableEntries);
 
        /* Create page table object for use during initialization. */
-        KInitialPageTable ttbr1_table(util::AlignDown(cpu::GetTtbr1El1(), PageSize), KInitialPageTable::NoClear{});
+        KInitialPageTable ttbr1_table;
 
        /* Initialize the slab allocator counts. */
        InitializeSlabResourceCounts();
@@ -382,7 +382,7 @@ namespace ams::kern::init {
        /* Finalize the page allocator, we're done allocating at this point. */
        KInitialPageAllocator::State final_init_page_table_state;
        g_initial_page_allocator.GetFinalState(std::addressof(final_init_page_table_state));
-        const KPhysicalAddress final_init_page_table_end_address = final_init_page_table_state.next_address;
+        const KPhysicalAddress final_init_page_table_end_address = final_init_page_table_state.end_address;
        const size_t init_page_table_region_size = GetInteger(final_init_page_table_end_address) - GetInteger(resource_end_phys_addr);
 
        /* Insert regions for the initial page table region. */

@@ -28,6 +28,14 @@ namespace ams::kern::init::loader {
 
    namespace {
 
+        constexpr uintptr_t KernelBaseAlignment = 0x200000;
+        constexpr uintptr_t KernelBaseRangeStart = 0xFFFFFF8000000000;
+        constexpr uintptr_t KernelBaseRangeEnd = 0xFFFFFFFFFFE00000;
+        constexpr uintptr_t KernelBaseRangeLast = KernelBaseRangeEnd - 1;
+        static_assert(util::IsAligned(KernelBaseRangeStart, KernelBaseAlignment));
+        static_assert(util::IsAligned(KernelBaseRangeEnd, KernelBaseAlignment));
+        static_assert(KernelBaseRangeStart <= KernelBaseRangeLast);
+
        static_assert(InitialProcessBinarySizeMax <= KernelResourceSize);
 
        constexpr size_t InitialPageTableRegionSizeMax = 2_MB;
@@ -71,27 +79,24 @@ namespace ams::kern::init::loader {
            cpu::InvalidateEntireTlb();
        }
 
-        void SetupInitialIdentityMapping(KInitialPageTable &ttbr1_table, uintptr_t base_address, uintptr_t kernel_size, uintptr_t page_table_region, size_t page_table_region_size, KInitialPageTable::IPageAllocator &allocator) {
-            /* Make a new page table for TTBR0_EL1. */
-            KInitialPageTable ttbr0_table(allocator.Allocate());
-
+        void SetupInitialIdentityMapping(KInitialPageTable &init_pt, uintptr_t base_address, uintptr_t kernel_size, uintptr_t page_table_region, size_t page_table_region_size, KInitialPageTable::IPageAllocator &allocator) {
            /* Map in an RWX identity mapping for the kernel. */
            constexpr PageTableEntry KernelRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-            ttbr0_table.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator);
+            init_pt.Map(base_address, kernel_size, base_address, KernelRWXIdentityAttribute, allocator);
 
            /* Map in an RWX identity mapping for ourselves. */
            constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
            const uintptr_t kernel_ldr_base = util::AlignDown(reinterpret_cast<uintptr_t>(__start__), PageSize);
            const uintptr_t kernel_ldr_size = util::AlignUp(reinterpret_cast<uintptr_t>(__end__), PageSize) - kernel_ldr_base;
-            ttbr0_table.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelRWXIdentityAttribute, allocator);
+            init_pt.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelRWXIdentityAttribute, allocator);
 
            /* Map in the page table region as RW- for ourselves. */
            constexpr PageTableEntry PageTableRegionRWAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-            ttbr0_table.Map(page_table_region, page_table_region_size, page_table_region, KernelRWXIdentityAttribute, allocator);
+            init_pt.Map(page_table_region, page_table_region_size, page_table_region, KernelRWXIdentityAttribute, allocator);
 
            /* Place the L1 table addresses in the relevant system registers. */
-            cpu::SetTtbr0El1(ttbr0_table.GetL1TableAddress());
-            cpu::SetTtbr1El1(ttbr1_table.GetL1TableAddress());
+            cpu::SetTtbr0El1(init_pt.GetTtbr0L1TableAddress());
+            cpu::SetTtbr1El1(init_pt.GetTtbr1L1TableAddress());
 
            /* Setup MAIR_EL1, TCR_EL1. */
            /* TODO: Define these bits properly elsewhere, document exactly what each bit set is doing .*/
@@ -115,19 +120,12 @@ namespace ams::kern::init::loader {
 
        KVirtualAddress GetRandomKernelBaseAddress(KInitialPageTable &page_table, KPhysicalAddress phys_base_address, size_t kernel_size) {
-            /* Define useful values for random generation. */
-            constexpr uintptr_t KernelBaseAlignment = 0x200000;
-            constexpr uintptr_t KernelBaseRangeMin = 0xFFFFFF8000000000;
-            constexpr uintptr_t KernelBaseRangeMax = 0xFFFFFFFFFFE00000;
-            constexpr uintptr_t KernelBaseRangeEnd = KernelBaseRangeMax - 1;
-            static_assert(util::IsAligned(KernelBaseRangeMin, KernelBaseAlignment));
-            static_assert(util::IsAligned(KernelBaseRangeMax, KernelBaseAlignment));
-            static_assert(KernelBaseRangeMin <= KernelBaseRangeEnd);
-
            const uintptr_t kernel_offset = GetInteger(phys_base_address) % KernelBaseAlignment;
 
            /* Repeatedly generate a random virtual address until we get one that's unmapped in the destination page table. */
            while (true) {
-                const uintptr_t random_kaslr_slide = KSystemControl::Init::GenerateRandomRange(KernelBaseRangeMin / KernelBaseAlignment, KernelBaseRangeEnd / KernelBaseAlignment);
+                const uintptr_t random_kaslr_slide = KSystemControl::Init::GenerateRandomRange(KernelBaseRangeStart / KernelBaseAlignment, KernelBaseRangeLast / KernelBaseAlignment);
                const KVirtualAddress kernel_region_start = random_kaslr_slide * KernelBaseAlignment;
                const KVirtualAddress kernel_region_end = kernel_region_start + util::AlignUp(kernel_offset + kernel_size, KernelBaseAlignment);
                const size_t kernel_region_size = GetInteger(kernel_region_end) - GetInteger(kernel_region_start);
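
The constants hoisted to namespace scope earlier in this file feed the loop above: the kernel base is a random KernelBaseAlignment multiple inside [KernelBaseRangeStart, KernelBaseRangeLast], rejected when the mapped region would run past KernelBaseRangeEnd or overlap an existing mapping. The sketch below is illustrative rather than the commit's code: std::mt19937_64 stands in for KSystemControl::Init::GenerateRandomRange, and the unmapped-region check and physical-offset handling are reduced to comments.

#include <cstdint>
#include <random>

constexpr uint64_t KernelBaseAlignment  = 0x200000;            // 2 MB slide granularity
constexpr uint64_t KernelBaseRangeStart = 0xFFFFFF8000000000;
constexpr uint64_t KernelBaseRangeEnd   = 0xFFFFFFFFFFE00000;
constexpr uint64_t KernelBaseRangeLast  = KernelBaseRangeEnd - 1;

static uint64_t GenerateRandomRange(uint64_t min, uint64_t max) {
    static std::mt19937_64 rng{0xC0FFEE};                      // illustrative RNG
    return min + (rng() % (max - min + 1));
}

uint64_t PickKernelBase(uint64_t kernel_size) {
    const uint64_t aligned_size = ((kernel_size + KernelBaseAlignment - 1) / KernelBaseAlignment) * KernelBaseAlignment;
    while (true) {
        // Pick a random multiple of the alignment inside the allowed range.
        const uint64_t slide = GenerateRandomRange(KernelBaseRangeStart / KernelBaseAlignment,
                                                   KernelBaseRangeLast  / KernelBaseAlignment);
        const uint64_t start = slide * KernelBaseAlignment;

        // Reject candidates whose region would run past the end of the range.
        if (start > KernelBaseRangeEnd - aligned_size) {
            continue;
        }

        // The real code additionally requires the whole region to be unmapped in the
        // destination page table, and accounts for the physical base's offset within
        // the 2 MB alignment.
        return start;
    }
}

int main() {
    volatile uint64_t base = PickKernelBase(uint64_t(16) << 20); // e.g. a 16 MB kernel image
    (void)base;
    return 0;
}
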
@@ -138,7 +136,7 @@ namespace ams::kern::init::loader {
                }
 
                /* Make sure that the region stays within our intended bounds. */
-                if (kernel_region_end > KernelBaseRangeMax) {
+                if (kernel_region_end > KernelBaseRangeEnd) {
                    continue;
                }
 
@@ -201,28 +199,28 @@ namespace ams::kern::init::loader {
        g_initial_page_allocator.Initialize(ini_end_address);
 
        /* Make a new page table for TTBR1_EL1. */
-        KInitialPageTable ttbr1_table(g_initial_page_allocator.Allocate());
+        KInitialPageTable init_pt(KernelBaseRangeStart, KernelBaseRangeLast, g_initial_page_allocator);
 
        /* Setup initial identity mapping. TTBR1 table passed by reference. */
-        SetupInitialIdentityMapping(ttbr1_table, base_address, bss_end_offset, ini_end_address, InitialPageTableRegionSizeMax, g_initial_page_allocator);
+        SetupInitialIdentityMapping(init_pt, base_address, bss_end_offset, ini_end_address, InitialPageTableRegionSizeMax, g_initial_page_allocator);
 
        /* Generate a random slide for the kernel's base address. */
-        const KVirtualAddress virtual_base_address = GetRandomKernelBaseAddress(ttbr1_table, base_address, bss_end_offset);
+        const KVirtualAddress virtual_base_address = GetRandomKernelBaseAddress(init_pt, base_address, bss_end_offset);
 
        /* Map kernel .text as R-X. */
        constexpr PageTableEntry KernelTextAttribute(PageTableEntry::Permission_KernelRX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-        ttbr1_table.Map(virtual_base_address + rx_offset, rx_end_offset - rx_offset, base_address + rx_offset, KernelTextAttribute, g_initial_page_allocator);
+        init_pt.Map(virtual_base_address + rx_offset, rx_end_offset - rx_offset, base_address + rx_offset, KernelTextAttribute, g_initial_page_allocator);
 
        /* Map kernel .rodata and .rwdata as RW-. */
        /* Note that we will later reprotect .rodata as R-- */
        constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
        constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-        ttbr1_table.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator);
-        ttbr1_table.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator);
+        init_pt.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator);
+        init_pt.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator);
 
        /* Physically randomize the kernel region. */
        /* NOTE: Nintendo does this only on 10.0.0+ */
-        ttbr1_table.PhysicallyRandomize(virtual_base_address + rx_offset, bss_end_offset - rx_offset, true);
+        init_pt.PhysicallyRandomize(virtual_base_address + rx_offset, bss_end_offset - rx_offset, true);
 
        /* Clear kernel .bss. */
        std::memset(GetVoidPointer(virtual_base_address + bss_offset), 0, bss_end_offset - bss_offset);
@@ -237,14 +235,14 @@ namespace ams::kern::init::loader {
        Elf::CallInitArrayFuncs(GetInteger(virtual_base_address) + init_array_offset, GetInteger(virtual_base_address) + init_array_end_offset);
 
        /* Reprotect .rodata as R-- */
-        ttbr1_table.Reprotect(virtual_base_address + ro_offset, ro_end_offset - ro_offset, KernelRwDataAttribute, KernelRoDataAttribute);
+        init_pt.Reprotect(virtual_base_address + ro_offset, ro_end_offset - ro_offset, KernelRwDataAttribute, KernelRoDataAttribute);
 
        /* Return the difference between the random virtual base and the physical base. */
        return GetInteger(virtual_base_address) - base_address;
    }
 
    KPhysicalAddress AllocateKernelInitStack() {
-        return g_initial_page_allocator.Allocate() + PageSize;
+        return g_initial_page_allocator.Allocate(PageSize) + PageSize;
    }
 
    uintptr_t GetFinalPageAllocatorState() {