mirror of
https://github.com/Atmosphere-NX/Atmosphere.git
synced 2024-12-22 02:12:06 +00:00
kernel_ldr: update to support 10.0.0
parent 122b0775f1
commit 116e00c21c
5 changed files with 334 additions and 29 deletions
@@ -19,6 +19,7 @@
 #include <mesosphere/kern_k_typed_address.hpp>
 #include <mesosphere/kern_select_cpu.hpp>
 #include <mesosphere/arch/arm64/kern_k_page_table_entry.hpp>
+#include <mesosphere/kern_select_system_control.hpp>
 
 namespace ams::kern::arch::arm64::init {
 
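Note: the newly included kern_select_system_control.hpp is presumably what declares KSystemControl::Init::GenerateRandomRange, which the randomization code introduced in the hunks below starts calling from this header.
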
@@ -64,6 +65,220 @@ namespace ams::kern::arch::arm64::init {
                 /* The MMU is necessarily not yet turned on, if we are creating an initial page table. */
                 std::memset(reinterpret_cast<void *>(GetInteger(address)), 0, PageSize);
             }
+        private:
+            size_t NOINLINE GetBlockCount(KVirtualAddress virt_addr, size_t size, size_t block_size) {
+                const KVirtualAddress end_virt_addr = virt_addr + size;
+                size_t count = 0;
+                while (virt_addr < end_virt_addr) {
+                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+
+                    /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
+                    if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
+                        virt_addr += L1BlockSize;
+                        if (l1_entry->IsBlock() && block_size == L1BlockSize) {
+                            count++;
+                        }
+                        continue;
+                    }
+
+                    /* Non empty and non-block must be table. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+
+                    /* Table, so check if we're mapped in L2. */
+                    L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+
+                    if (l2_entry->IsBlock() || l2_entry->IsEmpty()) {
+                        const size_t advance_size = (l2_entry->IsBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
+                        MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
+                        virt_addr += advance_size;
+                        if (l2_entry->IsBlock() && block_size == advance_size) {
+                            count++;
+                        }
+                        continue;
+                    }
+
+                    /* Non empty and non-block must be table. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+
+                    /* Table, so check if we're mapped in L3. */
+                    L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+
+                    /* L3 must be block or empty. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock() || l3_entry->IsEmpty());
+
+                    const size_t advance_size = (l3_entry->IsBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
+                    MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
+                    virt_addr += advance_size;
+                    if (l3_entry->IsBlock() && block_size == advance_size) {
+                        count++;
+                    }
+                }
+                return count;
+            }
+
+            KVirtualAddress NOINLINE GetBlockByIndex(KVirtualAddress virt_addr, size_t size, size_t block_size, size_t index) {
+                const KVirtualAddress end_virt_addr = virt_addr + size;
+                size_t count = 0;
+                while (virt_addr < end_virt_addr) {
+                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+
+                    /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
+                    if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
+                        MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
+                        if (l1_entry->IsBlock() && block_size == L1BlockSize) {
+                            if ((count++) == index) {
+                                return virt_addr;
+                            }
+                        }
+                        virt_addr += L1BlockSize;
+                        continue;
+                    }
+
+                    /* Non empty and non-block must be table. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+
+                    /* Table, so check if we're mapped in L2. */
+                    L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+
+                    if (l2_entry->IsBlock() || l2_entry->IsEmpty()) {
+                        const size_t advance_size = (l2_entry->IsBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
+                        MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
+                        MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
+                        if (l2_entry->IsBlock() && block_size == advance_size) {
+                            if ((count++) == index) {
+                                return virt_addr;
+                            }
+                        }
+                        virt_addr += advance_size;
+                        continue;
+                    }
+
+                    /* Non empty and non-block must be table. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+
+                    /* Table, so check if we're mapped in L3. */
+                    L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+
+                    /* L3 must be block or empty. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock() || l3_entry->IsEmpty());
+
+                    const size_t advance_size = (l3_entry->IsBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
+                    MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
+                    if (l3_entry->IsBlock() && block_size == advance_size) {
+                        if ((count++) == index) {
+                            return virt_addr;
+                        }
+                    }
+                    virt_addr += advance_size;
+                }
+                return Null<KVirtualAddress>;
+            }
+
+            PageTableEntry *GetMappingEntry(KVirtualAddress virt_addr, size_t block_size) {
+                L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+
+                if (l1_entry->IsBlock()) {
+                    MESOSPHERE_INIT_ABORT_UNLESS(block_size == L1BlockSize);
+                    return l1_entry;
+                }
+
+                MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+
+                /* Table, so check if we're mapped in L2. */
+                L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+
+                if (l2_entry->IsBlock()) {
+                    const size_t real_size = (l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
+                    MESOSPHERE_INIT_ABORT_UNLESS(real_size == block_size);
+                    return l2_entry;
+                }
+
+                MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+
+                /* Table, so check if we're mapped in L3. */
+                L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+
+                /* L3 must be block. */
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
+
+                const size_t real_size = (l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
+                MESOSPHERE_INIT_ABORT_UNLESS(real_size == block_size);
+                return l3_entry;
+            }
+
+            void NOINLINE SwapBlocks(KVirtualAddress src_virt_addr, KVirtualAddress dst_virt_addr, size_t block_size, bool do_copy) {
+                static_assert(L2ContiguousBlockSize / L2BlockSize == L3ContiguousBlockSize / L3BlockSize);
+                const bool contig = (block_size == L2ContiguousBlockSize || block_size == L3ContiguousBlockSize);
+                const size_t num_mappings = contig ? L2ContiguousBlockSize / L2BlockSize : 1;
+
+                /* Unmap the source. */
+                PageTableEntry *src_entry = this->GetMappingEntry(src_virt_addr, block_size);
+                const auto src_saved = *src_entry;
+                for (size_t i = 0; i < num_mappings; i++) {
+                    *src_entry = InvalidPageTableEntry;
+                }
+
+                /* Unmap the target. */
+                PageTableEntry *dst_entry = this->GetMappingEntry(dst_virt_addr, block_size);
+                const auto dst_saved = *dst_entry;
+                for (size_t i = 0; i < num_mappings; i++) {
+                    *dst_entry = InvalidPageTableEntry;
+                }
+
+                /* Invalidate the entire tlb. */
+                cpu::DataSynchronizationBarrierInnerShareable();
+                cpu::InvalidateEntireTlb();
+
+                /* Copy data, if we should. */
+                const u64 negative_block_size_for_mask = static_cast<u64>(-static_cast<s64>(block_size));
+                const u64 offset_mask = negative_block_size_for_mask & ((1ul << 36) - 1);
+                const KVirtualAddress copy_src_addr = KVirtualAddress(src_saved.GetRawAttributesUnsafeForSwap() & offset_mask);
+                const KVirtualAddress copy_dst_addr = KVirtualAddress(dst_saved.GetRawAttributesUnsafeForSwap() & offset_mask);
+                if (block_size && do_copy) {
+                    u8 tmp[0x100];
+                    for (size_t ofs = 0; ofs < block_size; ofs += sizeof(tmp)) {
+                        std::memcpy(tmp, GetVoidPointer(copy_src_addr + ofs), sizeof(tmp));
+                        std::memcpy(GetVoidPointer(copy_src_addr + ofs), GetVoidPointer(copy_dst_addr + ofs), sizeof(tmp));
+                        std::memcpy(GetVoidPointer(copy_dst_addr + ofs), tmp, sizeof(tmp));
+                    }
+                }
+
+                /* Swap the mappings. */
+                const u64 attr_preserve_mask = (negative_block_size_for_mask | 0xFFFF000000000000ul) ^ ((1ul << 36) - 1);
+                const size_t shift_for_contig = contig ? 4 : 0;
+                size_t advanced_size = 0;
+                const u64 src_attr_val = src_saved.GetRawAttributesUnsafeForSwap() & attr_preserve_mask;
+                const u64 dst_attr_val = dst_saved.GetRawAttributesUnsafeForSwap() & attr_preserve_mask;
+                for (size_t i = 0; i < num_mappings; i++) {
+                    reinterpret_cast<u64 *>(src_entry)[i] = GetInteger(copy_dst_addr + (advanced_size >> shift_for_contig)) | src_attr_val;
+                    reinterpret_cast<u64 *>(dst_entry)[i] = GetInteger(copy_src_addr + (advanced_size >> shift_for_contig)) | dst_attr_val;
+                    advanced_size += block_size;
+                }
+
+                cpu::DataSynchronizationBarrierInnerShareable();
+            }
+
+            void NOINLINE PhysicallyRandomize(KVirtualAddress virt_addr, size_t size, size_t block_size, bool do_copy) {
+                const size_t block_count = this->GetBlockCount(virt_addr, size, block_size);
+                if (block_count > 1) {
+                    for (size_t cur_block = 0; cur_block < block_count; cur_block++) {
+                        const size_t target_block = KSystemControl::Init::GenerateRandomRange(cur_block, block_count - 1);
+                        if (cur_block != target_block) {
+                            const KVirtualAddress cur_virt_addr = this->GetBlockByIndex(virt_addr, size, block_size, cur_block);
+                            const KVirtualAddress target_virt_addr = this->GetBlockByIndex(virt_addr, size, block_size, target_block);
+                            MESOSPHERE_INIT_ABORT_UNLESS(cur_virt_addr != Null<KVirtualAddress>);
+                            MESOSPHERE_INIT_ABORT_UNLESS(target_virt_addr != Null<KVirtualAddress>);
+                            this->SwapBlocks(cur_virt_addr, target_virt_addr, block_size, do_copy);
+                        }
+                    }
+                }
+            }
         public:
             void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, IPageAllocator &allocator) {
                 /* Ensure that addresses and sizes are page aligned. */
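
Note: the private PhysicallyRandomize above walks the blocks of one mapping granularity and, for each index, swaps it with a uniformly chosen block at an index at or after the current one, i.e. a forward Fisher-Yates shuffle over the physical backing of the region. A minimal stand-alone sketch of that selection pattern, with std::mt19937_64 standing in for KSystemControl::Init::GenerateRandomRange and std::swap standing in for SwapBlocks:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <numeric>
    #include <random>
    #include <utility>
    #include <vector>

    /* Stand-in for KSystemControl::Init::GenerateRandomRange(min, max), inclusive bounds. */
    static std::mt19937_64 g_rng{0xC0FFEE};
    static std::uint64_t GenerateRandomRange(std::uint64_t min, std::uint64_t max) {
        return std::uniform_int_distribution<std::uint64_t>(min, max)(g_rng);
    }

    int main() {
        /* Pretend these are the physical blocks backing a virtual range. */
        std::vector<std::size_t> blocks(8);
        std::iota(blocks.begin(), blocks.end(), 0);

        /* Same structure as PhysicallyRandomize: swap block i with a random block in [i, count - 1]. */
        const std::size_t block_count = blocks.size();
        for (std::size_t cur_block = 0; cur_block < block_count; cur_block++) {
            const std::size_t target_block = GenerateRandomRange(cur_block, block_count - 1);
            if (cur_block != target_block) {
                std::swap(blocks[cur_block], blocks[target_block]); /* SwapBlocks() analogue */
            }
        }

        for (std::size_t b : blocks) {
            std::printf("%zu ", b);
        }
        std::printf("\n");
        return 0;
    }
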
@@ -363,32 +578,53 @@
                 cpu::DataSynchronizationBarrierInnerShareable();
             }
 
+            void PhysicallyRandomize(KVirtualAddress virt_addr, size_t size, bool do_copy) {
+                this->PhysicallyRandomize(virt_addr, size, L1BlockSize, do_copy);
+                this->PhysicallyRandomize(virt_addr, size, L2ContiguousBlockSize, do_copy);
+                this->PhysicallyRandomize(virt_addr, size, L2BlockSize, do_copy);
+                this->PhysicallyRandomize(virt_addr, size, L3ContiguousBlockSize, do_copy);
+                this->PhysicallyRandomize(virt_addr, size, L3BlockSize, do_copy);
+            }
+
     };
 
     class KInitialPageAllocator : public KInitialPageTable::IPageAllocator {
-        private:
-            uintptr_t next_address;
         public:
-            constexpr ALWAYS_INLINE KInitialPageAllocator() : next_address(Null<uintptr_t>) { /* ... */ }
+            struct State {
+                uintptr_t next_address;
+                uintptr_t free_bitmap;
+            };
+        private:
+            State state;
+        public:
+            constexpr ALWAYS_INLINE KInitialPageAllocator() : state{} { /* ... */ }
 
             ALWAYS_INLINE void Initialize(uintptr_t address) {
-                this->next_address = address;
+                this->state.next_address = address + BITSIZEOF(this->state.free_bitmap) * PageSize;
+                this->state.free_bitmap = ~uintptr_t();
            }
 
-            ALWAYS_INLINE uintptr_t GetFinalNextAddress() {
-                const uintptr_t final_address = this->next_address;
-                this->next_address = Null<uintptr_t>;
-                return final_address;
-            }
-
-            ALWAYS_INLINE uintptr_t GetFinalState() {
-                return this->GetFinalNextAddress();
+            ALWAYS_INLINE void GetFinalState(State *out) {
+                *out = this->state;
+                this->state = {};
             }
         public:
             virtual KPhysicalAddress Allocate() override {
-                MESOSPHERE_INIT_ABORT_UNLESS(this->next_address != Null<uintptr_t>);
-                const uintptr_t allocated = this->next_address;
-                this->next_address += PageSize;
+                MESOSPHERE_INIT_ABORT_UNLESS(this->state.next_address != Null<uintptr_t>);
+                uintptr_t allocated = this->state.next_address;
+                if (this->state.free_bitmap != 0) {
+                    u64 index;
+                    uintptr_t mask;
+                    do {
+                        index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(this->state.free_bitmap) - 1);
+                        mask = (static_cast<uintptr_t>(1) << index);
+                    } while ((this->state.free_bitmap & mask) == 0);
+                    this->state.free_bitmap &= ~mask;
+                    allocated = this->state.next_address - ((BITSIZEOF(this->state.free_bitmap) - index) * PageSize);
+                } else {
+                    this->state.next_address += PageSize;
+                }
+
                 std::memset(reinterpret_cast<void *>(allocated), 0, PageSize);
                 return allocated;
            }

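Note: the reworked allocator pre-reserves a window of BITSIZEOF(free_bitmap) pages below state.next_address and hands them out in random order; only once the bitmap is exhausted does it fall back to bumping next_address. A self-contained sketch of that behaviour, again with a stand-in RNG (PageSize, the State layout, and the address arithmetic mirror the diff; everything else is illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <random>

    constexpr std::size_t PageSize = 0x1000;

    /* Mirrors KInitialPageAllocator::State from the diff. */
    struct State {
        std::uintptr_t next_address;
        std::uintptr_t free_bitmap;
    };

    static std::mt19937_64 g_rng{1};
    static std::uint64_t GenerateRandomRange(std::uint64_t min, std::uint64_t max) {
        return std::uniform_int_distribution<std::uint64_t>(min, max)(g_rng);
    }

    constexpr std::size_t BitmapBits = sizeof(std::uintptr_t) * 8; /* BITSIZEOF(free_bitmap) */

    void Initialize(State *s, std::uintptr_t address) {
        /* Reserve BitmapBits pages up front; next_address points just past them. */
        s->next_address = address + BitmapBits * PageSize;
        s->free_bitmap  = ~static_cast<std::uintptr_t>(0);
    }

    std::uintptr_t Allocate(State *s) {
        std::uintptr_t allocated = s->next_address;
        if (s->free_bitmap != 0) {
            /* Pick a random still-free page out of the reserved window. */
            std::uint64_t index;
            std::uintptr_t mask;
            do {
                index = GenerateRandomRange(0, BitmapBits - 1);
                mask  = static_cast<std::uintptr_t>(1) << index;
            } while ((s->free_bitmap & mask) == 0);
            s->free_bitmap &= ~mask;
            allocated = s->next_address - ((BitmapBits - index) * PageSize);
        } else {
            /* Window exhausted: plain bump allocation. */
            s->next_address += PageSize;
        }
        return allocated;
    }

    int main() {
        State s;
        Initialize(&s, 0x80000000);
        for (int i = 0; i < 4; i++) {
            std::printf("allocated page at 0x%lx\n", static_cast<unsigned long>(Allocate(&s)));
        }
        return 0;
    }
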
@@ -31,6 +31,12 @@ namespace ams::kern::arch::arm64 {
         public:
             struct InvalidTag{};
 
+            enum ExtensionTag : u64 {
+                ExtensionTag_IsValidBit  = (1ul << 56),
+                ExtensionTag_IsValid     = (ExtensionTag_IsValidBit | (1ul << 0)),
+                ExtensionTag_IsBlockMask = (ExtensionTag_IsValidBit | (1ul << 1)),
+            };
+
             enum Permission : u64 {
                 Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
                 Permission_KernelRX = ((0ul << 53) | (1ul << 54) | (2ul << 6)),
@@ -89,7 +95,7 @@
 
             /* Construct a new attribute. */
             constexpr ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share)
-                : attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share))
+                : attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionTag_IsValid))
             {
                 /* ... */
             }
@@ -134,8 +140,9 @@
             constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
             constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
-            constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x1; }
+            constexpr ALWAYS_INLINE bool IsBlock() const { return (this->attributes & ExtensionTag_IsBlockMask) == ExtensionTag_IsValidBit; }
             constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBits(0, 2) == 0x3; }
+            constexpr ALWAYS_INLINE bool IsEmpty() const { return this->GetBits(0, 2) == 0x0; }
 
             constexpr ALWAYS_INLINE decltype(auto) SetContiguousAllowed(bool en) { this->SetBit(55, !en); return *this; }
             constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
@@ -157,6 +164,10 @@
                 return this->attributes == attr;
             }
 
+            constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
+                return this->attributes;
+            }
+
         protected:
             constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
                 return this->attributes;
@@ -186,7 +197,7 @@
             }
 
             constexpr ALWAYS_INLINE L1PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
+                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionTag_IsValid)
             {
                 /* ... */
             }
@@ -231,7 +242,7 @@
             }
 
             constexpr ALWAYS_INLINE L2PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
+                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | PageTableEntry::ExtensionTag_IsValid)
             {
                 /* ... */
             }
@@ -264,12 +275,12 @@
             constexpr ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
 
             constexpr ALWAYS_INLINE L3PageTableEntry(KPhysicalAddress phys_addr, const PageTableEntry &attr, bool contig)
-                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x3)
+                : PageTableEntry(attr, (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x2 | PageTableEntry::ExtensionTag_IsValid)
             {
                 /* ... */
             }
 
-            constexpr ALWAYS_INLINE bool IsBlock() const { return this->GetBits(0, 2) == 0x3; }
+            constexpr ALWAYS_INLINE bool IsBlock() const { return (GetRawAttributes() & ExtensionTag_IsBlockMask) == ExtensionTag_IsBlockMask; }
 
             constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
                 return this->SelectBits(12, 36);

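Note: ARMv8 translation table descriptors ignore a few high bits, and the new ExtensionTag values park a software marker in bit 56. Every block/page constructor above now ORs in ExtensionTag_IsValid, and the IsBlock() predicates answer from bit 56 plus bit 1 rather than from the low descriptor-type bits alone, which also lets an L3 page (hardware type bits 0b11, the same encoding a table descriptor uses) be told apart from a table. A small stand-alone check of that bit arithmetic (the raw address values are made up; 1ULL replaces the diff's 1ul purely for portability of the sketch):

    #include <cstdint>

    using u64 = std::uint64_t;

    /* Constants from the diff. */
    constexpr u64 ExtensionTag_IsValidBit  = (1ULL << 56);
    constexpr u64 ExtensionTag_IsValid     = ExtensionTag_IsValidBit | (1ULL << 0);
    constexpr u64 ExtensionTag_IsBlockMask = ExtensionTag_IsValidBit | (1ULL << 1);

    /* PageTableEntry::IsBlock() from the diff. */
    constexpr bool GenericIsBlock(u64 attributes) {
        return (attributes & ExtensionTag_IsBlockMask) == ExtensionTag_IsValidBit;
    }

    /* L3PageTableEntry::IsBlock() from the diff. */
    constexpr bool L3IsBlock(u64 attributes) {
        return (attributes & ExtensionTag_IsBlockMask) == ExtensionTag_IsBlockMask;
    }

    /* Hypothetical raw descriptors, low bits as in the constructors above. */
    constexpr u64 l1_block_entry = 0x40000000 | ExtensionTag_IsValid;       /* low bits 0b01, bit 56 set   */
    constexpr u64 l3_page_entry  = 0x40001000 | 0x2 | ExtensionTag_IsValid; /* low bits 0b11, bit 56 set   */
    constexpr u64 table_entry    = 0x40002000 | 0x3;                        /* table descriptor, no bit 56 */

    static_assert(GenericIsBlock(l1_block_entry), "L1/L2 blocks are blocks");
    static_assert(!GenericIsBlock(l3_page_entry) && L3IsBlock(l3_page_entry), "L3 pages only via L3IsBlock");
    static_assert(!GenericIsBlock(table_entry) && !L3IsBlock(table_entry), "tables are never blocks");

    int main() { return 0; }
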
@@ -20,6 +20,24 @@ namespace ams::kern {
 
     constexpr size_t PageSize = 4_KB;
 
+    enum TargetFirmware : u32 {
+        TargetFirmware_1_0_0  = ATMOSPHERE_TARGET_FIRMWARE_100,
+        TargetFirmware_2_0_0  = ATMOSPHERE_TARGET_FIRMWARE_200,
+        TargetFirmware_3_0_0  = ATMOSPHERE_TARGET_FIRMWARE_300,
+        TargetFirmware_4_0_0  = ATMOSPHERE_TARGET_FIRMWARE_400,
+        TargetFirmware_5_0_0  = ATMOSPHERE_TARGET_FIRMWARE_500,
+        TargetFirmware_6_0_0  = ATMOSPHERE_TARGET_FIRMWARE_600,
+        TargetFirmware_6_2_0  = ATMOSPHERE_TARGET_FIRMWARE_620,
+        TargetFirmware_7_0_0  = ATMOSPHERE_TARGET_FIRMWARE_700,
+        TargetFirmware_8_0_0  = ATMOSPHERE_TARGET_FIRMWARE_800,
+        TargetFirmware_8_1_0  = ATMOSPHERE_TARGET_FIRMWARE_810,
+        TargetFirmware_9_0_0  = ATMOSPHERE_TARGET_FIRMWARE_900,
+        TargetFirmware_9_1_0  = ATMOSPHERE_TARGET_FIRMWARE_910,
+        TargetFirmware_10_0_0 = ATMOSPHERE_TARGET_FIRMWARE_1000,
+    };
+
+    TargetFirmware GetTargetFirmware();
+
 }
 
 #if 1 || defined(AMS_BUILD_FOR_AUDITING)

@@ -21,6 +21,14 @@
 .section .crt0.text.start, "ax", %progbits
 .global _start
 _start:
+    b _main
+__metadata_begin:
+    .ascii "MLD0" /* Magic */
+__metadata_target_firmware:
+    .dword 0xCCCCCCCC /* Target Firmware. */
+__metadata_reserved:
+    .dword 0xCCCCCCCC /* Reserved. */
+_main:
     /* KernelLdr_Main(uintptr_t kernel_base_address, KernelMap *kernel_map, uintptr_t ini1_base_address); */
     adr x18, _start
    adr x16, __external_references
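
Note: the eight added lines embed a small metadata blob between the entry branch and _main: a 4-byte "MLD0" magic followed by two placeholder fields that the next hunk reads back with a 64-bit load. A hypothetical C++ view of that layout (the struct name is invented, treating each .dword as 8 bytes is an assumption consistent with the `ldr x0, [x0]` below, and the idea that whatever loads kernel_ldr patches the target-firmware placeholder is likewise an assumption):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    /* Hypothetical mirror of the bytes starting at _start. */
    struct KernelLdrHeader {
        std::uint32_t branch_to_main;   /* the `b _main` instruction at _start                  */
        char          magic[4];         /* "MLD0" at __metadata_begin                           */
        std::uint64_t target_firmware;  /* __metadata_target_firmware (0xCCCCCCCC placeholder)  */
        std::uint64_t reserved;         /* __metadata_reserved (0xCCCCCCCC placeholder)         */
    };

    static_assert(offsetof(KernelLdrHeader, magic) == 4, "metadata starts right after the branch");
    static_assert(offsetof(KernelLdrHeader, target_firmware) == 8, "field read back by GetTargetFirmware");
    static_assert(sizeof(KernelLdrHeader) == 24, "assumes .dword assembles to 8 bytes here");

    int main() {
        std::printf("header is %zu bytes\n", sizeof(KernelLdrHeader));
        return 0;
    }
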
@@ -96,6 +104,14 @@ _start:
     mov sp, x2
     br x1
 
+
+.global _ZN3ams4kern17GetTargetFirmwareEv
+.type _ZN3ams4kern17GetTargetFirmwareEv, %function
+_ZN3ams4kern17GetTargetFirmwareEv:
+    adr x0, __metadata_target_firmware
+    ldr x0, [x0]
+    ret
+
 .balign 8
 __external_references:
     .quad __bss_start__ - _start

@@ -31,12 +31,29 @@ namespace ams::kern::init::loader {
     constexpr size_t KernelResourceRegionSize = 0x1728000;
     constexpr size_t ExtraKernelResourceSize = 0x68000;
     static_assert(ExtraKernelResourceSize + KernelResourceRegionSize == 0x1790000);
+    constexpr size_t KernelResourceReduction_10_0_0 = 0x10000;
 
     constexpr size_t InitialPageTableRegionSize = 0x200000;
 
     /* Global Allocator. */
     KInitialPageAllocator g_initial_page_allocator;
 
+    KInitialPageAllocator::State g_final_page_allocator_state;
+
+    size_t GetResourceRegionSize() {
+        /* Decide if Kernel should have enlarged resource region. */
+        const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
+        size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);
+        static_assert(KernelResourceRegionSize > InitialProcessBinarySizeMax);
+        static_assert(KernelResourceRegionSize + ExtraKernelResourceSize > InitialProcessBinarySizeMax);
+
+        /* 10.0.0 reduced the kernel resource region size by 64K. */
+        if (kern::GetTargetFirmware() >= kern::TargetFirmware_10_0_0) {
+            resource_region_size -= KernelResourceReduction_10_0_0;
+        }
+        return resource_region_size;
+    }
+
     void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout) {
         /* TODO: Proper secure monitor call. */
         KPhysicalAddress correct_base = KSystemControl::Init::GetKernelPhysicalBaseAddress(base_address);
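
Note: the constants add up as the existing static_assert expects, 0x1728000 + 0x68000 = 0x1790000, and the new KernelResourceReduction_10_0_0 of 0x10000 bytes is exactly the 64K the comment mentions. A compile-time restatement of the same arithmetic (InitialProcessBinarySizeMax is omitted, and the two booleans stand in for the firmware and system-control queries purely for illustration):

    #include <cstddef>

    constexpr std::size_t KernelResourceRegionSize       = 0x1728000;
    constexpr std::size_t ExtraKernelResourceSize        = 0x68000;
    constexpr std::size_t KernelResourceReduction_10_0_0 = 0x10000;

    static_assert(KernelResourceRegionSize + ExtraKernelResourceSize == 0x1790000, "matches the static_assert in the diff");
    static_assert(KernelResourceReduction_10_0_0 == 64 * 1024, "the 64K reduction mentioned in the comment");

    /* Same shape as GetResourceRegionSize() in the hunk above. */
    constexpr std::size_t GetResourceRegionSize(bool use_extra_resources, bool is_10_0_0_or_later) {
        std::size_t size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);
        if (is_10_0_0_or_later) {
            size -= KernelResourceReduction_10_0_0;
        }
        return size;
    }

    static_assert(GetResourceRegionSize(true,  false) == 0x1790000);
    static_assert(GetResourceRegionSize(true,  true)  == 0x1780000);
    static_assert(GetResourceRegionSize(false, true)  == 0x1718000);

    int main() { return 0; }
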
@@ -100,8 +117,8 @@
         cpu::MemoryAccessIndirectionRegisterAccessor(MairValue).Store();
         cpu::TranslationControlRegisterAccessor(TcrValue).Store();
 
-        /* Perform cpu-specific setup. */
-        {
+        /* Perform cpu-specific setup on < 10.0.0. */
+        if (kern::GetTargetFirmware() < kern::TargetFirmware_10_0_0) {
             SavedRegisterState saved_registers;
             SaveRegistersToTpidrEl1(&saved_registers);
             ON_SCOPE_EXIT { VerifyAndClearTpidrEl1(&saved_registers); };
@@ -246,11 +263,8 @@
         const uintptr_t init_array_offset = layout->init_array_offset;
         const uintptr_t init_array_end_offset = layout->init_array_end_offset;
 
-        /* Decide if Kernel should have enlarged resource region. */
-        const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
-        const size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);
-        static_assert(KernelResourceRegionSize > InitialProcessBinarySizeMax);
-        static_assert(KernelResourceRegionSize + ExtraKernelResourceSize > InitialProcessBinarySizeMax);
+        /* Determine the size of the resource region. */
+        const size_t resource_region_size = GetResourceRegionSize();
 
         /* Setup the INI1 header in memory for the kernel. */
         const uintptr_t ini_end_address = base_address + ini_load_offset + resource_region_size;
@@ -290,6 +304,11 @@
         ttbr1_table.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator);
         ttbr1_table.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator);
 
+        /* On 10.0.0+, Physically randomize the kernel region. */
+        if (kern::GetTargetFirmware() >= kern::TargetFirmware_10_0_0) {
+            ttbr1_table.PhysicallyRandomize(virtual_base_address + rx_offset, bss_end_offset - rx_offset, true);
+        }
+
         /* Clear kernel .bss. */
         std::memset(GetVoidPointer(virtual_base_address + bss_offset), 0, bss_end_offset - bss_offset);
 
@@ -312,7 +331,12 @@
     }
 
     uintptr_t GetFinalPageAllocatorState() {
-        return g_initial_page_allocator.GetFinalState();
+        g_initial_page_allocator.GetFinalState(std::addressof(g_final_page_allocator_state));
+        if (kern::GetTargetFirmware() >= kern::TargetFirmware_10_0_0) {
+            return reinterpret_cast<uintptr_t>(std::addressof(g_final_page_allocator_state));
+        } else {
+            return g_final_page_allocator_state.next_address;
+        }
     }
 
 }
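
Note: with this last change the value kernel_ldr hands back is no longer always a plain address: before 10.0.0 it is still the allocator's next free address, while on 10.0.0+ it is a pointer to the saved State (next address plus the bitmap of still-unused reserved pages). A hedged sketch of how a recipient could unpack it; the recipient-side code is an assumption, only the two encodings follow from the diff:

    #include <cstdint>
    #include <cstdio>

    /* Mirrors KInitialPageAllocator::State from the diff. */
    struct State {
        std::uintptr_t next_address;
        std::uintptr_t free_bitmap;
    };

    /* Stand-in for the firmware check the receiving side would need to make. */
    static bool IsTargetFirmware_10_0_0_OrLater() { return true; }

    /* Hypothetical consumer of GetFinalPageAllocatorState()'s return value. */
    static State UnpackFinalPageAllocatorState(std::uintptr_t value) {
        if (IsTargetFirmware_10_0_0_OrLater()) {
            /* 10.0.0+: the value is a pointer to the loader's saved State. */
            return *reinterpret_cast<const State *>(value);
        } else {
            /* Pre-10.0.0: the value is the next free address itself; there is no random window. */
            return State{ value, 0 };
        }
    }

    int main() {
        State saved = { 0x80200000, 0xFFFFFFFFFFFFF000 };
        State unpacked = UnpackFinalPageAllocatorState(reinterpret_cast<std::uintptr_t>(&saved));
        std::printf("next_address=0x%lx free_bitmap=0x%lx\n",
                    static_cast<unsigned long>(unpacked.next_address),
                    static_cast<unsigned long>(unpacked.free_bitmap));
        return 0;
    }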