mirror of https://github.com/Atmosphere-NX/Atmosphere.git
kern: Update init to reflect 10.0.0 changes
This commit is contained in:
parent 152a945561
commit bc1d3ccc91

11 changed files with 143 additions and 25 deletions
@@ -394,6 +394,77 @@ namespace ams::kern::arch::arm64::init {
                 return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1));
             }
 
+            KPhysicalAddress GetPhysicalAddressOfRandomizedRange(KVirtualAddress virt_addr, size_t size) const {
+                /* Define tracking variables for ourselves to use. */
+                KPhysicalAddress min_phys_addr = Null<KPhysicalAddress>;
+                KPhysicalAddress max_phys_addr = Null<KPhysicalAddress>;
+
+                /* Ensure the range we're querying is valid. */
+                const KVirtualAddress end_virt_addr = virt_addr + size;
+                if (virt_addr > end_virt_addr) {
+                    MESOSPHERE_INIT_ABORT_UNLESS(size == 0);
+                    return min_phys_addr;
+                }
+
+                auto UpdateExtents = [&](const KPhysicalAddress block, size_t block_size) ALWAYS_INLINE_LAMBDA {
+                    /* Ensure that we are allowed to have the block here. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), block_size));
+                    MESOSPHERE_INIT_ABORT_UNLESS(block_size <= GetInteger(end_virt_addr) - GetInteger(virt_addr));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), block_size));
+                    MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, block_size));
+
+                    const KPhysicalAddress block_end = block + block_size;
+
+                    /* We want to update min phys addr when it's 0 or > block. */
+                    /* This is equivalent in two's complement to (n - 1) >= block. */
+                    if ((GetInteger(min_phys_addr) - 1) >= GetInteger(block)) {
+                        min_phys_addr = block;
+                    }
+
+                    /* Update max phys addr when it's 0 or < block_end. */
+                    if (GetInteger(max_phys_addr) < GetInteger(block_end) || GetInteger(max_phys_addr) == 0) {
+                        max_phys_addr = block_end;
+                    }
+
+                    /* Traverse onwards. */
+                    virt_addr += block_size;
+                };
+
+                while (virt_addr < end_virt_addr) {
+                    L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
+
+                    /* If an L1 block is mapped, update. */
+                    if (l1_entry->IsBlock()) {
+                        UpdateExtents(l1_entry->GetBlock(), L1BlockSize);
+                        continue;
+                    }
+
+                    /* Not a block, so we must have a table. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable());
+
+                    L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
+                    if (l2_entry->IsBlock()) {
+                        UpdateExtents(l2_entry->GetBlock(), l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize);
+                        continue;
+                    }
+
+                    /* Not a block, so we must have a table. */
+                    MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable());
+
+                    /* We must have a mapped l3 entry to inspect. */
+                    L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
+                    MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock());
+
+                    UpdateExtents(l3_entry->GetBlock(), l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize);
+                }
+
+                /* Ensure we got the right range. */
+                MESOSPHERE_INIT_ABORT_UNLESS(GetInteger(max_phys_addr) - GetInteger(min_phys_addr) == size);
+
+                /* Write the address that we found. */
+                return min_phys_addr;
+            }
+
             bool IsFree(KVirtualAddress virt_addr, size_t size) {
                 /* Ensure that addresses and sizes are page aligned. */
                 MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
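A note on the new GetPhysicalAddressOfRandomizedRange: it walks the L1/L2/L3 entries covering [virt_addr, virt_addr + size), tracks the minimum and maximum physical block extents it sees, and finally asserts max - min == size, i.e. that the randomized range still occupies one contiguous physical window whose base it can return. The two's-complement comment is the subtle part: Null<KPhysicalAddress> is zero, and for unsigned n the test (n - 1) >= block is true exactly when n == 0 (the subtraction wraps to the largest value) or n > block. A minimal standalone sketch of that trick, with plain uint64_t stand-ins and made-up example addresses (not kernel code):

/* Demonstrates folding "min is unset (0)" and "min > block" into one unsigned compare. */
#include <cstdint>
#include <cassert>

int main() {
    uint64_t min_phys = 0;  /* 0 acts as the "unset" sentinel */
    uint64_t max_phys = 0;

    const uint64_t blocks[] = { 0x80060000, 0x80040000, 0x80050000 };  /* hypothetical */
    const uint64_t block_size = 0x10000;

    for (uint64_t block : blocks) {
        /* Update min when it's 0 or > block; (n - 1) >= block covers both at once,   */
        /* because 0 - 1 wraps to UINT64_MAX under unsigned arithmetic.               */
        if (min_phys - 1 >= block) {
            min_phys = block;
        }

        /* Update max when it's 0 or < block end. */
        const uint64_t block_end = block + block_size;
        if (max_phys < block_end || max_phys == 0) {
            max_phys = block_end;
        }
    }

    assert(min_phys == 0x80040000);
    assert(max_phys == 0x80070000);
}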
@@ -156,6 +156,7 @@ namespace ams::kern::arch::arm64::cpu {
     void ClearPageToZeroImpl(void *);
     void FlushEntireDataCacheSharedForInit();
     void FlushEntireDataCacheLocalForInit();
+    void StoreEntireCacheForInit();
 
     void FlushEntireDataCache();
 
@@ -26,14 +26,14 @@ namespace ams::kern::arch::arm64 {
             constexpr KNotAlignedSpinLock() : packed_tickets(0) { /* ... */ }
 
             void Lock() {
-                u32 tmp0, tmp1;
+                u32 tmp0, tmp1, tmp2;
 
                 __asm__ __volatile__(
                     "    prfm pstl1keep, %[packed_tickets]\n"
                     "1:\n"
                     "    ldaxr %w[tmp0], %[packed_tickets]\n"
-                    "    add %w[tmp0], %w[tmp0], #0x10000\n"
-                    "    stxr %w[tmp1], %w[tmp0], %[packed_tickets]\n"
+                    "    add %w[tmp2], %w[tmp0], #0x10000\n"
+                    "    stxr %w[tmp1], %w[tmp2], %[packed_tickets]\n"
                     "    cbnz %w[tmp1], 1b\n"
                     "    \n"
                     "    and %w[tmp1], %w[tmp0], #0xFFFF\n"
@@ -46,7 +46,7 @@ namespace ams::kern::arch::arm64 {
                     "    cmp %w[tmp1], %w[tmp0], lsr #16\n"
                     "    b.ne 2b\n"
                     "3:\n"
-                    : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [packed_tickets]"+Q"(this->packed_tickets)
+                    : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [packed_tickets]"+Q"(this->packed_tickets)
                     :
                     : "cc", "memory"
                 );
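The spinlock change is a genuine bug fix. packed_tickets packs the now-serving ticket in the low halfword and the next-ticket counter in the high halfword; add #0x10000 takes a ticket by bumping the counter. The old code incremented tmp0 in place, so the later compares against %w[tmp0], lsr #16 waited on our ticket plus one rather than the ticket actually taken; staging the incremented value in tmp2 preserves the loaded value for the compare. The same shape in portable C++, as a sketch under assumed names (not the kernel's implementation): std::atomic's fetch_add returns the pre-increment value, which is precisely the ticket to wait on.

#include <atomic>
#include <cstdint>

class TicketLock {
    private:
        std::atomic<uint32_t> next_ticket{0};  /* the "high halfword" counter */
        std::atomic<uint32_t> now_serving{0};  /* the "low halfword" value    */
    public:
        void Lock() {
            /* Take a ticket; fetch_add returns the value *before* the increment, */
            /* which is our own ticket number (the asm fix keeps this in tmp0).   */
            const uint32_t my_ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);

            /* Spin until it's our turn. */
            while (now_serving.load(std::memory_order_acquire) != my_ticket) {
                /* spin */
            }
        }
        void Unlock() {
            /* Hand the lock to the next ticket holder. */
            now_serving.fetch_add(1, std::memory_order_release);
        }
};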
@@ -51,11 +51,11 @@ namespace ams::kern {
                 u64 selected = 0;
 
                 u64 cur_num_bits = BITSIZEOF(bitmap) / 2;
-                u64 cur_mask = (1ull << cur_num_bits) / 2;
+                u64 cur_mask = (1ull << cur_num_bits) - 1;
 
                 while (cur_num_bits) {
-                    const u64 high = (bitmap >> 0) & cur_mask;
-                    const u64 low = (bitmap >> cur_num_bits) & cur_mask;
+                    const u64 low = (bitmap >> 0) & cur_mask;
+                    const u64 high = (bitmap >> cur_num_bits) & cur_mask;
 
                     bool choose_low;
                     if (high == 0) {
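This hunk reads as a bug fix to random-set-bit selection over a 64-bit bitmap by binary descent: the old mask (1ull << cur_num_bits) / 2 equals 1ull << (cur_num_bits - 1), so it masked off half of each half, and the half taken from bits [0, cur_num_bits) was mislabeled high, inverting every choice. A standalone sketch of the corrected descent, with std::mt19937_64 standing in for the kernel's entropy source (illustrative, not the kernel's routine):

#include <cstdint>
#include <cassert>
#include <random>

uint64_t SelectRandomSetBit(uint64_t bitmap, std::mt19937_64 &rng) {
    assert(bitmap != 0);

    uint64_t selected = 0;
    uint64_t cur_num_bits = 64 / 2;
    uint64_t cur_mask = (uint64_t(1) << cur_num_bits) - 1;  /* all bits of one half */

    while (cur_num_bits) {
        const uint64_t low  = (bitmap >> 0) & cur_mask;             /* bits [0, n)  */
        const uint64_t high = (bitmap >> cur_num_bits) & cur_mask;  /* bits [n, 2n) */

        /* Descend into a non-empty half; flip a coin when both are populated. */
        bool choose_low;
        if (high == 0) {
            choose_low = true;
        } else if (low == 0) {
            choose_low = false;
        } else {
            choose_low = (rng() & 1) != 0;
        }

        if (choose_low) {
            bitmap = low;
        } else {
            bitmap = high;
            selected += cur_num_bits;
        }

        cur_num_bits /= 2;
        cur_mask >>= cur_num_bits;
    }

    return selected;  /* index of a set bit in the original bitmap */
}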
@@ -333,6 +333,13 @@ namespace ams::kern::arch::arm64::cpu {
         return PerformCacheOperationBySetWayLocal<true>(FlushDataCacheLineBySetWayImpl);
     }
 
+    void StoreEntireCacheForInit() {
+        PerformCacheOperationBySetWayLocal<true>(StoreDataCacheLineBySetWayImpl);
+        PerformCacheOperationBySetWayShared<true>(StoreDataCacheLineBySetWayImpl);
+        DataSynchronizationBarrierInnerShareable();
+        InvalidateEntireInstructionCache();
+    }
+
     void FlushEntireDataCache() {
         return PerformCacheOperationBySetWayShared<false>(FlushDataCacheLineBySetWayImpl);
     }
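Context for StoreEntireCacheForInit: in arm64 cache terms a "store" by set/way cleans lines (writes dirty data back toward memory) without discarding them, while a "flush" cleans and invalidates. The new helper cleans the local core's caches, then the shared levels, then issues an inner-shareable DSB and invalidates the instruction cache, presumably so the physically rearranged kernel pages are coherent in memory before execution continues through the new mappings.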
@@ -324,7 +324,7 @@ namespace ams::kern::board::nintendo::nx {
 
         EnsureRandomGeneratorInitialized();
 
-        return GenerateRandomU64();
+        return GenerateRandomU64FromGenerator();
     }
 
     void KSystemControl::SleepSystem() {
@@ -2,6 +2,20 @@
 import sys, os
 from struct import pack as pk, unpack as up
 
+ATMOSPHERE_TARGET_FIRMWARE_100 = 1
+ATMOSPHERE_TARGET_FIRMWARE_200 = 2
+ATMOSPHERE_TARGET_FIRMWARE_300 = 3
+ATMOSPHERE_TARGET_FIRMWARE_400 = 4
+ATMOSPHERE_TARGET_FIRMWARE_500 = 5
+ATMOSPHERE_TARGET_FIRMWARE_600 = 6
+ATMOSPHERE_TARGET_FIRMWARE_620 = 7
+ATMOSPHERE_TARGET_FIRMWARE_700 = 8
+ATMOSPHERE_TARGET_FIRMWARE_800 = 9
+ATMOSPHERE_TARGET_FIRMWARE_810 = 10
+ATMOSPHERE_TARGET_FIRMWARE_900 = 11
+ATMOSPHERE_TARGET_FIRMWARE_910 = 12
+ATMOSPHERE_TARGET_FIRMWARE_1000 = 13
+
 def align_up(val, algn):
     val += algn - 1
     return val - (val % algn)
@@ -18,7 +32,7 @@ def main(argc, argv):
     kernel_metadata_offset = 4
     assert (kernel_metadata_offset <= len(kernel) - 0x40)
     assert (kernel[kernel_metadata_offset:kernel_metadata_offset + 4] == b'MSS0')
-    kernel_end = up('<I', kernel[kernel_metadata_offset + 0x34:kernel_metadata_offset + 0x38])[0]
+    kernel_end = up('<I', kernel[kernel_metadata_offset + 0x38:kernel_metadata_offset + 0x3C])[0]
     assert (kernel_end >= len(kernel))
 
     embedded_ini = b''
@@ -36,8 +50,8 @@ def main(argc, argv):
 
     with open('mesosphere.bin', 'wb') as f:
         f.write(kernel[:kernel_metadata_offset + 4])
-        f.write(pk('<QQ', embedded_ini_offset, kernel_ldr_offset))
-        f.write(kernel[kernel_metadata_offset + 0x14:])
+        f.write(pk('<QQI', embedded_ini_offset, kernel_ldr_offset, ATMOSPHERE_TARGET_FIRMWARE_1000))
+        f.write(kernel[kernel_metadata_offset + 0x18:])
         f.seek(embedded_ini_offset)
         f.write(embedded_ini)
         f.seek(embedded_ini_end)
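The '<QQ' to '<QQI' change grows the packed field area from 16 to 20 bytes, which is why the passthrough copy now resumes at kernel_metadata_offset + 0x18 (4 bytes of 'MSS0' magic plus 20 bytes of fields) instead of + 0x14, and presumably also why kernel_end is now read 4 bytes later, at + 0x38. A sketch of the implied header layout as a packed C++ struct; the struct and field names are illustrative guesses, not identifiers from the source:

#include <cstdint>

struct __attribute__((packed)) MesosphereMetadata {
    uint32_t magic;                /* 'MSS0', at kernel_metadata_offset             */
    uint64_t embedded_ini_offset;  /* first  '<Q' field                             */
    uint64_t kernel_ldr_offset;    /* second '<Q' field                             */
    uint32_t target_firmware;      /* new '<I' field, e.g. ..._FIRMWARE_1000 = 13   */
};
static_assert(sizeof(MesosphereMetadata) == 0x18, "field area grew from 0x14 to 0x18");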
@@ -34,6 +34,7 @@ namespace ams::kern::init {
         constexpr size_t KernelResourceRegionSize = 0x1728000;
         constexpr size_t ExtraKernelResourceSize = 0x68000;
         static_assert(ExtraKernelResourceSize + KernelResourceRegionSize == 0x1790000);
+        constexpr size_t KernelResourceReduction_10_0_0 = 0x10000;
 
         /* Global Allocator. */
         KInitialPageAllocator g_initial_page_allocator;
@@ -41,6 +42,20 @@ namespace ams::kern::init {
         /* Global initial arguments array. */
         KPhysicalAddress g_init_arguments_phys_addr[cpu::NumCores];
 
+        size_t GetResourceRegionSize() {
+            /* Decide if Kernel should have enlarged resource region. */
+            const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
+            size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);
+            static_assert(KernelResourceRegionSize > InitialProcessBinarySizeMax);
+            static_assert(KernelResourceRegionSize + ExtraKernelResourceSize > InitialProcessBinarySizeMax);
+
+            /* 10.0.0 reduced the kernel resource region size by 64K. */
+            if (kern::GetTargetFirmware() >= kern::TargetFirmware_10_0_0) {
+                resource_region_size -= KernelResourceReduction_10_0_0;
+            }
+            return resource_region_size;
+        }
+
         /* Page table attributes. */
         constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
         constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable);
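Worked through, GetResourceRegionSize yields 0x1728000 by default, 0x1790000 (= 0x1728000 + 0x68000) when the thread resource limit is increased, and 0x10000 (64 KiB) less in each case on 10.0.0+, i.e. 0x1718000 or 0x1780000.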
@@ -138,16 +153,15 @@ namespace ams::kern::init {
         MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack));
 
         /* Decide if Kernel should have enlarged resource region (slab region + page table heap region). */
-        const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
-        const size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0);
+        const size_t resource_region_size = GetResourceRegionSize();
 
         /* Determine the size of the slab region. */
         const size_t slab_region_size = CalculateTotalSlabHeapSize();
         MESOSPHERE_INIT_ABORT_UNLESS(slab_region_size <= resource_region_size);
 
         /* Setup the slab region. */
-        const KPhysicalAddress code_start_phys_addr = ttbr1_table.GetPhysicalAddress(code_start_virt_addr);
-        const KPhysicalAddress code_end_phys_addr = code_start_phys_addr + (code_end_virt_addr - code_start_virt_addr);
+        const KPhysicalAddress code_start_phys_addr = ttbr1_table.GetPhysicalAddressOfRandomizedRange(code_start_virt_addr, code_region_size);
+        const KPhysicalAddress code_end_phys_addr = code_start_phys_addr + code_region_size;
         const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr;
         const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
         constexpr size_t SlabRegionAlign = KernelAslrAlignment;
@@ -155,9 +169,6 @@ namespace ams::kern::init {
         const KVirtualAddress slab_region_start = KMemoryLayout::GetVirtualMemoryRegionTree().GetRandomAlignedRegion(slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + (GetInteger(code_end_phys_addr) % SlabRegionAlign);
         MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(GetInteger(slab_region_start), slab_region_size, KMemoryRegionType_KernelSlab));
 
-        /* Set the slab region's pair region. */
-        KMemoryLayout::GetVirtualMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_KernelSlab)->SetPairAddress(GetInteger(slab_start_phys_addr));
-
         /* Setup the temp region. */
         constexpr size_t TempRegionSize = 128_MB;
         constexpr size_t TempRegionAlign = KernelAslrAlignment;
@@ -206,15 +217,20 @@ namespace ams::kern::init {
         SetupDramPhysicalMemoryRegions();
 
         /* Insert a physical region for the kernel code region. */
-        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(code_start_phys_addr), (code_end_virt_addr - code_start_virt_addr), KMemoryRegionType_DramKernelCode));
-        KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_DramKernelCode)->SetPairAddress(code_start_virt_addr);
+        MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(code_start_phys_addr), code_region_size, KMemoryRegionType_DramKernelCode));
 
         /* Insert a physical region for the kernel slab region. */
         MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
-        KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstRegionByTypeAttr(KMemoryRegionType_DramKernelSlab)->SetPairAddress(GetInteger(slab_region_start));
 
-        /* Map and clear the slab region. */
+        /* Map the slab region. */
         ttbr1_table.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator);
 
+        /* Physically randomize the slab region. */
+        /* NOTE: Nintendo does this only on 10.0.0+ */
+        ttbr1_table.PhysicallyRandomize(slab_region_start, slab_region_size, false);
+        cpu::StoreEntireCacheForInit();
+
+        /* Clear the slab region. */
         std::memset(GetVoidPointer(slab_region_start), 0, slab_region_size);
 
         /* Determine size available for kernel page table heaps, requiring > 8 MB. */
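Worth noting on the ordering above: the slab region is mapped, its backing pages are then physically shuffled (PhysicallyRandomize rewrites the leaf page table entries), the caches are stored so those updates reach memory, and only then is the region zeroed through its virtual mapping. Per the NOTE, Nintendo performs this randomization only on 10.0.0+, while this commit appears to apply it unconditionally.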
@@ -65,7 +65,7 @@ __metadata_kernel_layout:
 .type _ZN3ams4kern17GetTargetFirmwareEv, %function
 _ZN3ams4kern17GetTargetFirmwareEv:
     adr x0, __metadata_target_firmware
-    ldr x0, [x0]
+    ldr w0, [x0]
     ret
 
 /* ams::kern::init::StartCore0(uintptr_t, uintptr_t) */
@@ -101,6 +101,15 @@ core0_el1:
     add x2, x0, x2
     LOAD_FROM_LABEL(x3, __metadata_kernelldr_offset)
     add x3, x0, x3
+
+    /* If kernelldr is ours, set its target firmware. */
+    ldr w4, [x3, #4]
+    LOAD_IMMEDIATE_32(w5, 0x30444C4D)
+    cmp w4, w5
+    b.ne 1f
+    LOAD_FROM_LABEL(x4, __metadata_target_firmware)
+    str w4, [x3, #8]
+1:
     blr x3
 
     /* At this point kernelldr has been invoked, and we are relocated at a random virtual address. */
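On the magic number: 0x30444C4D is the little-endian u32 for the ASCII bytes 'M', 'L', 'D', '0', so this block patches the target-firmware word (at kernelldr offset 8) only when the binary at the kernelldr offset carries Atmosphere's own kernelldr magic at offset 4; any other kernelldr is invoked untouched. The narrowing of the loads in GetTargetFirmwareEv from ldr x0 to ldr w0 matches the field now being a 32-bit value.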
@@ -109,7 +109,7 @@ _main:
 .type _ZN3ams4kern17GetTargetFirmwareEv, %function
 _ZN3ams4kern17GetTargetFirmwareEv:
     adr x0, __metadata_target_firmware
-    ldr x0, [x0]
+    ldr w0, [x0]
     ret
 
 .balign 8
@@ -304,7 +304,7 @@ namespace ams::kern::init::loader {
         ttbr1_table.Map(virtual_base_address + ro_offset, ro_end_offset - ro_offset, base_address + ro_offset, KernelRwDataAttribute, g_initial_page_allocator);
         ttbr1_table.Map(virtual_base_address + rw_offset, bss_end_offset - rw_offset, base_address + rw_offset, KernelRwDataAttribute, g_initial_page_allocator);
 
-        /* On 10.0.0+, Physicaly randomize the kernel region. */
+        /* On 10.0.0+, Physically randomize the kernel region. */
         if (kern::GetTargetFirmware() >= kern::TargetFirmware_10_0_0) {
             ttbr1_table.PhysicallyRandomize(virtual_base_address + rx_offset, bss_end_offset - rx_offset, true);
         }