Mirror of https://github.com/Atmosphere-NX/Atmosphere.git
Exosphere: Change physical segment maps depending on firmware version
parent 441e58be56
commit adc496b6a7
5 changed files with 84 additions and 22 deletions

@@ -147,7 +147,12 @@ void bootup_misc_mmio(void) {
     (void)(MAKE_MC_REG(0x014));
 
     /* Clear RESET Vector, setup CPU Secure Boot RESET Vectors. */
-    uint32_t reset_vec = TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_WARMBOOT_CRT0_AND_MAIN);
+    uint32_t reset_vec;
+    if (exosphere_get_target_firmware() >= EXOSPHERE_TARGET_FIRMWARE_500) {
+        reset_vec = TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGMENT_ID_WARMBOOT_CRT0_AND_MAIN);
+    } else {
+        reset_vec = TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_WARMBOOT_CRT0_AND_MAIN);
+    }
     EVP_CPU_RESET_VECTOR_0 = 0;
     SB_AA64_RESET_LOW_0 = reset_vec | 1;
     SB_AA64_RESET_HIGH_0 = 0;

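The two macros differ only in which per-segment offset table they index. The sketch below is illustrative arithmetic, not code from the commit: it assumes the warmboot crt0/main segment corresponds to the _MMAPTZ5XS0 slot (offset 0x3000) defined later in this diff; the pre-5.0.0 offset comes from the existing _MMAPTZS table, which is outside this change.

```c
/* Hedged sketch: how the secure-boot reset vector is derived on 5.0.0+.
 * TZRAM_GET_SEGMENT_5X_PA(id) adds the _MMAPTZ5XS<id> offset to the TZRAM
 * physical base, so segment 0 (warmboot crt0 + main, offset 0x3000 in the
 * defines added by this commit) would yield 0x7C013000. */
#include <assert.h>
#include <stdint.h>

int main(void) {
    const uint64_t tzram_base_pa = 0x7C010000ull;
    const uint64_t warmboot_main_off_5x = 0x3000ull; /* _MMAPTZ5XS0 */
    assert(tzram_base_pa + warmboot_main_off_5x == 0x7C013000ull);
    return 0;
}
```
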
@@ -18,7 +18,8 @@ extern const uint8_t __start_cold[];
 /* warboot_init.c */
 extern unsigned int g_exosphere_target_firmware_for_init;
 void init_dma_controllers(unsigned int target_firmware);
-void set_memory_registers_enable_mmu(void);
+void set_memory_registers_enable_mmu_1x_ttbr0(void);
+void set_memory_registers_enable_mmu_5x_ttbr0(void);
 
 static void identity_map_all_mappings(uintptr_t *mmu_l1_tbl, uintptr_t *mmu_l3_tbl) {
     static const uintptr_t addrs[] = { TUPLE_FOLD_LEFT_0(EVAL(IDENTIY_MAPPING_ID_MAX), _MMAPID, COMMA) };

@@ -65,22 +66,34 @@ static void warmboot_map_all_ram_segments(uintptr_t *mmu_l3_tbl) {
     }
 }
 
-static void tzram_map_all_segments(uintptr_t *mmu_l3_tbl) {
+static void tzram_map_all_segments(uintptr_t *mmu_l3_tbl, unsigned int target_firmware) {
     static const uintptr_t offs[] = { TUPLE_FOLD_LEFT_0(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZS, COMMA) };
     static const size_t sizes[] = { TUPLE_FOLD_LEFT_1(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZS, COMMA) };
     static const size_t increments[] = { TUPLE_FOLD_LEFT_2(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZS, COMMA) };
     static const bool is_executable[] = { TUPLE_FOLD_LEFT_3(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZS, COMMA) };
 
+    static const uintptr_t offs_5x[] = { TUPLE_FOLD_LEFT_0(EVAL(TZRAM_SEGMENT_ID_MAX), _MMAPTZ5XS, COMMA) };
+
     for(size_t i = 0, offset = 0; i < TZRAM_SEGMENT_ID_MAX; i++) {
-        tzram_map_segment(mmu_l3_tbl, TZRAM_SEGMENT_BASE + offset, 0x7C010000ull + offs[i], sizes[i], is_executable[i]);
+        uintptr_t off = (target_firmware < EXOSPHERE_TARGET_FIRMWARE_500) ? offs[i] : offs_5x[i];
+        tzram_map_segment(mmu_l3_tbl, TZRAM_SEGMENT_BASE + offset, 0x7C010000ull + off, sizes[i], is_executable[i]);
         offset += increments[i];
     }
 }
 
-static void configure_ttbls(void) {
-    uintptr_t *mmu_l1_tbl = (uintptr_t *)(TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
-    uintptr_t *mmu_l2_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
-    uintptr_t *mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+static void configure_ttbls(unsigned int target_firmware) {
+    uintptr_t *mmu_l1_tbl;
+    uintptr_t *mmu_l2_tbl;
+    uintptr_t *mmu_l3_tbl;
+    if (target_firmware < EXOSPHERE_TARGET_FIRMWARE_500) {
+        mmu_l1_tbl = (uintptr_t *)(TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
+        mmu_l2_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
+        mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    } else {
+        mmu_l1_tbl = (uintptr_t *)(TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
+        mmu_l2_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
+        mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    }
 
     mmu_init_table(mmu_l1_tbl, 64); /* 33-bit address space */
     mmu_init_table(mmu_l2_tbl, 4096);

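A quick way to see what moved for the translation tables themselves: taking tuple element 0 of _MMAPTZS6/7 (context lines in the segment-offset defines later in this diff) and the new _MMAPTZ5XS6/7 values gives the physical placements below, i.e. on 5.0.0+ the L2/L3 tables sit at the top of the 64 KiB TZRAM region instead of near its start. This is illustrative arithmetic, not code from the commit.

```c
/* Hedged arithmetic check derived from the _MMAPTZS6/7 and _MMAPTZ5XS6/7
 * offsets shown in this diff; not code from the commit. */
#include <assert.h>
#include <stdint.h>

int main(void) {
    const uint64_t tzram = 0x7C010000ull;
    assert(tzram + 0x1000ull == 0x7C011000ull);                /* L2 table, pre-5.0.0 */
    assert(tzram + 0x2000ull == 0x7C012000ull);                /* L3 table, pre-5.0.0 */
    assert(tzram + (0x10000ull - 0x2000ull) == 0x7C01E000ull); /* L2 table, 5.0.0+ */
    assert(tzram + (0x10000ull - 0x1000ull) == 0x7C01F000ull); /* L3 table, 5.0.0+ */
    return 0;
}
```
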
@@ -101,7 +114,7 @@ static void configure_ttbls(void) {
     mmio_map_all_devices(mmu_l3_tbl);
     lp0_entry_map_all_ram_segments(mmu_l3_tbl);
     warmboot_map_all_ram_segments(mmu_l3_tbl);
-    tzram_map_all_segments(mmu_l3_tbl);
+    tzram_map_all_segments(mmu_l3_tbl, target_firmware);
 }
 
 static void do_relocation(const coldboot_crt0_reloc_list_t *reloc_list, size_t index) {

@@ -117,10 +130,19 @@ static void do_relocation(const coldboot_crt0_reloc_list_t *reloc_list, size_t index) {
     }
 }
 
-uintptr_t get_coldboot_crt0_stack_address(void) {
+uintptr_t get_coldboot_crt0_temp_stack_address(void) {
     return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x800;
 }
 
+uintptr_t get_coldboot_crt0_stack_address(void) {
+    if (exosphere_get_target_firmware_for_init() < EXOSPHERE_TARGET_FIRMWARE_500) {
+        return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x800;
+    } else {
+        return TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x800;
+    }
+
+}
+
 void coldboot_init(coldboot_crt0_reloc_list_t *reloc_list, uintptr_t start_cold) {
     //MAILBOX_NX_SECMON_BOOT_TIME = TIMERUS_CNTR_1US_0;
 

@@ -154,8 +176,12 @@ void coldboot_init(coldboot_crt0_reloc_list_t *reloc_list, uintptr_t start_cold)
     /* TZRAM accesses should work normally after this point. */
     init_dma_controllers(g_exosphere_target_firmware_for_init);
 
-    configure_ttbls();
-    set_memory_registers_enable_mmu();
+    configure_ttbls(g_exosphere_target_firmware_for_init);
+    if (g_exosphere_target_firmware_for_init < EXOSPHERE_TARGET_FIRMWARE_500) {
+        set_memory_registers_enable_mmu_1x_ttbr0();
+    } else {
+        set_memory_registers_enable_mmu_5x_ttbr0();
+    }
 
     /* Copy or clear the remaining sections */
     for(size_t i = 0; i < reloc_list->nb_relocs_post_mmu_init; i++) {

@@ -53,6 +53,16 @@
 #define _MMAPTZS6  ( 0x1000ull, 0x1000ull, 0x02000ull, false ) /* L2 translation table */
 #define _MMAPTZS7  ( 0x2000ull, 0x1000ull, 0x02000ull, false ) /* L3 translation table */
 
+/* TZRAM segments for 5.0.0+. (offset). */
+#define _MMAPTZ5XS0 ( 0x3000ull )            /* Warmboot crt0 sections and main code segment */
+#define _MMAPTZ5XS1 ( 0ull )                 /* pk2ldr segment */
+#define _MMAPTZ5XS2 ( 0ull )                 /* SPL .bss buffer, NOT mapped at startup */
+#define _MMAPTZ5XS3 ( 0ull )                 /* Core 0,1,2 stack */
+#define _MMAPTZ5XS4 ( 0x1000ull )            /* Core 3 stack */
+#define _MMAPTZ5XS5 ( 0x2000ull )            /* Secure Monitor exception vectors, some init stacks */
+#define _MMAPTZ5XS6 ( 0x10000 - 0x2000ull )  /* L2 translation table */
+#define _MMAPTZ5XS7 ( 0x10000 - 0x1000ull )  /* L3 translation table */
+
 #define MMIO_BASE  0x1F0080000ull
 #define LP0_ENTRY_RAM_SEGMENT_BASE (MMIO_BASE + 0x000100000ull)
 #define WARMBOOT_RAM_SEGMENT_BASE  (LP0_ENTRY_RAM_SEGMENT_BASE + 0x000047000ull) /* increment seems to be arbitrary ? */

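For readability, this is roughly what the new TUPLE_FOLD_LEFT_0(..., _MMAPTZ5XS, COMMA) initializer in tzram_map_all_segments expands to, written out by hand from the defines above and assuming TZRAM_SEGMENT_ID_MAX is 8 (one entry per slot). The tuple/fold macro machinery lives in the project's preprocessor headers and is not part of this change.

```c
/* Hedged, hand-expanded equivalent of the 5.0.0+ physical offset table,
 * built from the _MMAPTZ5XS* defines above; not code from the commit. */
#include <stdint.h>

static const uintptr_t offs_5x_expanded[] = {
    0x3000ull,            /* _MMAPTZ5XS0: warmboot crt0 + main code */
    0ull,                 /* _MMAPTZ5XS1: pk2ldr */
    0ull,                 /* _MMAPTZ5XS2: SPL .bss buffer (not mapped at startup) */
    0ull,                 /* _MMAPTZ5XS3: core 0,1,2 stack */
    0x1000ull,            /* _MMAPTZ5XS4: core 3 stack */
    0x2000ull,            /* _MMAPTZ5XS5: secmon exception vectors, init stacks */
    0x10000 - 0x2000ull,  /* _MMAPTZ5XS6: L2 translation table */
    0x10000 - 0x1000ull,  /* _MMAPTZ5XS7: L3 translation table */
};
```
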
@@ -129,6 +139,7 @@
 #define WARMBOOT_GET_RAM_SEGMENT_ATTRIBS(segment_id) (TUPLE_ELEM_2(CAT(_MMAPWBS, EVAL(segment_id))))
 
 #define TZRAM_GET_SEGMENT_PA(segment_id) (0x7C010000ull + (TUPLE_ELEM_0(CAT(_MMAPTZS, EVAL(segment_id)))))
+#define TZRAM_GET_SEGMENT_5X_PA(segment_id) (0x7C010000ull + (TUPLE_ELEM_0(CAT(_MMAPTZ5XS, EVAL(segment_id)))))
 #define TZRAM_GET_SEGMENT_ADDRESS(segment_id) (TUPLE_FOLD_LEFT_2(EVAL(segment_id), _MMAPTZS, PLUS) EVAL(TZRAM_SEGMENT_BASE))
 #define TZRAM_GET_SEGMENT_SIZE(segment_id) (TUPLE_ELEM_1(CAT(_MMAPTZS, EVAL(segment_id))))
 #define TZRAM_IS_SEGMENT_EXECUTABLE(segment_id) (TUPLE_ELEM_3(CAT(_MMAPTZS, EVAL(segment_id))))

@@ -101,8 +101,11 @@ __start_cold:
     br x16
 
 _post_cold_crt0_reloc:
+    /* Setup stack for coldboot crt0. */
     msr spsel, #0
+    bl get_coldboot_crt0_temp_stack_address
+    mov sp, x0
+    mov fp, #0
     bl get_coldboot_crt0_stack_address
     mov sp, x0
     mov fp, #0

@@ -128,6 +131,7 @@ _post_cold_crt0_reloc:
     ldr x1, =0x80010000
     /* Set size in coldboot relocation list. */
     str x21, [x0, #0x8]
+
     bl coldboot_init
 
     ldr x16, =__jump_to_main_cold

@@ -8,6 +8,9 @@
 #undef MC_BASE
 #define MC_BASE (MMIO_GET_DEVICE_PA(MMIO_DEVID_MC))
 
+#define WARMBOOT_GET_TZRAM_SEGMENT_PA(x) ((g_exosphere_target_firmware_for_init < EXOSPHERE_TARGET_FIRMWARE_500) \
+                                          ? TZRAM_GET_SEGMENT_PA(x) : TZRAM_GET_SEGMENT_5X_PA(x))
+
 /* start.s */
 void __set_memory_registers(uintptr_t ttbr0, uintptr_t vbar, uint64_t cpuectlr, uint32_t scr,
                             uint32_t tcr, uint32_t cptr, uint64_t mair, uint32_t sctlr);

@@ -15,16 +18,16 @@ void __set_memory_registers(uintptr_t ttbr0, uintptr_t vbar, uint64_t cpuectlr,
 unsigned int g_exosphere_target_firmware_for_init = 0;
 
 uintptr_t get_warmboot_crt0_stack_address(void) {
-    return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE012_STACK) + 0x800;
+    return WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE012_STACK) + 0x800;
 }
 
 uintptr_t get_warmboot_crt0_stack_address_critsec_enter(void) {
     unsigned int core_id = get_core_id();
 
     if (core_id) {
-        return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x1000;
+        return WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x1000;
     } else {
-        return TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x80 * (core_id + 1);
+        return WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x80 * (core_id + 1);
     }
 }
 

@@ -89,9 +92,8 @@ void init_dma_controllers(unsigned int target_firmware) {
     }
 }
 
-void set_memory_registers_enable_mmu(void) {
+void _set_memory_registers_enable_mmu(const uintptr_t ttbr0) {
     static const uintptr_t vbar = TZRAM_GET_SEGMENT_ADDRESS(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800;
-    static const uintptr_t ttbr0 = TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64;
 
     /*
         - Disable table walk descriptor access prefetch.

@@ -142,12 +144,22 @@ void set_memory_registers_enable_mmu(void) {
     __set_memory_registers(ttbr0, vbar, cpuectlr, scr, tcr, cptr, mair, sctlr);
 }
 
+void set_memory_registers_enable_mmu_1x_ttbr0(void) {
+    static const uintptr_t ttbr0 = TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64;
+    _set_memory_registers_enable_mmu(ttbr0);
+}
+
+void set_memory_registers_enable_mmu_5x_ttbr0(void) {
+    static const uintptr_t ttbr0 = TZRAM_GET_SEGMENT_5X_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64;
+    _set_memory_registers_enable_mmu(ttbr0);
+}
+
 #if 0 /* Since we decided not to identity-unmap TZRAM */
 static void identity_remap_tzram(void) {
     /* See also: configure_ttbls (in coldboot_init.c). */
-    uintptr_t *mmu_l1_tbl = (uintptr_t *)(TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
-    uintptr_t *mmu_l2_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
-    uintptr_t *mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
+    uintptr_t *mmu_l1_tbl = (uintptr_t *)(WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
+    uintptr_t *mmu_l2_tbl = (uintptr_t *)WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
+    uintptr_t *mmu_l3_tbl = (uintptr_t *)WARMBOOT_GET_TZRAM_SEGMENT_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);
 
     mmu_map_table(1, mmu_l1_tbl, 0x40000000, mmu_l2_tbl, 0);
     mmu_map_table(2, mmu_l2_tbl, 0x7C000000, mmu_l3_tbl, 0);

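Worth noting about the new 5.x TTBR0 helper above: taking the _MMAPTZ5XS5 offset added by this commit (0x2000, assumed here to be the SECMON_EVT slot), the base works out as sketched below, and the 64-byte L1 table matches the "33-bit address space" comment in configure_ttbls (8 entries of 8 bytes, one 1 GiB block each). This is illustrative arithmetic only; the pre-5.0.0 value depends on the existing _MMAPTZS tuple for that segment, which is not part of this diff.

```c
/* Hedged arithmetic sketch, assuming TZRAM_SEGEMENT_ID_SECMON_EVT maps to
 * the _MMAPTZ5XS5 slot (offset 0x2000); not code from the commit. */
#include <assert.h>
#include <stdint.h>

int main(void) {
    const uint64_t ttbr0_5x = 0x7C010000ull + 0x2000ull + 0x800ull - 64ull;
    assert(ttbr0_5x == 0x7C0127C0ull);
    /* 64-byte L1 table: 8 entries * 8 bytes, each a 1 GiB block,
     * i.e. an 8 GiB (33-bit) address space. */
    assert((64 / 8) * (1ull << 30) == (1ull << 33));
    return 0;
}
```
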
@@ -176,5 +188,9 @@ void warmboot_init(void) {
 
     /*identity_remap_tzram();*/
     /* Nintendo pointlessly fully invalidate the TLB & invalidate the data cache on the modified ranges here */
-    set_memory_registers_enable_mmu();
+    if (g_exosphere_target_firmware_for_init < EXOSPHERE_TARGET_FIRMWARE_500) {
+        set_memory_registers_enable_mmu_1x_ttbr0();
+    } else {
+        set_memory_registers_enable_mmu_5x_ttbr0();
+    }
 }