#include "utils.h"
#include "memory_map.h"
#include "mc.h"
#include "arm.h"
#include "synchronization.h"
#undef MC_BASE
#define MC_BASE (MMIO_GET_DEVICE_PA(MMIO_DEVID_MC))

/* start.s */
void __set_memory_registers(uintptr_t ttbr0, uintptr_t vbar, uint64_t cpuectlr, uint32_t scr,
                            uint32_t tcr, uint32_t cptr, uint64_t mair, uint32_t sctlr);

uintptr_t get_warmboot_crt0_stack_address(void) {
    return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE012_STACK) + 0x800;
}

uintptr_t get_warmboot_crt0_stack_address_critsec_enter(void) {
    unsigned int core_id = get_core_id();

    if (core_id) {
        return TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_CORE3_STACK) + 0x1000;
    } else {
        return TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x80 * (core_id + 1);
    }
}
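
/* Trivial wrapper, presumably so the warmboot crt0 assembly (start.s) has a
   C-ABI symbol to call for entering a critical section. */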
void warmboot_crt0_critical_section_enter(volatile critical_section_t *critical_section) {
    critical_section_enter(critical_section);
}

void init_dma_controllers(void) {
    /* TODO: 4.x does slightly different init. How should we handle this? We can't detect master key revision yet. */

    /* SYSCTR0_CNTCR_0 = ENABLE | HALT_ON_DEBUG (write-once init) */
    (*((volatile uint32_t *)(0x700F0000))) = 3;
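    /* (3 = EN | HDBG in the generic ARM system counter CNTCR layout: bit 0 enables
       the counter, bit 1 halts it when the core is halted by a debugger.) */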

    /* Set some unknown registers in HOST1X. */
    (*((volatile uint32_t *)(0x500038F8))) &= 0xFFFFFFFE;
    (*((volatile uint32_t *)(0x50003300))) = 0;

    /* AHB_MASTER_SWID_0 */
    (*((volatile uint32_t *)(0x6000C018))) = 0;

    /* AHB_MASTER_SWID_1 - Makes USB1/USB2 use SWID[1] */
    (*((volatile uint32_t *)(0x6000C038))) = 0x40040;

    /* APBDMA_CHANNEL_SWID_0 = ~0 (SWID = 1 for all APB-DMA channels) */
    (*((volatile uint32_t *)(0x6002003C))) = 0xFFFFFFFF;

    /* APBDMA_CHANNEL_SWID1_0 = 0 (See above) */
    (*((volatile uint32_t *)(0x60020054))) = 0;

    /* APBDMA_SECURITY_REG_0 = 0 (All APB-DMA channels non-secure) */
    (*((volatile uint32_t *)(0x60020038))) = 0;

    /* MSELECT_CONFIG_0 |= WRAP_TO_INCR_SLAVE0(APC) | WRAP_TO_INCR_SLAVE1(PCIe) | WRAP_TO_INCR_SLAVE2(GPU) */
    (*((volatile uint32_t *)(0x50060000))) |= 0x38000000;

    /* AHB_ARBITRATION_PRIORITY_CTRL_0 - Select high priority group with priority 7 */
    (*((volatile uint32_t *)(0x6000C008))) = 0xE0000001;

    /* AHB_GIZMO_TZRAM_0 |= DONT_SPLIT_AHB_WR */
    (*((volatile uint32_t *)(0x6000C054))) |= 0x80;
}

void set_memory_registers_enable_mmu(void) {
    static const uintptr_t vbar = TZRAM_GET_SEGMENT_ADDRESS(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800;
    static const uintptr_t ttbr0 = TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64;
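    /* 64 bytes suffice for the L1 table: with T0SZ = 31 (33-bit VA space) and a 4KB
       granule, level 1 resolves VA bits [32:30], i.e. 2^3 = 8 entries of 8 bytes each,
       presumably carved out of the last 64 bytes below the vector area. Note that vbar
       is a virtual address (used once the MMU is on), while TTBR0_EL3 must hold the
       table's physical address. */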

    /*
        - Disable table walk descriptor access prefetch.
        - L2 instruction fetch prefetch distance = 3 (reset value)
        - L2 load/store data prefetch distance = 8 (reset value)
        - Enable the processor to receive instruction cache and TLB maintenance operations broadcast from other processors in the cluster
    */
    static const uint64_t cpuectlr = 0x1B00000040ull;

    /*
        - The next lower level is AArch64
        - Secure instruction fetch (when the PE is in Secure state, this bit disables instruction fetch from Non-secure memory)
        - External Abort/SError taken to EL3
        - FIQ taken to EL3
        - NS (EL0 and EL1 are nonsecure)
    */
    static const uint32_t scr = 0x63D;
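    /* 0x63D = RW (bit 10) | SIF (bit 9) | RES1 (bits 5:4) | EA (bit 3) | FIQ (bit 2) | NS (bit 0). */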

    /*
        - PA size: 36-bit (64 GB)
        - Granule size: 4KB
        - Shareability attribute for memory associated with translation table walks using TTBR0_EL3: Inner Shareable
        - Outer cacheability attribute for memory associated with translation table walks using TTBR0_EL3: Normal memory, Outer Write-Back Read-Allocate Write-Allocate Cacheable
        - Inner cacheability attribute for memory associated with translation table walks using TTBR0_EL3: Normal memory, Inner Write-Back Read-Allocate Write-Allocate Cacheable
        - T0SZ = 31 (33-bit address space)
    */
    static const uint32_t tcr = TCR_EL3_RSVD | TCR_PS(1) | TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA | TCR_T0SZ(33);
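    /* TCR_T0SZ(33) presumably encodes the T0SZ field as 64 - 33 = 31, matching the
       comment above (a 33-bit, i.e. 8 GB, input address space). */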

    /* Nothing trapped */
    static const uint32_t cptr = 0;

    /*
        - Attribute 0: Normal memory, Inner and Outer Write-Back Read-Allocate Write-Allocate Non-transient
        - Attribute 1: Device-nGnRE memory
        - Other attributes: Device-nGnRnE memory
    */
    static const uint64_t mair = 0x4FFull;
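    /* 0x4FF unpacks byte-wise: Attr0 = 0xFF (Normal, Inner/Outer WBWA Non-transient),
       Attr1 = 0x04 (Device-nGnRE), Attr2-7 = 0x00 (Device-nGnRnE). */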

    /*
        - Cacheability control, for EL3 instruction accesses ENABLED
        (- SP Alignment check bit NOT SET)
        - Cacheability control, for EL3 data accesses ENABLED (normal memory accesses from EL3 are cacheable)
        (- Alignment check bit NOT SET)
        - MMU enabled for EL3 stage 1 address translation
    */
    static const uint32_t sctlr = 0x30C51835;
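    /* 0x30C51835 = SCTLR_EL3 RES1 bits (0x30C50830) | M (bit 0) | C (bit 2) | I (bit 12). */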

    __set_memory_registers(ttbr0, vbar, cpuectlr, scr, tcr, cptr, mair, sctlr);
}

static void identity_remap_tzram(void) {
    /* See also: configure_ttbls (in coldboot_init.c). */
    uintptr_t *mmu_l1_tbl = (uintptr_t *)(TZRAM_GET_SEGMENT_PA(TZRAM_SEGEMENT_ID_SECMON_EVT) + 0x800 - 64);
    uintptr_t *mmu_l2_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L2_TRANSLATION_TABLE);
    uintptr_t *mmu_l3_tbl = (uintptr_t *)TZRAM_GET_SEGMENT_PA(TZRAM_SEGMENT_ID_L3_TRANSLATION_TABLE);

    mmu_map_table(1, mmu_l1_tbl, 0x40000000, mmu_l2_tbl, 0);
    mmu_map_table(2, mmu_l2_tbl, 0x7C000000, mmu_l3_tbl, 0);
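
    /* With the 4KB granule configured above, an L1 entry spans 1 GB and an L2 entry
       spans 2 MB. The first call presumably installs a table descriptor linking the
       L2 table under the 1 GB region at 0x40000000, and the second links the L3 table
       under the 2 MB region at 0x7C000000, so the TZRAM range (which lives in that
       2 MB window) can be identity-mapped below at 4KB page granularity. */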

    identity_map_mapping(mmu_l1_tbl, mmu_l3_tbl, IDENTITY_GET_MAPPING_ADDRESS(IDENTITY_MAPPING_TZRAM),
                         IDENTITY_GET_MAPPING_SIZE(IDENTITY_MAPPING_TZRAM), IDENTITY_GET_MAPPING_ATTRIBS(IDENTITY_MAPPING_TZRAM),
                         IDENTITY_IS_MAPPING_BLOCK_RANGE(IDENTITY_MAPPING_TZRAM));
}

void warmboot_init(boot_func_list_t *func_list) {
    /*
        From https://events.static.linuxfound.org/sites/events/files/slides/slides_17.pdf :

        Caches may write back dirty lines at any time:
            - To make space for new allocations
            - Even if MMU is off
            - Even if Cacheable accesses are disabled (caches are never 'off')
    */
    func_list->funcs.flush_dcache_all();
    func_list->funcs.invalidate_icache_all();

    /* On warmboot (not cpu_on) only */
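    /* (Assumption: MC_SECURITY_CFG3_0 reads as zero only when the memory controller
       was actually reset across deep sleep, and is already nonzero on a cpu_on, where
       a previous boot has configured it.) */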
    if (MC_SECURITY_CFG3_0 == 0) {
        init_dma_controllers();
    }

    identity_remap_tzram();
    /* Nintendo pointlessly fully invalidates the TLB & invalidates the data cache on the modified ranges here. */
    set_memory_registers_enable_mmu();
}