1
0
Fork 0
mirror of https://github.com/Atmosphere-NX/Atmosphere.git synced 2024-12-18 00:12:03 +00:00

Implement exception vectors.

This commit is contained in:
Michael Scire 2018-02-24 06:20:45 -08:00
parent 422cd14aac
commit e8b1e0b965
17 changed files with 374 additions and 73 deletions

View file

@ -16,7 +16,7 @@ uint32_t configitem_set(enum ConfigItem item, uint64_t value) {
bool configitem_is_recovery_boot(void) {
uint64_t is_recovery_boot;
if (configitem_get(CONFIGITEM_ISRECOVERYBOOT, &is_recovery_boot) != 0) {
panic();
generic_panic();
}
return is_recovery_boot != 0;

265
exosphere/exceptions.s Normal file
View file

@ -0,0 +1,265 @@
/* Some macros taken from https://github.com/ARM-software/arm-trusted-firmware/blob/master/include/common/aarch64/asm_macros.S */
/*
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* Declare the exception vector table, enforcing it is aligned on a
* 2KB boundary, as required by the ARMv8 architecture.
* Use zero bytes as the fill value to be stored in the padding bytes
* so that it inserts illegal AArch64 instructions. This increases
* security, robustness and potentially facilitates debugging.
*/
/* Emits the start of the exception vector table into an executable
   ("ax") section, 2KB-aligned as ARMv8 requires for VBAR_ELx targets.
   Padding bytes are zero-filled so gaps decode as illegal instructions. */
.macro vector_base label, section_name=.vectors
.section \section_name, "ax"
.align 11, 0
\label:
.endm
/*
* Create an entry in the exception vector table, enforcing it is
* aligned on a 128-byte boundary, as required by the ARMv8 architecture.
* Use zero bytes as the fill value to be stored in the padding bytes
* so that it inserts illegal AArch64 instructions. This increases
* security, robustness and potentially facilitates debugging.
*/
/* Opens one 128-byte vector slot: aligns to the architectural entry
   boundary, marks the label as a function, and starts a CFI region.
   Must be closed by check_vector_size (or a manual .endfunc/.cfi_endproc
   when the slot's tail is reused for out-of-line code). */
.macro vector_entry label, section_name=.vectors
.cfi_sections .debug_frame
.section \section_name, "ax"
.align 7, 0
.type \label, %function
.func \label
.cfi_startproc
\label:
.endm
/*
 * This macro verifies that the given vector doesn't exceed the
 * architectural limit of 32 instructions. This is meant to be placed
 * immediately after the last instruction in the vector. It takes the
 * vector entry as the parameter.
 * Note: it also closes the .func/.cfi region opened by vector_entry,
 * so every vector_entry must end in exactly one of these (or expand
 * the equivalent directives by hand).
 */
.macro check_vector_size since
.endfunc
.cfi_endproc
.if (. - \since) > (32 * 4)
.error "Vector exceeds 32 instructions"
.endif
.endm
/* Actual Vectors for Exosphere. */
.global exosphere_vectors
vector_base exosphere_vectors

/* Current EL, SP0 */
/*
 * unknown_exception sits at the (already 128-byte aligned) start of the
 * table and therefore aliases synch_sp0: every vector that branches here
 * falls through into the panic sequence below.
 */
.global unknown_exception
unknown_exception:
vector_entry synch_sp0
    /* Panic with color FF7700, code 10. */
    /* NOTE(review): encoding of color/code in this immediate is defined by
       panic(); confirm 0x07F00010 matches the documented packing. */
    mov  x0, #0x10
    movk x0, #0x07F0,lsl#16
    b    panic
    /* Fix: the macro defined above is check_vector_size, not check_vector;
       the undefined name would fail to assemble. */
    check_vector_size synch_sp0

vector_entry irq_sp0
    b unknown_exception
    check_vector_size irq_sp0

vector_entry fiq_sp0
    b unknown_exception
    check_vector_size fiq_sp0

vector_entry serror_sp0
    b unknown_exception
    check_vector_size serror_sp0
/* Current EL, SPx */
/* EL3 should not take exceptions from itself on SPx in normal operation;
   all four slots route to the unknown_exception panic path.
   Fix: call the defined macro check_vector_size (was "check_vector"). */
vector_entry synch_spx
    b unknown_exception
    check_vector_size synch_spx

vector_entry irq_spx
    b unknown_exception
    check_vector_size irq_spx

vector_entry fiq_spx
    b unknown_exception
    check_vector_size fiq_spx

vector_entry serror_spx
    b unknown_exception
    check_vector_size serror_spx
/* Lower EL, A64 */
vector_entry synch_a64
    /* Peek at ESR_EL3 to verify this synchronous exception is an SMC. */
    stp  x29, x30, [sp, #-0x10]!
    mrs  x30, esr_el3
    lsr  w29, w30, #0x1A           /* w29 = exception class (EC) field */
    cmp  w29, #0x17                /* EC 0x17 = SMC executed in AArch64 */
    ldp  x29, x30, [sp],#0x10
    b.ne unknown_exception
    /* Dispatch on core id (MPIDR_EL1 Aff0): core 3 handles SMCs on the
       current stack; cores 0-2 go through the stack-switching path. */
    stp  x29, x30, [sp, #-0x10]!
    mrs  x29, mpidr_el1
    /* Fix: source register was written "x39", which does not exist on
       AArch64 (x0-x30) and fails to assemble; the value read from
       mpidr_el1 is in x29. */
    and  x29, x29, #0x3
    cmp  x29, #0x3
    b.ne handle_core012_smc_exception  /* that handler issues its own eret */
    bl   handle_core3_smc_exception
    ldp  x29, x30, [sp],#0x10
    eret
    /* Fix: check_vector_size is the defined macro name. */
    check_vector_size synch_a64
vector_entry irq_a64
    b unknown_exception
    /* Fix: check_vector_size is the defined macro name (was "check_vector"). */
    check_vector_size irq_a64

vector_entry fiq_a64
    /* Only core 3 (MPIDR_EL1 Aff0 == 3) services FIQs; any other core
       treats the FIQ as unknown (panic path, so the pushed pair is moot). */
    stp  x29, x30, [sp, #-0x10]!
    mrs  x29, mpidr_el1
    /* Fix: source register was written "x39", which is not a valid
       AArch64 register; the mpidr_el1 value is in x29. */
    and  x29, x29, #0x3
    cmp  x29, #0x3
    b.ne unknown_exception
    /* handle_fiq_exception saves x0-x25 itself; save the rest here. */
    stp  x28, x29, [sp, #-0x10]!
    stp  x26, x27, [sp, #-0x10]!
    bl   handle_fiq_exception
    ldp  x26, x27, [sp],#0x10
    ldp  x28, x29, [sp],#0x10
    ldp  x29, x30, [sp],#0x10
    eret
    check_vector_size fiq_a64
vector_entry serror_a64
b unknown_exception
/* Close the vector's .func/CFI region by hand: the remainder of this
   128-byte slot is reused for a real function below, so the
   check_vector_size macro (which would end the region itself) is
   expanded manually at the bottom instead. */
.endfunc
.cfi_endproc
/* To save space, insert in an unused vector segment. */
.global handle_core012_smc_exception
.type handle_core012_smc_exception, %function
/*
 * SMC path for cores 0-2. Reached by plain branch (not bl) from synch_a64
 * with the caller's x29/x30 pair already pushed; switches to a dedicated
 * SMC stack, dispatches, then returns to the lower EL via eret.
 */
handle_core012_smc_exception:
/* Preserve SMC arguments x0-x7 across the two setup calls below. */
stp x6, x7, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x0, x1, [sp, #-0x10]!
bl set_priv_smc_in_progress
bl get_smc_core012_stack_address
/* NOTE(review): x0 is consumed as the new stack address here, but smc.h
   declares get_smc_core012_stack_address as returning void — the
   prototype should return the address; verify. */
mov x29, x0
ldp x0, x1, [sp],#0x10
ldp x2, x3, [sp],#0x10
ldp x4, x5, [sp],#0x10
ldp x6, x7, [sp],#0x10
mov x30, sp                /* x30 = original (exception-entry) stack pointer */
mov sp, x29                /* switch to the shared core0-2 SMC stack */
stp x29, x30, [sp, #-0x10]!    /* save new-stack base and the old sp */
bl handle_core3_smc_exception
/* NOTE(review): this pop recovers the pair pushed just above only if
   handle_core3_smc_exception leaves sp balanced; as written later in this
   file it pushes x29/x30 at entry without restoring them — verify. */
ldp x29, x30, [sp],#0x10
mov sp, x30                /* back to the original stack */
stp x6, x7, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x0, x1, [sp, #-0x10]!
bl clear_priv_smc_in_progress
ldp x0, x1, [sp],#0x10
ldp x2, x3, [sp],#0x10
ldp x4, x5, [sp],#0x10
ldp x6, x7, [sp],#0x10
ldp x29, x30, [sp],#0x10   /* pair pushed by synch_a64 before branching here */
eret
/* Manual expansion of check_vector_size's size check for serror_a64,
   since its .endfunc/.cfi_endproc were already emitted above. */
.if (. - serror_a64) > (32 * 4)
.error "Vector exceeds 32 instructions"
.endif
/* Lower EL, A32 */
/* AArch32 synchronous exceptions and IRQs are not supported; both panic.
   Fix: call the defined macro check_vector_size (was "check_vector"). */
vector_entry synch_a32
    b unknown_exception
    check_vector_size synch_a32

vector_entry irq_a32
    b unknown_exception
    check_vector_size irq_a32
vector_entry fiq_a32
b fiq_a64                  /* A32 FIQs share the A64 FIQ path */
/* Close the vector region by hand; the rest of this slot holds the
   out-of-line FIQ handler, and the size check is expanded manually below. */
.endfunc
.cfi_endproc
/* To save space, insert in an unused vector segment. */
.global handle_fiq_exception
.type handle_fiq_exception, %function
/*
 * Saves the remaining caller-visible registers (x0-x25 plus a frame
 * record), calls handle_registered_interrupt, and restores.  Called via
 * bl from fiq_a64, which has already saved x26-x29 itself.
 * Pushes 14 pairs and pops 14 pairs: x26-x28 are intentionally not
 * saved here (the caller owns them).
 */
handle_fiq_exception:
stp x29, x30, [sp, #-0x10]!
stp x24, x25, [sp, #-0x10]!
stp x22, x23, [sp, #-0x10]!
stp x20, x21, [sp, #-0x10]!
stp x18, x19, [sp, #-0x10]!
stp x16, x17, [sp, #-0x10]!
stp x14, x15, [sp, #-0x10]!
stp x12, x13, [sp, #-0x10]!
stp x10, x11, [sp, #-0x10]!
stp x8, x9, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
stp x4, x5, [sp, #-0x10]!
stp x2, x3, [sp, #-0x10]!
stp x0, x1, [sp, #-0x10]!
bl handle_registered_interrupt
ldp x0, x1, [sp],#0x10
ldp x2, x3, [sp],#0x10
ldp x4, x5, [sp],#0x10
ldp x6, x7, [sp],#0x10
ldp x8, x9, [sp],#0x10
ldp x10, x11, [sp],#0x10
ldp x12, x13, [sp],#0x10
ldp x14, x15, [sp],#0x10
ldp x16, x17, [sp],#0x10
ldp x18, x19, [sp],#0x10
ldp x20, x21, [sp],#0x10
ldp x22, x23, [sp],#0x10
ldp x24, x25, [sp],#0x10
ldp x29, x30, [sp],#0x10
ret
/* Manual expansion of check_vector_size's size check for fiq_a32. */
.if (. - fiq_a32) > (32 * 4)
.error "Vector exceeds 32 instructions"
.endif
vector_entry serror_a32
    b unknown_exception
    /* Close the vector region by hand; the rest of this slot holds the
       core-3 SMC handler, with the size check expanded manually below. */
    .endfunc
    .cfi_endproc
/* To save space, insert in an unused vector segment. */
.global handle_core3_smc_exception
.type handle_core3_smc_exception, %function
/*
 * Saves x0-x19 plus a frame record, extracts the SMC immediate from
 * ESR_EL3's ISS field, and calls call_smc_handler(handler_id, args) with
 * args pointing at the saved x0..x19 block (matching smc_args_t's X[8]).
 * Handlers write results back through that block, which is reloaded into
 * the registers before returning.
 */
handle_core3_smc_exception:
    stp x29, x30, [sp, #-0x10]!
    stp x18, x19, [sp, #-0x10]!
    stp x16, x17, [sp, #-0x10]!
    stp x14, x15, [sp, #-0x10]!
    stp x12, x13, [sp, #-0x10]!
    stp x10, x11, [sp, #-0x10]!
    stp x8, x9, [sp, #-0x10]!
    stp x6, x7, [sp, #-0x10]!
    stp x4, x5, [sp, #-0x10]!
    stp x2, x3, [sp, #-0x10]!
    stp x0, x1, [sp, #-0x10]!
    mrs x0, esr_el3
    and x0, x0, #0xFFFF        /* ISS low 16 bits = SMC immediate (handler id) */
    mov x1, sp                 /* x1 -> saved x0..x19 (smc_args_t view) */
    bl call_smc_handler
    ldp x0, x1, [sp],#0x10
    ldp x2, x3, [sp],#0x10
    ldp x4, x5, [sp],#0x10
    ldp x6, x7, [sp],#0x10
    ldp x8, x9, [sp],#0x10
    ldp x10, x11, [sp],#0x10
    ldp x12, x13, [sp],#0x10
    ldp x14, x15, [sp],#0x10
    ldp x16, x17, [sp],#0x10
    ldp x18, x19, [sp],#0x10
    /* Fix: restore the x29/x30 pair pushed at entry. Without this the
       function returned with sp off by 0x10, so synch_a64's final
       "ldp x29, x30" popped this function's frame record instead of its
       own, and handle_core012's stack-restore (mov sp, x30) loaded a
       return address as the stack pointer. */
    ldp x29, x30, [sp],#0x10
    ret
/* Manual expansion of check_vector_size's size check for serror_a32. */
.if (. - serror_a32) > (32 * 4)
.error "Vector exceeds 32 instructions"
.endif

View file

@ -117,11 +117,11 @@ size_t gcm_decrypt_key(void *dst, size_t dst_size, const void *src, size_t src_s
if (is_personalized == 0) {
/* Devkit keys use a different keyformat without a MAC/Device ID. */
if (src_size <= 0x10 || src_size - 0x10 > dst_size) {
panic();
generic_panic();
}
} else {
if (src_size <= 0x30 || src_size - 0x20 > dst_size) {
panic();
generic_panic();
}
}

View file

@ -94,7 +94,7 @@ volatile i2c_registers_t *i2c_get_registers_from_id(unsigned int id) {
case 5:
return I2C6_REGS;
default:
panic();
generic_panic();
}
return NULL;
}

View file

@ -74,7 +74,7 @@ void handle_registered_interrupt(void) {
}
/* We must have found a handler, or something went wrong. */
if (!found_handler) {
panic();
generic_panic();
}
}
}
@ -92,7 +92,7 @@ void intr_register_handler(unsigned int id, void (*handler)(void)) {
}
/* Failure to register is an error condition. */
if (!registered_handler) {
panic();
generic_panic();
}
}

25
exosphere/lock.h Normal file
View file

@ -0,0 +1,25 @@
#ifndef EXOSPHERE_LOCK_H
#define EXOSPHERE_LOCK_H

#include <stdbool.h>

/* Simple atomics driver for Exosphere. */

/* Acquire a lock, spinning until it is held. */
static inline void lock_acquire(bool *l) {
    while (__atomic_test_and_set(l, __ATOMIC_ACQUIRE)) {
        /* Wait to acquire lock. */
    }
}

/* Release a lock. */
static inline void lock_release(bool *l) {
    __atomic_clear(l, __ATOMIC_RELEASE);
}

/* Try to acquire a lock without spinning.
 * Returns true if the lock was acquired, false if it was already held.
 * Fix: __atomic_test_and_set returns the PREVIOUS flag value (true when
 * the lock was already taken), so the result must be negated to mean
 * "acquired".  Callers such as smc_wrapper_sync treat a false return as
 * "busy" (return code 3), which the unnegated version would produce on
 * every successful acquisition. */
static inline bool lock_try_acquire(bool *l) {
    return !__atomic_test_and_set(l, __ATOMIC_ACQUIRE);
}

#endif

View file

@ -45,7 +45,7 @@ bool check_mkey_revision(unsigned int revision) {
void mkey_detect_revision(void) {
if (g_determined_mkey_revision) {
panic();
generic_panic();
}
for (unsigned int rev = 0; rev < MASTERKEY_REVISION_MAX; rev++) {
@ -60,13 +60,13 @@ void mkey_detect_revision(void) {
/* TODO: When panic is implemented, make this a really distinctive color. */
/* Maybe bright red? */
if (!g_determined_mkey_revision) {
panic();
generic_panic();
}
}
unsigned int mkey_get_revision(void) {
if (!g_determined_mkey_revision) {
panic();
generic_panic();
}
return g_mkey_revision;
@ -74,11 +74,11 @@ unsigned int mkey_get_revision(void) {
unsigned int mkey_get_keyslot(unsigned int revision) {
if (!g_determined_mkey_revision || revision >= MASTERKEY_REVISION_MAX) {
panic();
generic_panic();
}
if (revision > g_mkey_revision) {
panic();
generic_panic();
}
if (revision == g_mkey_revision) {

View file

@ -7,7 +7,7 @@ volatile security_carveout_t *get_carveout_by_id(unsigned int carveout) {
if (CARVEOUT_ID_MIN <= carveout && carveout <= CARVEOUT_ID_MAX) {
return (volatile security_carveout_t *)(MC_BASE + 0xC08ULL + 0x50 * (carveout - CARVEOUT_ID_MIN));
}
panic();
generic_panic();
return NULL;
switch (carveout) {
case 4: /* Kernel carveout */
@ -15,7 +15,7 @@ volatile security_carveout_t *get_carveout_by_id(unsigned int carveout) {
case 5: /* Unused Kernel carveout */
return (volatile security_carveout_t *)(MC_BASE + 0xD48ULL);
default:
panic();
generic_panic();
return NULL;
}
}
@ -83,7 +83,7 @@ void configure_default_carveouts(void) {
void configure_kernel_carveout(unsigned int carveout_id, uint64_t address, uint64_t size) {
if (carveout_id != 4 && carveout_id != 5) {
panic();
generic_panic();
}
volatile security_carveout_t *carveout = get_carveout_by_id(carveout_id);

View file

@ -209,7 +209,7 @@ void verify_header_signature(package2_header_t *header) {
/* This is normally only allowed on dev units, but we'll allow it anywhere. */
if (bootconfig_is_package2_unsigned() == 0 && rsa2048_pss_verify(header->signature, 0x100, modulus, 0x100, header->encrypted_header, 0x100) == 0) {
panic();
generic_panic();
}
}
@ -314,7 +314,7 @@ uint32_t decrypt_and_validate_header(package2_header_t *header) {
}
/* Ensure we successfully decrypted the header. */
panic();
generic_panic();
}
return 0;
}
@ -358,7 +358,7 @@ void load_package2_sections(package2_meta_t *metadata, uint32_t master_key_rev)
potential_base_end += PACKAGE2_SIZE_MAX;
}
if (!found_safe_carveout) {
panic();
generic_panic();
}
/* Relocate to new carveout. */
memcpy((void *)potential_base_start, load_buf, PACKAGE2_SIZE_MAX);

View file

@ -42,7 +42,7 @@ security_engine_t *get_security_engine_address(void) {
void set_security_engine_callback(unsigned int (*callback)(void)) {
if (callback == NULL || g_se_callback != NULL) {
panic();
generic_panic();
}
g_se_callback = callback;
@ -60,7 +60,7 @@ void se_operation_completed(void) {
void se_check_for_error(void) {
if (SECURITY_ENGINE->INT_STATUS_REG & 0x10000 || SECURITY_ENGINE->FLAGS_REG & 3 || SECURITY_ENGINE->ERR_STATUS_REG) {
panic();
generic_panic();
}
}
@ -70,7 +70,7 @@ void se_trigger_intrrupt(void) {
void se_verify_flags_cleared(void) {
if (SECURITY_ENGINE->FLAGS_REG & 3) {
panic();
generic_panic();
}
}
@ -81,7 +81,7 @@ void se_clear_interrupts(void) {
/* Set the flags for an AES keyslot. */
void set_aes_keyslot_flags(unsigned int keyslot, unsigned int flags) {
if (keyslot >= KEYSLOT_AES_MAX) {
panic();
generic_panic();
}
/* Misc flags. */
@ -98,7 +98,7 @@ void set_aes_keyslot_flags(unsigned int keyslot, unsigned int flags) {
/* Set the flags for an RSA keyslot. */
void set_rsa_keyslot_flags(unsigned int keyslot, unsigned int flags) {
if (keyslot >= KEYSLOT_RSA_MAX) {
panic();
generic_panic();
}
/* Misc flags. */
@ -115,7 +115,7 @@ void set_rsa_keyslot_flags(unsigned int keyslot, unsigned int flags) {
void clear_aes_keyslot(unsigned int keyslot) {
if (keyslot >= KEYSLOT_AES_MAX) {
panic();
generic_panic();
}
/* Zero out the whole keyslot and IV. */
@ -127,7 +127,7 @@ void clear_aes_keyslot(unsigned int keyslot) {
void clear_rsa_keyslot(unsigned int keyslot) {
if (keyslot >= KEYSLOT_RSA_MAX) {
panic();
generic_panic();
}
/* Zero out the whole keyslot. */
@ -145,7 +145,7 @@ void clear_rsa_keyslot(unsigned int keyslot) {
void set_aes_keyslot(unsigned int keyslot, const void *key, size_t key_size) {
if (keyslot >= KEYSLOT_AES_MAX || key_size > KEYSIZE_AES_MAX) {
panic();
generic_panic();
}
for (size_t i = 0; i < (key_size >> 2); i++) {
@ -156,7 +156,7 @@ void set_aes_keyslot(unsigned int keyslot, const void *key, size_t key_size) {
void set_rsa_keyslot(unsigned int keyslot, const void *modulus, size_t modulus_size, const void *exponent, size_t exp_size) {
if (keyslot >= KEYSLOT_RSA_MAX || modulus_size > KEYSIZE_RSA_MAX || exp_size > KEYSIZE_RSA_MAX) {
panic();
generic_panic();
}
for (size_t i = 0; i < (modulus_size >> 2); i++) {
@ -175,7 +175,7 @@ void set_rsa_keyslot(unsigned int keyslot, const void *modulus, size_t modulus_
void set_aes_keyslot_iv(unsigned int keyslot, const void *iv, size_t iv_size) {
if (keyslot >= KEYSLOT_AES_MAX || iv_size > 0x10) {
panic();
generic_panic();
}
for (size_t i = 0; i < (iv_size >> 2); i++) {
@ -186,7 +186,7 @@ void set_aes_keyslot_iv(unsigned int keyslot, const void *iv, size_t iv_size) {
void clear_aes_keyslot_iv(unsigned int keyslot) {
if (keyslot >= KEYSLOT_AES_MAX) {
panic();
generic_panic();
}
for (size_t i = 0; i < (0x10 >> 2); i++) {
@ -203,7 +203,7 @@ void set_se_ctr(const void *ctr) {
void decrypt_data_into_keyslot(unsigned int keyslot_dst, unsigned int keyslot_src, const void *wrapped_key, size_t wrapped_key_size) {
if (keyslot_dst >= KEYSLOT_AES_MAX || keyslot_src >= KEYSIZE_AES_MAX || wrapped_key_size > KEYSIZE_AES_MAX) {
panic();
generic_panic();
}
SECURITY_ENGINE->CONFIG_REG = (ALG_AES_DEC | DST_KEYTAB);
@ -217,7 +217,7 @@ void decrypt_data_into_keyslot(unsigned int keyslot_dst, unsigned int keyslot_sr
void se_aes_crypt_insecure_internal(unsigned int keyslot, uint32_t out_ll_paddr, uint32_t in_ll_paddr, size_t size, unsigned int crypt_config, bool encrypt, unsigned int (*callback)(void)) {
if (keyslot >= KEYSLOT_AES_MAX) {
panic();
generic_panic();
}
if (size == 0) {
@ -281,7 +281,7 @@ void se_exp_mod(unsigned int keyslot, void *buf, size_t size, unsigned int (*cal
uint8_t stack_buf[KEYSIZE_RSA_MAX];
if (keyslot >= KEYSLOT_RSA_MAX || size > KEYSIZE_RSA_MAX) {
panic();
generic_panic();
}
/* Endian swap the input. */
@ -310,7 +310,7 @@ void se_synchronous_exp_mod(unsigned int keyslot, void *dst, size_t dst_size, co
uint8_t stack_buf[KEYSIZE_RSA_MAX];
if (keyslot >= KEYSLOT_RSA_MAX || src_size > KEYSIZE_RSA_MAX || dst_size > KEYSIZE_RSA_MAX) {
panic();
generic_panic();
}
/* Endian swap the input. */
@ -389,7 +389,7 @@ void se_perform_aes_block_operation(void *dst, size_t dst_size, const void *src,
uint8_t block[0x10];
if (src_size > sizeof(block) || dst_size > sizeof(block)) {
panic();
generic_panic();
}
/* Load src data into block. */
@ -408,7 +408,7 @@ void se_perform_aes_block_operation(void *dst, size_t dst_size, const void *src,
void se_aes_ctr_crypt(unsigned int keyslot, void *dst, size_t dst_size, const void *src, size_t src_size, const void *ctr, size_t ctr_size) {
if (keyslot >= KEYSLOT_AES_MAX || ctr_size != 0x10) {
panic();
generic_panic();
}
unsigned int num_blocks = src_size >> 4;
@ -438,7 +438,7 @@ void se_aes_ctr_crypt(unsigned int keyslot, void *dst, size_t dst_size, const vo
void se_aes_ecb_encrypt_block(unsigned int keyslot, void *dst, size_t dst_size, const void *src, size_t src_size, unsigned int config_high) {
if (keyslot >= KEYSLOT_AES_MAX || dst_size != 0x10 || src_size != 0x10) {
panic();
generic_panic();
}
/* Set configuration high (256-bit vs 128-bit) based on parameter. */
@ -459,7 +459,7 @@ void se_aes_256_ecb_encrypt_block(unsigned int keyslot, void *dst, size_t dst_si
void se_aes_ecb_decrypt_block(unsigned int keyslot, void *dst, size_t dst_size, const void *src, size_t src_size) {
if (keyslot >= KEYSLOT_AES_MAX || dst_size != 0x10 || src_size != 0x10) {
panic();
generic_panic();
}
SECURITY_ENGINE->CONFIG_REG = (ALG_AES_DEC | DST_MEMORY);
@ -481,7 +481,7 @@ void shift_left_xor_rb(uint8_t *key) {
void se_compute_aes_cmac(unsigned int keyslot, void *cmac, size_t cmac_size, const void *data, size_t data_size, unsigned int config_high) {
if (keyslot >= KEYSLOT_AES_MAX) {
panic();
generic_panic();
}
/* Generate the derived key, to be XOR'd with final output block. */
@ -563,7 +563,7 @@ void se_calculate_sha256(void *dst, const void *src, size_t src_size) {
/* RNG API */
void se_initialize_rng(unsigned int keyslot) {
if (keyslot >= KEYSLOT_AES_MAX) {
panic();
generic_panic();
}
/* To initialize the RNG, we'll perform an RNG operation into an output buffer. */
@ -581,7 +581,7 @@ void se_initialize_rng(unsigned int keyslot) {
void se_generate_random(unsigned int keyslot, void *dst, size_t size) {
if (keyslot >= KEYSLOT_AES_MAX) {
panic();
generic_panic();
}
uint32_t num_blocks = size >> 4;

View file

@ -29,7 +29,7 @@ void unseal_key_internal(unsigned int keyslot, const void *src, const uint8_t *s
void seal_titlekey(void *dst, size_t dst_size, const void *src, size_t src_size) {
if (dst_size != 0x10 || src_size != 0x10) {
panic();
generic_panic();
}
seal_key_internal(dst, src, g_titlekey_seal_key_source);
@ -38,7 +38,7 @@ void seal_titlekey(void *dst, size_t dst_size, const void *src, size_t src_size)
void unseal_titlekey(unsigned int keyslot, const void *src, size_t src_size) {
if (src_size != 0x10) {
panic();
generic_panic();
}
unseal_key_internal(keyslot, src, g_titlekey_seal_key_source);
@ -47,7 +47,7 @@ void unseal_titlekey(unsigned int keyslot, const void *src, size_t src_size) {
void seal_key(void *dst, size_t dst_size, const void *src, size_t src_size, unsigned int usecase) {
if (usecase >= CRYPTOUSECASE_MAX || dst_size != 0x10 || src_size != 0x10) {
panic();
generic_panic();
}
@ -56,7 +56,7 @@ void seal_key(void *dst, size_t dst_size, const void *src, size_t src_size, unsi
void unseal_key(unsigned int keyslot, const void *src, size_t src_size, unsigned int usecase) {
if (usecase >= CRYPTOUSECASE_MAX || src_size != 0x10) {
panic();
generic_panic();
}
unseal_key_internal(keyslot, src, g_seal_key_sources[usecase]);

View file

@ -3,6 +3,7 @@
#include "utils.h"
#include "configitem.h"
#include "cpu_context.h"
#include "lock.h"
#include "masterkey.h"
#include "mc.h"
#include "mmu.h"
@ -103,7 +104,17 @@ smc_table_t g_smc_tables[2] = {
}
};
bool g_is_smc_in_progress = false;
bool g_is_user_smc_in_progress = false;
bool g_is_priv_smc_in_progress = false;
/* Privileged SMC lock must be available to exceptions.s. */
void set_priv_smc_in_progress(void) {
lock_acquire(&g_is_priv_smc_in_progress);
}
void clear_priv_smc_in_progress(void) {
lock_release(&g_is_priv_smc_in_progress);
}
uint32_t (*g_smc_callback)(void *, uint64_t) = NULL;
uint64_t g_smc_callback_key = 0;
@ -134,28 +145,28 @@ void call_smc_handler(uint32_t handler_id, smc_args_t *args) {
/* Validate top-level handler. */
if (handler_id != SMC_HANDLER_USER && handler_id != SMC_HANDLER_PRIV) {
panic();
generic_panic();
}
/* Validate core is appropriate for handler. */
if (handler_id == SMC_HANDLER_USER && get_core_id() != 3) {
/* USER SMCs must be called via svcCallSecureMonitor on core 3 (where spl runs) */
panic();
generic_panic();
}
/* Validate sub-handler index */
if ((smc_id = (unsigned char)args->X[0]) >= g_smc_tables[handler_id].num_handlers) {
panic();
generic_panic();
}
/* Validate sub-handler */
if (g_smc_tables[handler_id].handlers[smc_id].id != args->X[0]) {
panic();
generic_panic();
}
/* Validate handler. */
if ((smc_handler = g_smc_tables[handler_id].handlers[smc_id].handler) == NULL) {
panic();
generic_panic();
}
/* Call function. */
@ -164,30 +175,26 @@ void call_smc_handler(uint32_t handler_id, smc_args_t *args) {
uint32_t smc_wrapper_sync(smc_args_t *args, uint32_t (*handler)(smc_args_t *)) {
uint32_t result;
/* TODO: Make g_is_smc_in_progress atomic. */
if (g_is_smc_in_progress) {
if (!lock_try_acquire(&g_is_user_smc_in_progress)) {
return 3;
}
g_is_smc_in_progress = true;
result = handler(args);
g_is_smc_in_progress = false;
lock_release(&g_is_user_smc_in_progress);
return result;
}
uint32_t smc_wrapper_async(smc_args_t *args, uint32_t (*handler)(smc_args_t *), uint32_t (*callback)(void *, uint64_t)) {
uint32_t result;
uint64_t key;
/* TODO: Make g_is_smc_in_progress atomic. */
if (g_is_smc_in_progress) {
if (!lock_try_acquire(&g_is_user_smc_in_progress)) {
return 3;
}
g_is_smc_in_progress = 1;
if ((key = try_set_smc_callback(callback)) != 0) {
result = handler(args);
if (result == 0) {
/* Pass the status check key back to userland. */
args->X[1] = key;
/* Early return, leaving g_is_smc_in_progress == 1 */
/* Early return, leaving g_is_user_smc_in_progress locked */
return result;
} else {
/* No status to check. */
@ -197,7 +204,7 @@ uint32_t smc_wrapper_async(smc_args_t *args, uint32_t (*handler)(smc_args_t *),
/* smcCheckStatus needs to be called. */
result = 3;
}
g_is_smc_in_progress = false;
lock_release(&g_is_user_smc_in_progress);
return result;
}
@ -277,7 +284,7 @@ uint32_t smc_exp_mod_get_result(void *buf, uint64_t size) {
se_get_exp_mod_output(buf, 0x100);
/* smc_exp_mod is done now. */
g_is_smc_in_progress = false;
lock_release(&g_is_user_smc_in_progress);
return 0;
}
@ -303,7 +310,7 @@ uint32_t smc_crypt_aes_status_check(void *buf, uint64_t size) {
return 3;
}
/* smc_crypt_aes is done now. */
g_is_smc_in_progress = false;
lock_release(&g_is_user_smc_in_progress);
return 0;
}
@ -352,7 +359,7 @@ uint32_t smc_unwrap_rsa_oaep_wrapped_titlekey_get_result(void *buf, uint64_t siz
se_get_exp_mod_output(rsa_wrapped_titlekey, 0x100);
if (tkey_rsa_oaep_unwrap(aes_wrapped_titlekey, 0x10, rsa_wrapped_titlekey, 0x100) != 0x10) {
/* Failed to extract RSA OAEP wrapped key. */
g_is_smc_in_progress = false;
lock_release(&g_is_user_smc_in_progress);
return 2;
}
@ -363,7 +370,7 @@ uint32_t smc_unwrap_rsa_oaep_wrapped_titlekey_get_result(void *buf, uint64_t siz
p_sealed_key[1] = sealed_titlekey[1];
/* smc_unwrap_rsa_oaep_wrapped_titlekey is done now. */
g_is_smc_in_progress = false;
lock_release(&g_is_user_smc_in_progress);
return 0;
}
@ -403,8 +410,7 @@ uint32_t smc_get_random_bytes_for_priv(smc_args_t *args) {
uint32_t result;
/* TODO: Make atomic. */
if (g_is_smc_in_progress) {
if (!lock_try_acquire(&g_is_user_smc_in_progress)) {
if (args->X[1] > 0x38) {
return 2;
}
@ -413,12 +419,11 @@ uint32_t smc_get_random_bytes_for_priv(smc_args_t *args) {
randomcache_getbytes(&args->X[1], num_bytes);
result = 0;
} else {
g_is_smc_in_progress = true;
/* If the kernel isn't denied service by a usermode SMC, generate fresh random bytes. */
result = user_get_random_bytes(args);
/* Also, refill our cache while we have the chance in case we get denied later. */
randomcache_refill();
g_is_smc_in_progress = false;
lock_release(&g_is_user_smc_in_progress);
}
return result;
}

View file

@ -10,6 +10,11 @@ typedef struct {
uint64_t X[8];
} smc_args_t;
void set_priv_smc_in_progress(void);
void clear_priv_smc_in_progress(void);
void get_smc_core012_stack_address(void);
void call_smc_handler(unsigned int handler_id, smc_args_t *args);
#endif

View file

@ -243,7 +243,7 @@ uint32_t user_crypt_aes(smc_args_t *args) {
size_t size = args->X[6];
if (size & 0xF) {
panic();
generic_panic();
}
set_crypt_aes_done(false);

View file

@ -19,7 +19,7 @@ void tkey_set_expected_label_hash(uint64_t *label_hash) {
void tkey_set_master_key_rev(unsigned int master_key_rev) {
if (master_key_rev >= MASTERKEY_REVISION_MAX) {
panic();
generic_panic();
}
}
@ -28,7 +28,7 @@ void calculate_mgf1_and_xor(void *masked, size_t masked_size, const void *seed,
uint8_t cur_hash[0x20];
uint8_t hash_buf[0xE4];
if (seed_size >= 0xE0) {
panic();
generic_panic();
}
size_t hash_buf_size = seed_size + 4;
@ -64,7 +64,7 @@ void calculate_mgf1_and_xor(void *masked, size_t masked_size, const void *seed,
size_t tkey_rsa_oaep_unwrap(void *dst, size_t dst_size, void *src, size_t src_size) {
if (src_size != 0x100) {
panic();
generic_panic();
}
/* RSA Wrapped titlekeys use RSA-OAEP. */
@ -139,7 +139,7 @@ size_t tkey_rsa_oaep_unwrap(void *dst, size_t dst_size, void *src, size_t src_si
void tkey_aes_unwrap(void *dst, size_t dst_size, const void *src, size_t src_size) {
if (g_tkey_master_key_rev >= MASTERKEY_REVISION_MAX || dst_size != 0x10 || src_size != 0x10) {
panic();
generic_panic();
}
const uint8_t titlekek_source[0x10] = {

View file

@ -15,7 +15,7 @@ bool upage_init(upage_ref_t *upage, void *user_address) {
if (g_secure_page_user_address != NULL) {
/* Different physical address indicate SPL was rebooted, or another process got access to svcCallSecureMonitor. Panic. */
if (g_secure_page_user_address != upage->user_page) {
panic();
generic_panic();
}
upage->secure_page = SECURE_USER_PAGE_ADDR;
} else {

View file

@ -8,7 +8,8 @@
#define BIT(x) (1u << (x))
#define BITL(x) (1ull << (x))
void panic(void);
void panic(uint32_t code);
void generic_panic(void);
uint32_t get_physical_address(void *vaddr);