diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_secure_monitor_base.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_secure_monitor_base.hpp
new file mode 100644
index 000000000..76ec13937
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_secure_monitor_base.hpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include
+#include
+#include
+
+namespace ams::kern::arch::arm64::smc {
+
+    template<int SmcId, bool DisableInterrupt>
+ void SecureMonitorCall(u64 *buf) {
+ /* Load arguments into registers. */
+ register u64 x0 asm("x0") = buf[0];
+ register u64 x1 asm("x1") = buf[1];
+ register u64 x2 asm("x2") = buf[2];
+ register u64 x3 asm("x3") = buf[3];
+ register u64 x4 asm("x4") = buf[4];
+ register u64 x5 asm("x5") = buf[5];
+ register u64 x6 asm("x6") = buf[6];
+ register u64 x7 asm("x7") = buf[7];
+
+ /* Perform the call. */
+ if constexpr (DisableInterrupt) {
+ KScopedInterruptDisable di;
+
+ /* Backup the current thread pointer. */
+ const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
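+            /* (The secure monitor may clobber x18, which holds the current thread pointer, so it is saved and restored around the call.) */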
+
+ __asm__ __volatile__("smc %c[smc_id]"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
+ : [smc_id]"i"(SmcId)
+ : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
+ );
+
+ /* Restore the current thread pointer into X18. */
+ cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
+ } else {
+ /* Backup the current thread pointer. */
+ const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
+
+ __asm__ __volatile__("smc %c[smc_id]"
+ : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
+ : [smc_id]"i"(SmcId)
+ : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
+ );
+
+ /* Restore the current thread pointer into X18. */
+ cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
+ }
+
+ /* Store arguments to output. */
+ buf[0] = x0;
+ buf[1] = x1;
+ buf[2] = x2;
+ buf[3] = x3;
+ buf[4] = x4;
+ buf[5] = x5;
+ buf[6] = x6;
+ buf[7] = x7;
+ }
+
+ enum PsciFunction {
+ PsciFunction_CpuSuspend = 0xC4000001,
+ PsciFunction_CpuOff = 0x84000002,
+ PsciFunction_CpuOn = 0xC4000003,
+ };
+
+    template<int SmcId>
+ u64 PsciCall(PsciFunction function, u64 x1 = 0, u64 x2 = 0, u64 x3 = 0, u64 x4 = 0, u64 x5 = 0, u64 x6 = 0, u64 x7 = 0) {
+ ams::svc::lp64::SecureMonitorArguments args = { { function, x1, x2, x3, x4, x5, x6, x7 } };
+
+        SecureMonitorCall<SmcId, true>(args.r);
+
+ return args.r[0];
+ }
+
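+    /* PSCI CPU_ON powers on the target core, which begins execution at entrypoint with arg passed as the context id (x0), per the PSCI specification. */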
+    template<int SmcId>
+ u64 CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
+        return PsciCall<SmcId>(PsciFunction_CpuOn, core_id, entrypoint, arg);
+ }
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp
index 003f74570..b12e8d02e 100644
--- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp
+++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp
@@ -15,9 +15,12 @@
*/
#pragma once
#include
+#include
namespace ams::kern {
+ constexpr inline KPhysicalAddress MainMemoryAddress = 0x80000000;
+
constexpr inline size_t MainMemorySize = 4_GB;
constexpr inline size_t MainMemorySizeMax = 8_GB;
diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
index b7c2f1697..2349e3a99 100644
--- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
+++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
@@ -15,23 +15,17 @@
*/
#pragma once
#include
-
-namespace ams::kern {
-
- struct InitialProcessBinaryLayout;
-
-}
+#include <mesosphere/kern_k_system_control_base.hpp>
namespace ams::kern::board::nintendo::nx {
- class KSystemControl {
+ class KSystemControl : public KSystemControlBase {
public:
- class Init {
+ class Init : public KSystemControlBase::Init {
public:
/* Initialization. */
+ static size_t GetRealMemorySize();
static size_t GetIntendedMemorySize();
- static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
- static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
static bool ShouldIncreaseThreadResourceLimit();
static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
static size_t GetApplicationPoolSize();
@@ -40,7 +34,7 @@ namespace ams::kern::board::nintendo::nx {
static u8 GetDebugLogUartPort();
/* Randomness. */
- static void GenerateRandomBytes(void *dst, size_t size);
+ static void GenerateRandom(u64 *dst, size_t count);
static u64 GenerateRandomRange(u64 min, u64 max);
};
public:
@@ -50,7 +44,7 @@ namespace ams::kern::board::nintendo::nx {
static NOINLINE u32 GetCreateProcessMemoryPool();
/* Randomness. */
- static void GenerateRandomBytes(void *dst, size_t size);
+ static void GenerateRandom(u64 *dst, size_t count);
static u64 GenerateRandomRange(u64 min, u64 max);
static u64 GenerateRandomU64();
@@ -58,23 +52,12 @@ namespace ams::kern::board::nintendo::nx {
static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
- static ALWAYS_INLINE u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
- u32 v;
- ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
- return v;
- }
-
- static ALWAYS_INLINE void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
- u32 v;
- ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
- }
-
/* Power management. */
static void SleepSystem();
static NORETURN void StopSystem(void *arg = nullptr);
/* User access. */
- static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
+ static void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args);
/* Secure Memory. */
static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
diff --git a/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_memory_layout.hpp
index 003f74570..b18ec45c7 100644
--- a/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_memory_layout.hpp
+++ b/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_memory_layout.hpp
@@ -15,9 +15,12 @@
*/
#pragma once
#include
+#include
namespace ams::kern {
+ constexpr inline KPhysicalAddress MainMemoryAddress = 0x40000000;
+
constexpr inline size_t MainMemorySize = 4_GB;
constexpr inline size_t MainMemorySizeMax = 8_GB;
diff --git a/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_system_control.hpp
index d1d1fa212..78a94fa18 100644
--- a/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_system_control.hpp
+++ b/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_system_control.hpp
@@ -15,71 +15,14 @@
*/
#pragma once
#include
-
-namespace ams::kern {
-
- struct InitialProcessBinaryLayout;
-
-}
+#include <mesosphere/kern_k_system_control_base.hpp>
namespace ams::kern::board::qemu::virt {
- class KSystemControl {
+ class KSystemControl : public KSystemControlBase {
public:
- class Init {
- public:
- /* Initialization. */
- static size_t GetIntendedMemorySize();
- static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
- static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
- static bool ShouldIncreaseThreadResourceLimit();
- static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
- static size_t GetApplicationPoolSize();
- static size_t GetAppletPoolSize();
- static size_t GetMinimumNonSecureSystemPoolSize();
- static u8 GetDebugLogUartPort();
-
- /* Randomness. */
- static void GenerateRandomBytes(void *dst, size_t size);
- static u64 GenerateRandomRange(u64 min, u64 max);
- };
- public:
- /* Initialization. */
- static NOINLINE void InitializePhase1();
- static NOINLINE void InitializePhase2();
- static NOINLINE u32 GetCreateProcessMemoryPool();
-
- /* Randomness. */
- static void GenerateRandomBytes(void *dst, size_t size);
- static u64 GenerateRandomRange(u64 min, u64 max);
- static u64 GenerateRandomU64();
-
- /* Privileged Access. */
- static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
- static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
-
- static ALWAYS_INLINE u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
- u32 v;
- ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
- return v;
- }
-
- static ALWAYS_INLINE void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
- u32 v;
- ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
- }
-
- /* Power management. */
- static void SleepSystem();
- static NORETURN void StopSystem(void *arg = nullptr);
-
/* User access. */
static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
-
- /* Secure Memory. */
- static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
- static Result AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool);
- static void FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool);
};
}
\ No newline at end of file
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp
index 5ac2a5908..34d3d2e6a 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp
@@ -212,13 +212,17 @@ namespace ams::kern {
static NOINLINE auto GetKernelPageTableHeapRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelPtHeap); }
static NOINLINE auto GetKernelInitPageTableRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelInitPt); }
- static NOINLINE auto GetKernelPoolManagementRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolManagement); }
static NOINLINE auto GetKernelPoolPartitionRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolPartition); }
+ static NOINLINE auto GetKernelPoolManagementRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolManagement); }
static NOINLINE auto GetKernelSystemPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemPool); }
static NOINLINE auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemNonSecurePool); }
static NOINLINE auto GetKernelAppletPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramAppletPool); }
static NOINLINE auto GetKernelApplicationPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramApplicationPool); }
+ static NOINLINE bool HasKernelSystemNonSecurePoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramSystemNonSecurePool) != nullptr; }
+ static NOINLINE bool HasKernelAppletPoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramAppletPool) != nullptr; }
+ static NOINLINE bool HasKernelApplicationPoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramApplicationPool) != nullptr; }
+
static NOINLINE auto GetKernelTraceBufferRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelTraceBuffer); }
};
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp
new file mode 100644
index 000000000..2abfc948a
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_system_control_base.hpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include
+#include
+
+namespace ams::kern {
+
+ struct InitialProcessBinaryLayout;
+
+}
+
+namespace ams::kern {
+
+ class KSystemControlBase {
+ protected:
+ /* Nintendo uses std::mt19937_t for randomness. */
+        /* To save space (and because mt19937_t isn't secure anyway), */
+ /* We will use TinyMT. */
+ static constinit inline bool s_initialized_random_generator;
+ static constinit inline util::TinyMT s_random_generator{util::ConstantInitialize};
+ static constinit inline KSpinLock s_random_lock;
+ public:
+ class Init {
+ public:
+ /* Initialization. */
+ static size_t GetRealMemorySize();
+ static size_t GetIntendedMemorySize();
+ static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
+ static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
+ static bool ShouldIncreaseThreadResourceLimit();
+ static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
+ static size_t GetApplicationPoolSize();
+ static size_t GetAppletPoolSize();
+ static size_t GetMinimumNonSecureSystemPoolSize();
+ static u8 GetDebugLogUartPort();
+
+ /* Randomness. */
+ static void GenerateRandom(u64 *dst, size_t count);
+ static u64 GenerateRandomRange(u64 min, u64 max);
+ };
+ public:
+ /* Initialization. */
+ static NOINLINE void InitializePhase1(bool skip_target_system = false);
+ static NOINLINE void InitializePhase2();
+ static NOINLINE u32 GetCreateProcessMemoryPool();
+
+ /* Randomness. */
+ static void GenerateRandom(u64 *dst, size_t count);
+ static u64 GenerateRandomRange(u64 min, u64 max);
+ static u64 GenerateRandomU64();
+
+        /* Register access. */
+ static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
+ static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
+
+ static u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address);
+ static void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value);
+
+ /* Power management. */
+ static void SleepSystem();
+ static NORETURN void StopSystem(void *arg = nullptr);
+
+ /* User access. */
+ #if defined(ATMOSPHERE_ARCH_ARM64)
+ static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
+ #endif
+
+ /* Secure Memory. */
+ static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
+ static Result AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool);
+ static void FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool);
+ protected:
+        template<typename F>
+ static ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) {
+ /* Handle the case where the difference is too large to represent. */
+            if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
+ return f();
+ }
+
+ /* Iterate until we get a value in range. */
+ const u64 range_size = ((max + 1) - min);
+            const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
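+            /* Values >= effective_max are rejected below, so the final modulo is unbiased. */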
+ while (true) {
+ if (const u64 rnd = f(); rnd < effective_max) {
+ return min + (rnd % range_size);
+ }
+ }
+ }
+
+ /* User access. */
+ #if defined(ATMOSPHERE_ARCH_ARM64)
+ static void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args);
+ #endif
+ };
+
+}
\ No newline at end of file
diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp
index bcb53ec20..87b72c0fa 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp
@@ -21,6 +21,7 @@ namespace ams::kern {
class KTargetSystem {
private:
+ friend class KSystemControlBase;
friend class KSystemControl;
private:
static inline constinit bool s_is_debug_mode;
diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_system_control.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_system_control.hpp
index 0ea19592d..cc0edc55c 100644
--- a/libraries/libmesosphere/include/mesosphere/kern_select_system_control.hpp
+++ b/libraries/libmesosphere/include/mesosphere/kern_select_system_control.hpp
@@ -15,6 +15,7 @@
*/
#pragma once
#include
+#include <mesosphere/kern_k_system_control_base.hpp>
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
#include
@@ -33,3 +34,18 @@
#else
#error "Unknown board for KSystemControl"
#endif
+
+namespace ams::kern {
+
+ ALWAYS_INLINE u32 KSystemControlBase::ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
+ u32 v;
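+        /* A zero mask reads the register without modifying any bits. */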
+ KSystemControl::ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
+ return v;
+ }
+
+ ALWAYS_INLINE void KSystemControlBase::WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
+ u32 v;
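+        /* A full mask overwrites the entire register with the given value. */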
+ KSystemControl::ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
+ }
+
+}
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp
index 1a244b362..6927c714c 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp
@@ -1130,7 +1130,7 @@ namespace ams::kern::board::nintendo::nx {
size_t cur_size;
{
/* Get the current contiguous range. */
- KPageTableBase::MemoryRange contig_range = {};
+            KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
R_TRY(page_table->OpenMemoryRangeForMapDeviceAddressSpace(std::addressof(contig_range), process_address + mapped_size, size - mapped_size, ConvertToKMemoryPermission(device_perm), is_aligned));
/* Ensure we close the range when we're done. */
@@ -1288,7 +1288,7 @@ namespace ams::kern::board::nintendo::nx {
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* We need to traverse the ranges that make up our mapping, to make sure they're all good. Start by getting a contiguous range. */
- KPageTableBase::MemoryRange contig_range = {};
+        KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), process_address, size))) {
return false;
}
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp
index 2a08929cb..713a8e0c6 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp
@@ -73,7 +73,7 @@ namespace ams::kern::board::nintendo::nx {
void PowerOnCpu(int core_id, KPhysicalAddress entry_phys_addr, u64 context_id) {
/* Request the secure monitor power on the core. */
- smc::CpuOn(cpu::MultiprocessorAffinityRegisterAccessor().GetCpuOnArgument() | core_id, GetInteger(entry_phys_addr), context_id);
+        ::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor>(cpu::MultiprocessorAffinityRegisterAccessor().GetCpuOnArgument() | core_id, GetInteger(entry_phys_addr), context_id);
}
void WaitOtherCpuPowerOff() {
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
index 3f91ecfb7..ed1fc14b9 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp
@@ -21,8 +21,7 @@ namespace ams::kern::board::nintendo::nx {
namespace {
- constexpr uintptr_t DramPhysicalAddress = 0x80000000;
- constexpr size_t SecureAlignment = 128_KB;
+ constexpr size_t SecureAlignment = 128_KB;
/* Global variables for panic. */
constinit bool g_call_smc_on_panic;
@@ -38,22 +37,6 @@ namespace ams::kern::board::nintendo::nx {
constinit KPhysicalAddress g_secure_region_phys_addr = Null<KPhysicalAddress>;
constinit size_t g_secure_region_size = 0;
- /* Global variables for randomness. */
- /* Nintendo uses std::mt19937_t for randomness. */
- /* To save space (and because mt19337_t isn't secure anyway), */
- /* We will use TinyMT. */
- constinit bool g_initialized_random_generator;
- constinit util::TinyMT g_random_generator{util::ConstantInitialize};
- constinit KSpinLock g_random_lock;
-
- ALWAYS_INLINE size_t GetRealMemorySizeForInit() {
- /* TODO: Move this into a header for the MC in general. */
- constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
- u32 config_value;
- MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
-            return static_cast<size_t>(config_value & 0x3FFF) << 20;
- }
-
ALWAYS_INLINE util::BitPack32 GetKernelConfigurationForInit() {
u64 value = 0;
smc::init::GetConfig(&value, 1, smc::ConfigItem::KernelConfiguration);
@@ -86,7 +69,7 @@ namespace ams::kern::board::nintendo::nx {
ALWAYS_INLINE u64 GenerateRandomU64ForInit() {
u64 value;
- smc::init::GenerateRandomBytes(&value, sizeof(value));
+ smc::init::GenerateRandomBytes(std::addressof(value), sizeof(value));
return value;
}
@@ -96,27 +79,6 @@ namespace ams::kern::board::nintendo::nx {
return value;
}
- ALWAYS_INLINE u64 GenerateRandomU64FromGenerator() {
- return g_random_generator.GenerateRandomU64();
- }
-
- template
- ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) {
- /* Handle the case where the difference is too large to represent. */
-            if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
- return f();
- }
-
- /* Iterate until we get a value in range. */
- const u64 range_size = ((max + 1) - min);
-            const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
- while (true) {
- if (const u64 rnd = f(); rnd < effective_max) {
- return min + (rnd % range_size);
- }
- }
- }
-
ALWAYS_INLINE u64 GetConfigU64(smc::ConfigItem which) {
u64 value;
smc::GetConfig(&value, 1, which);
@@ -324,6 +286,14 @@ namespace ams::kern::board::nintendo::nx {
}
/* Initialization. */
+ size_t KSystemControl::Init::GetRealMemorySize() {
+ /* TODO: Move this into a header for the MC in general. */
+ constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
+ u32 config_value;
+ MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
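+        /* Bits 13:0 of the configuration register encode the DRAM size in megabytes. */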
+        return static_cast<size_t>(config_value & 0x3FFF) << 20;
+ }
+
size_t KSystemControl::Init::GetIntendedMemorySize() {
switch (GetKernelConfigurationForInit().Get<smc::KernelConfiguration::MemorySize>()) {
case smc::MemorySize_4GB:
@@ -336,23 +306,6 @@ namespace ams::kern::board::nintendo::nx {
}
}
- KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) {
- const size_t real_dram_size = GetRealMemorySizeForInit();
- const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
- if (intended_dram_size * 2 < real_dram_size) {
- return base_address;
- } else {
- return base_address + ((real_dram_size - intended_dram_size) / 2);
- }
- }
-
- void KSystemControl::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
- *out = {
- .address = GetInteger(GetKernelPhysicalBaseAddress(DramPhysicalAddress)) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax,
- ._08 = 0,
- };
- }
-
bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
return GetKernelConfigurationForInit().Get<smc::KernelConfiguration::IncreaseThreadResourceLimit>();
}
@@ -424,17 +377,17 @@ namespace ams::kern::board::nintendo::nx {
}
void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
- smc::init::CpuOn(core_id, entrypoint, arg);
+        MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor>(core_id, entrypoint, arg)) == 0);
}
/* Randomness for Initialization. */
- void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) {
- MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
- smc::init::GenerateRandomBytes(dst, size);
+ void KSystemControl::Init::GenerateRandom(u64 *dst, size_t count) {
+ MESOSPHERE_INIT_ABORT_UNLESS(count <= 7);
+ smc::init::GenerateRandomBytes(dst, count * sizeof(u64));
}
u64 KSystemControl::Init::GenerateRandomRange(u64 min, u64 max) {
- return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
+ return KSystemControlBase::GenerateUniformRange(min, max, GenerateRandomU64ForInit);
}
/* System Initialization. */
@@ -443,8 +396,8 @@ namespace ams::kern::board::nintendo::nx {
{
u64 seed;
smc::GenerateRandomBytes(std::addressof(seed), sizeof(seed));
-            g_random_generator.Initialize(reinterpret_cast<u32 *>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
- g_initialized_random_generator = true;
+            s_random_generator.Initialize(reinterpret_cast<u32 *>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
+ s_initialized_random_generator = true;
}
/* Set IsDebugMode. */
@@ -483,25 +436,8 @@ namespace ams::kern::board::nintendo::nx {
smc::ConfigureCarveout(0, carveout.GetAddress(), carveout.GetSize());
}
- /* System ResourceLimit initialization. */
- {
- /* Construct the resource limit object. */
- KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit();
- KAutoObject::Create(std::addressof(sys_res_limit));
- sys_res_limit.Initialize();
-
- /* Set the initial limits. */
- const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes();
- const auto &slab_counts = init::GetSlabResourceCounts();
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size));
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread));
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent));
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory));
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession));
-
- /* Reserve system memory. */
- MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size));
- }
+ /* Initialize the system resource limit (and potentially other things). */
+ KSystemControlBase::InitializePhase1(true);
}
void KSystemControl::InitializePhase2() {
@@ -520,11 +456,8 @@ namespace ams::kern::board::nintendo::nx {
g_secure_applet_memory_address = KMemoryLayout::GetLinearVirtualAddress(secure_applet_memory_phys_addr);
}
- /* Initialize KTrace. */
- if constexpr (IsKTraceEnabled) {
- const auto &ktrace = KMemoryLayout::GetKernelTraceBufferRegion();
- KTrace::Initialize(ktrace.GetAddress(), ktrace.GetSize());
- }
+ /* Initialize KTrace (and potentially other init). */
+ KSystemControlBase::InitializePhase2();
}
u32 KSystemControl::GetCreateProcessMemoryPool() {
@@ -546,29 +479,29 @@ namespace ams::kern::board::nintendo::nx {
}
/* Randomness. */
- void KSystemControl::GenerateRandomBytes(void *dst, size_t size) {
- MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
- smc::GenerateRandomBytes(dst, size);
+ void KSystemControl::GenerateRandom(u64 *dst, size_t count) {
+ MESOSPHERE_INIT_ABORT_UNLESS(count <= 7);
+ smc::GenerateRandomBytes(dst, count * sizeof(u64));
}
u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
KScopedInterruptDisable intr_disable;
- KScopedSpinLock lk(g_random_lock);
+ KScopedSpinLock lk(s_random_lock);
- if (AMS_LIKELY(g_initialized_random_generator)) {
- return GenerateUniformRange(min, max, GenerateRandomU64FromGenerator);
+ if (AMS_LIKELY(s_initialized_random_generator)) {
+ return KSystemControlBase::GenerateUniformRange(min, max, [] ALWAYS_INLINE_LAMBDA () -> u64 { return s_random_generator.GenerateRandomU64(); });
} else {
- return GenerateUniformRange(min, max, GenerateRandomU64FromSmc);
+ return KSystemControlBase::GenerateUniformRange(min, max, GenerateRandomU64FromSmc);
}
}
u64 KSystemControl::GenerateRandomU64() {
KScopedInterruptDisable intr_disable;
- KScopedSpinLock lk(g_random_lock);
+ KScopedSpinLock lk(s_random_lock);
- if (AMS_LIKELY(g_initialized_random_generator)) {
- return GenerateRandomU64FromGenerator();
+ if (AMS_LIKELY(s_initialized_random_generator)) {
+ return s_random_generator.GenerateRandomU64();
} else {
return GenerateRandomU64FromSmc();
}
@@ -672,52 +605,18 @@ namespace ams::kern::board::nintendo::nx {
}
/* User access. */
- void KSystemControl::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
- /* Get the function id for the current call. */
- u64 function_id = args->r[0];
-
- /* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */
- auto &page_table = GetCurrentProcess().GetPageTable();
- auto *bim = page_table.GetBlockInfoManager();
-
- constexpr size_t MaxMappedRegisters = 7;
-        std::array<KPageGroup, MaxMappedRegisters> page_groups = { KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), };
-
- for (size_t i = 0; i < MaxMappedRegisters; i++) {
- const size_t reg_id = i + 1;
- if (function_id & (1ul << (8 + reg_id))) {
- /* Create and open a new page group for the address. */
- KVirtualAddress virt_addr = args->r[reg_id];
-
- if (R_SUCCEEDED(page_table.MakeAndOpenPageGroup(std::addressof(page_groups[i]), util::AlignDown(GetInteger(virt_addr), PageSize), 1, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None))) {
- /* Translate the virtual address to a physical address. */
- const auto it = page_groups[i].begin();
- MESOSPHERE_ASSERT(it != page_groups[i].end());
- MESOSPHERE_ASSERT(it->GetNumPages() == 1);
-
- args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
- } else {
- /* If we couldn't map, we should clear the address. */
- args->r[reg_id] = 0;
- }
- }
- }
-
+ void KSystemControl::CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
/* Invoke the secure monitor. */
- smc::CallSecureMonitorFromUser(args);
-
- /* Make sure that we close any pages that we opened. */
- for (size_t i = 0; i < MaxMappedRegisters; i++) {
- page_groups[i].Close();
- }
+ return smc::CallSecureMonitorFromUser(args);
}
/* Secure Memory. */
size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
if (pool == KMemoryManager::Pool_Applet) {
return 0;
+ } else {
+ return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
}
- return size;
}
Result KSystemControl::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp
index 2ec4cbe2c..6b1e3923f 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp
@@ -20,10 +20,6 @@ namespace ams::kern::board::nintendo::nx::smc {
namespace {
- struct SecureMonitorArguments {
- u64 x[8];
- };
-
enum UserFunctionId : u32 {
UserFunctionId_SetConfig = 0xC3000401,
UserFunctionId_GetConfigUser = 0xC3000002,
@@ -45,9 +41,6 @@ namespace ams::kern::board::nintendo::nx::smc {
};
enum FunctionId : u32 {
- FunctionId_CpuSuspend = 0xC4000001,
- FunctionId_CpuOff = 0x84000002,
- FunctionId_CpuOn = 0xC4000003,
FunctionId_GetConfig = 0xC3000004,
FunctionId_GenerateRandomBytes = 0xC3000005,
FunctionId_Panic = 0xC3000006,
@@ -58,171 +51,60 @@ namespace ams::kern::board::nintendo::nx::smc {
FunctionId_SetConfig = 0xC3000409,
};
- void CallPrivilegedSecureMonitorFunction(SecureMonitorArguments &args) {
- /* Load arguments into registers. */
- register u64 x0 asm("x0") = args.x[0];
- register u64 x1 asm("x1") = args.x[1];
- register u64 x2 asm("x2") = args.x[2];
- register u64 x3 asm("x3") = args.x[3];
- register u64 x4 asm("x4") = args.x[4];
- register u64 x5 asm("x5") = args.x[5];
- register u64 x6 asm("x6") = args.x[6];
- register u64 x7 asm("x7") = args.x[7];
-
- /* Actually make the call. */
- {
- /* Disable interrupts while making the call. */
- KScopedInterruptDisable intr_disable;
-
- {
- /* Backup the current thread pointer. */
- const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
-
- __asm__ __volatile__("smc #1"
- : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
- :
- : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
- );
-
- /* Restore the current thread pointer into X18. */
- cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
-
- /* Store arguments to output. */
- args.x[0] = x0;
- args.x[1] = x1;
- args.x[2] = x2;
- args.x[3] = x3;
- args.x[4] = x4;
- args.x[5] = x5;
- args.x[6] = x6;
- args.x[7] = x7;
- }
- }
- }
-
- void CallUserSecureMonitorFunction(ams::svc::lp64::SecureMonitorArguments *args) {
- /* Load arguments into registers. */
- register u64 x0 asm("x0") = args->r[0];
- register u64 x1 asm("x1") = args->r[1];
- register u64 x2 asm("x2") = args->r[2];
- register u64 x3 asm("x3") = args->r[3];
- register u64 x4 asm("x4") = args->r[4];
- register u64 x5 asm("x5") = args->r[5];
- register u64 x6 asm("x6") = args->r[6];
- register u64 x7 asm("x7") = args->r[7];
-
- /* Actually make the call. */
- {
- /* Disable interrupts while making the call. */
- KScopedInterruptDisable intr_disable;
-
- {
- /* Backup the current thread pointer. */
- const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
-
- __asm__ __volatile__("smc #0"
- : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
- :
- : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
- );
-
- /* Restore the current thread pointer into X18. */
- cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
-
- /* Store arguments to output. */
- args->r[0] = x0;
- args->r[1] = x1;
- args->r[2] = x2;
- args->r[3] = x3;
- args->r[4] = x4;
- args->r[5] = x5;
- args->r[6] = x6;
- args->r[7] = x7;
- }
- }
- }
-
- void CallPrivilegedSecureMonitorFunctionForInit(SecureMonitorArguments &args) {
- /* Load arguments into registers. */
- register u64 x0 asm("x0") = args.x[0];
- register u64 x1 asm("x1") = args.x[1];
- register u64 x2 asm("x2") = args.x[2];
- register u64 x3 asm("x3") = args.x[3];
- register u64 x4 asm("x4") = args.x[4];
- register u64 x5 asm("x5") = args.x[5];
- register u64 x6 asm("x6") = args.x[6];
- register u64 x7 asm("x7") = args.x[7];
-
- /* Actually make the call. */
- __asm__ __volatile__("smc #1"
- : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
- :
- : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
- );
-
- /* Store arguments to output. */
- args.x[0] = x0;
- args.x[1] = x1;
- args.x[2] = x2;
- args.x[3] = x3;
- args.x[4] = x4;
- args.x[5] = x5;
- args.x[6] = x6;
- args.x[7] = x7;
- }
-
/* Global lock for generate random bytes. */
- KSpinLock g_generate_random_lock;
+ constinit KSpinLock g_generate_random_lock;
}
/* SMC functionality needed for init. */
namespace init {
- void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
- SecureMonitorArguments args = { FunctionId_CpuOn, core_id, entrypoint, arg };
- CallPrivilegedSecureMonitorFunctionForInit(args);
- }
-
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
-            SecureMonitorArguments args = { FunctionId_GetConfig, static_cast<u32>(config_item) };
-            CallPrivilegedSecureMonitorFunctionForInit(args);
-            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
+            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
+
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
+            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
+
for (size_t i = 0; i < num_qwords && i < 7; i++) {
- out[i] = args.x[1 + i];
+ out[i] = args.r[1 + i];
}
}
void GenerateRandomBytes(void *dst, size_t size) {
/* Call SmcGenerateRandomBytes() */
- SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size };
- MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0]));
+ ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
+ MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
- CallPrivilegedSecureMonitorFunctionForInit(args);
-            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
+            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
/* Copy output. */
- std::memcpy(dst, std::addressof(args.x[1]), size);
+ std::memcpy(dst, std::addressof(args.r[1]), size);
}
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
- SecureMonitorArguments args = { FunctionId_ReadWriteRegister, address, mask, value };
- CallPrivilegedSecureMonitorFunctionForInit(args);
- *out = args.x[1];
-            return static_cast<SmcResult>(args.x[0]) == SmcResult::Success;
+            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
+
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
+            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
+
+            *out = args.r[1];
+
+            return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
}
bool TryGetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
-        SecureMonitorArguments args = { FunctionId_GetConfig, static_cast<u32>(config_item) };
-        CallPrivilegedSecureMonitorFunction(args);
-        if (static_cast<SmcResult>(args.x[0]) != SmcResult::Success) {
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
+
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+        if (AMS_UNLIKELY(static_cast<SmcResult>(args.r[0]) != SmcResult::Success)) {
return false;
}
for (size_t i = 0; i < num_qwords && i < 7; i++) {
- out[i] = args.x[1 + i];
+ out[i] = args.r[1 + i];
}
return true;
@@ -233,55 +115,58 @@ namespace ams::kern::board::nintendo::nx::smc {
}
bool SetConfig(ConfigItem config_item, u64 value) {
-        SecureMonitorArguments args = { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value };
-        CallPrivilegedSecureMonitorFunction(args);
-        return static_cast<SmcResult>(args.x[0]) == SmcResult::Success;
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
+
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+
+        return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
- SecureMonitorArguments args = { FunctionId_ReadWriteRegister, address, mask, value };
- CallPrivilegedSecureMonitorFunction(args);
-        *out = static_cast<u32>(args.x[1]);
-        return static_cast<SmcResult>(args.x[0]) == SmcResult::Success;
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
+
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+
+        *out = static_cast<u32>(args.r[1]);
+        return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
void ConfigureCarveout(size_t which, uintptr_t address, size_t size) {
-        SecureMonitorArguments args = { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) };
-        CallPrivilegedSecureMonitorFunction(args);
-        MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
-    }
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
-    void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
-        SecureMonitorArguments args = { FunctionId_CpuOn, core_id, static_cast<u64>(entrypoint), static_cast<u64>(arg) };
-        CallPrivilegedSecureMonitorFunction(args);
-        MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+
+        MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
}
void GenerateRandomBytes(void *dst, size_t size) {
/* Setup for call. */
- SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size };
- MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0]));
+ ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
+ MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
/* Make call. */
{
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(g_generate_random_lock);
- CallPrivilegedSecureMonitorFunction(args);
+
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
}
-        MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
+        MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
/* Copy output. */
- std::memcpy(dst, std::addressof(args.x[1]), size);
+ std::memcpy(dst, std::addressof(args.r[1]), size);
}
void NORETURN Panic(u32 color) {
- SecureMonitorArguments args = { FunctionId_Panic, color };
- CallPrivilegedSecureMonitorFunction(args);
+ ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_Panic, color } };
+
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+
AMS_INFINITE_LOOP();
}
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
- CallUserSecureMonitorFunction(args);
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User, true>(args->r);
}
}
\ No newline at end of file
diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp
index cf1d0cdac..b172fa540 100644
--- a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp
+++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp
@@ -15,10 +15,16 @@
*/
#pragma once
#include
+#include <mesosphere/arch/arm64/kern_secure_monitor_base.hpp>
namespace ams::kern::board::nintendo::nx::smc {
/* Types. */
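+    /* SMC ids select the SMC immediate: user calls are issued as "smc #0", privileged kernel calls as "smc #1". */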
+ enum SmcId {
+ SmcId_User = 0,
+ SmcId_Supervisor = 1,
+ };
+
enum MemorySize {
MemorySize_4GB = 0,
MemorySize_6GB = 1,
@@ -105,15 +111,12 @@ namespace ams::kern::board::nintendo::nx::smc {
bool SetConfig(ConfigItem config_item, u64 value);
- void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
-
void NORETURN Panic(u32 color);
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
namespace init {
- void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
void GenerateRandomBytes(void *dst, size_t size);
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
diff --git a/libraries/libmesosphere/source/board/qemu/virt/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/qemu/virt/kern_k_system_control.cpp
index 53526eece..59972ef65 100644
--- a/libraries/libmesosphere/source/board/qemu/virt/kern_k_system_control.cpp
+++ b/libraries/libmesosphere/source/board/qemu/virt/kern_k_system_control.cpp
@@ -18,447 +18,10 @@
namespace ams::kern::board::qemu::virt {
- namespace {
-
- constexpr uintptr_t DramPhysicalAddress = 0x40000000;
- constexpr size_t SecureAlignment = 128_KB;
-
- /* Global variables for secure memory. */
- constexpr size_t SecureAppletMemorySize = 4_MB;
- constinit KSpinLock g_secure_applet_lock;
- constinit bool g_secure_applet_memory_used = false;
-        constinit KVirtualAddress g_secure_applet_memory_address = Null<KVirtualAddress>;
-
- constinit KSpinLock g_secure_region_lock;
- constinit bool g_secure_region_used = false;
-        constinit KPhysicalAddress g_secure_region_phys_addr = Null<KPhysicalAddress>;
- constinit size_t g_secure_region_size = 0;
-
- /* Global variables for randomness. */
- constinit bool g_initialized_random_generator;
- constinit util::TinyMT g_random_generator;
- constinit KSpinLock g_random_lock;
-
- ALWAYS_INLINE u64 GenerateRandomU64FromGenerator() {
- return g_random_generator.GenerateRandomU64();
- }
-
-        template<typename F>
- ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) {
- /* Handle the case where the difference is too large to represent. */
-            if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
- return f();
- }
-
- /* Iterate until we get a value in range. */
- const u64 range_size = ((max + 1) - min);
-            const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
- while (true) {
- if (const u64 rnd = f(); rnd < effective_max) {
- return min + (rnd % range_size);
- }
- }
- }
-
-
- /* TODO */
-
- ALWAYS_INLINE size_t GetRealMemorySizeForInit() {
- return 4_GB;
- }
-
- bool SetSecureRegion(KPhysicalAddress phys_addr, size_t size) {
- /* Ensure address and size are aligned. */
- if (!util::IsAligned(GetInteger(phys_addr), SecureAlignment)) {
- return false;
- }
- if (!util::IsAligned(size, SecureAlignment)) {
- return false;
- }
-
- /* Disable interrupts and acquire the secure region lock. */
- KScopedInterruptDisable di;
- KScopedSpinLock lk(g_secure_region_lock);
-
- /* If size is non-zero, we're allocating the secure region. Otherwise, we're freeing it. */
- if (size != 0) {
- /* Verify that the secure region is free. */
- if (g_secure_region_used) {
- return false;
- }
-
- /* Set the secure region. */
- g_secure_region_used = true;
- g_secure_region_phys_addr = phys_addr;
- g_secure_region_size = size;
- } else {
- /* Verify that the secure region is in use. */
- if (!g_secure_region_used) {
- return false;
- }
-
- /* Verify that the address being freed is the secure region. */
- if (phys_addr != g_secure_region_phys_addr) {
- return false;
- }
-
- /* Clear the secure region. */
- g_secure_region_used = false;
- g_secure_region_phys_addr = Null;
-                g_secure_region_phys_addr = Null<KPhysicalAddress>;
- }
-
- // /* Configure the carveout with the secure monitor. */
- // smc::ConfigureCarveout(1, GetInteger(phys_addr), size);
-
- return true;
- }
-
- Result AllocateSecureMemoryForApplet(KVirtualAddress *out, size_t size) {
- /* Verify that the size is valid. */
- R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
- R_UNLESS(size <= SecureAppletMemorySize, svc::ResultOutOfMemory());
-
- /* Disable interrupts and acquire the secure applet lock. */
- KScopedInterruptDisable di;
- KScopedSpinLock lk(g_secure_applet_lock);
-
- /* Check that memory is reserved for secure applet use. */
-            MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address != Null<KVirtualAddress>);
-
- /* Verify that the secure applet memory isn't already being used. */
- R_UNLESS(!g_secure_applet_memory_used, svc::ResultOutOfMemory());
-
- /* Return the secure applet memory. */
- g_secure_applet_memory_used = true;
- *out = g_secure_applet_memory_address;
-
- return ResultSuccess();
- }
-
- void FreeSecureMemoryForApplet(KVirtualAddress address, size_t size) {
- /* Disable interrupts and acquire the secure applet lock. */
- KScopedInterruptDisable di;
- KScopedSpinLock lk(g_secure_applet_lock);
-
- /* Verify that the memory being freed is correct. */
- MESOSPHERE_ABORT_UNLESS(address == g_secure_applet_memory_address);
- MESOSPHERE_ABORT_UNLESS(size <= SecureAppletMemorySize);
- MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
- MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_used);
-
- /* Release the secure applet memory. */
- g_secure_applet_memory_used = false;
- }
-
- void EnsureRandomGeneratorSeeded() {
- if (AMS_UNLIKELY(!g_initialized_random_generator)) {
- u64 seed = UINT64_C(0xF5F5F5F5F5F5F5F5);
-                g_random_generator.Initialize(reinterpret_cast<u32 *>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
- g_initialized_random_generator = true;
- }
- }
-
- }
-
- /* Initialization. */
- size_t KSystemControl::Init::GetIntendedMemorySize() {
- return 4_GB;
- }
-
- KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) {
- const size_t real_dram_size = GetRealMemorySizeForInit();
- const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
- if (intended_dram_size * 2 < real_dram_size) {
- return base_address;
- } else {
- return base_address + ((real_dram_size - intended_dram_size) / 2);
- }
- }
-
- void KSystemControl::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
- *out = {
- .address = GetInteger(GetKernelPhysicalBaseAddress(DramPhysicalAddress)) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax,
- ._08 = 0,
- };
- }
-
-
- bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
- return true;
- }
-
-
- size_t KSystemControl::Init::GetApplicationPoolSize() {
- /* Get the base pool size. */
- const size_t base_pool_size = 3285_MB;
-
- /* Return (possibly) adjusted size. */
- return base_pool_size;
- }
-
- size_t KSystemControl::Init::GetAppletPoolSize() {
- /* Get the base pool size. */
- const size_t base_pool_size = 507_MB;
-
- /* Return (possibly) adjusted size. */
- constexpr size_t ExtraSystemMemoryForAtmosphere = 40_MB;
- return base_pool_size - ExtraSystemMemoryForAtmosphere - KTraceBufferSize;
- }
-
- size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() {
- return 0x29C8000;
- }
-
- void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
- smc::init::CpuOn(core_id, entrypoint, arg);
- }
-
- /* Randomness for Initialization. */
- void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) {
- EnsureRandomGeneratorSeeded();
-
-        u8 *dst_8 = static_cast<u8 *>(dst);
- while (size > 0) {
- const u64 random = GenerateRandomU64FromGenerator();
- std::memcpy(dst_8, std::addressof(random), std::min(size, sizeof(u64)));
- size -= std::min(size, sizeof(u64));
- }
- }
-
- u64 KSystemControl::Init::GenerateRandomRange(u64 min, u64 max) {
- EnsureRandomGeneratorSeeded();
-
- return GenerateUniformRange(min, max, GenerateRandomU64FromGenerator);
- }
-
- /* System Initialization. */
- void KSystemControl::InitializePhase1() {
- /* Set IsDebugMode. */
- {
- KTargetSystem::SetIsDebugMode(true);
-
- /* If debug mode, we want to initialize uart logging. */
- KTargetSystem::EnableDebugLogging(true);
- KDebugLog::Initialize();
- }
-
- /* Set Kernel Configuration. */
- {
- KTargetSystem::EnableDebugMemoryFill(false);
- KTargetSystem::EnableUserExceptionHandlers(true);
- KTargetSystem::EnableDynamicResourceLimits(true);
- KTargetSystem::EnableUserPmuAccess(false);
- }
-
- /* Set Kernel Debugging. */
- {
- /* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */
- /* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */
- KTargetSystem::EnableKernelDebugging(true);
- }
-
- /* System ResourceLimit initialization. */
- {
- /* Construct the resource limit object. */
- KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit();
- KAutoObject::Create(std::addressof(sys_res_limit));
- sys_res_limit.Initialize();
-
- /* Set the initial limits. */
- const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes();
- const auto &slab_counts = init::GetSlabResourceCounts();
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size));
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread));
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent));
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory));
- MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession));
-
- /* Reserve system memory. */
- MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size));
- }
- }
-
- void KSystemControl::InitializePhase2() {
- /* Reserve secure applet memory. */
- if (GetTargetFirmware() >= TargetFirmware_5_0_0) {
-            MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address == Null<KVirtualAddress>);
- MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, SecureAppletMemorySize));
-
- constexpr auto SecureAppletAllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
- const KPhysicalAddress secure_applet_memory_phys_addr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption);
-            MESOSPHERE_ABORT_UNLESS(secure_applet_memory_phys_addr != Null<KPhysicalAddress>);
-
- g_secure_applet_memory_address = KMemoryLayout::GetLinearVirtualAddress(secure_applet_memory_phys_addr);
- }
-
- /* Initialize KTrace. */
- if constexpr (IsKTraceEnabled) {
- const auto &ktrace = KMemoryLayout::GetKernelTraceBufferRegion();
- KTrace::Initialize(ktrace.GetAddress(), ktrace.GetSize());
- }
- }
-
- u32 KSystemControl::GetCreateProcessMemoryPool() {
- return KMemoryManager::Pool_Unsafe;
- }
-
- /* Privileged Access. */
- void KSystemControl::ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
- MESOSPHERE_UNUSED(out, address, mask, value);
- MESOSPHERE_UNIMPLEMENTED();
- }
-
- Result KSystemControl::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
- MESOSPHERE_UNUSED(out, address, mask, value);
- MESOSPHERE_UNIMPLEMENTED();
- }
-
- /* Randomness. */
- void KSystemControl::GenerateRandomBytes(void *dst, size_t size) {
- KScopedInterruptDisable intr_disable;
- KScopedSpinLock lk(g_random_lock);
-
-        u8 *dst_8 = static_cast<u8 *>(dst);
- while (size > 0) {
- const u64 random = GenerateRandomU64FromGenerator();
- std::memcpy(dst_8, std::addressof(random), std::min(size, sizeof(u64)));
- size -= std::min(size, sizeof(u64));
- }
- }
-
- u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
- KScopedInterruptDisable intr_disable;
- KScopedSpinLock lk(g_random_lock);
-
- return GenerateUniformRange(min, max, GenerateRandomU64FromGenerator);
- }
-
- u64 KSystemControl::GenerateRandomU64() {
- KScopedInterruptDisable intr_disable;
- KScopedSpinLock lk(g_random_lock);
-
- return GenerateRandomU64FromGenerator();
- }
-
- void KSystemControl::SleepSystem() {
- MESOSPHERE_LOG("SleepSystem() was called\n");
- MESOSPHERE_UNIMPLEMENTED();
- }
-
- void KSystemControl::StopSystem(void *arg) {
- MESOSPHERE_UNUSED(arg);
- AMS_INFINITE_LOOP();
- }
-
/* User access. */
void KSystemControl::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
- /* Get the function id for the current call. */
- u64 function_id = args->r[0];
-
- /* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */
- auto &page_table = GetCurrentProcess().GetPageTable();
- auto *bim = page_table.GetBlockInfoManager();
-
- constexpr size_t MaxMappedRegisters = 7;
-        std::array<KPageGroup, MaxMappedRegisters> page_groups = { KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), };
-
- for (size_t i = 0; i < MaxMappedRegisters; i++) {
- const size_t reg_id = i + 1;
- if (function_id & (1ul << (8 + reg_id))) {
- /* Create and open a new page group for the address. */
- KVirtualAddress virt_addr = args->r[reg_id];
-
- if (R_SUCCEEDED(page_table.MakeAndOpenPageGroup(std::addressof(page_groups[i]), util::AlignDown(GetInteger(virt_addr), PageSize), 1, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None))) {
- /* Translate the virtual address to a physical address. */
- const auto it = page_groups[i].begin();
- MESOSPHERE_ASSERT(it != page_groups[i].end());
- MESOSPHERE_ASSERT(it->GetNumPages() == 1);
-
- args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
- } else {
- /* If we couldn't map, we should clear the address. */
- args->r[reg_id] = 0;
- }
- }
- }
-
/* Invoke the secure monitor. */
- smc::CallSecureMonitorFromUser(args);
-
- /* Make sure that we close any pages that we opened. */
- for (size_t i = 0; i < MaxMappedRegisters; i++) {
- page_groups[i].Close();
- }
- }
-
- /* Secure Memory. */
- size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
- if (pool == KMemoryManager::Pool_Applet) {
- return 0;
- }
- return size;
- }
-
- Result KSystemControl::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
- /* Applet secure memory is handled separately. */
- if (pool == KMemoryManager::Pool_Applet) {
- return AllocateSecureMemoryForApplet(out, size);
- }
-
- /* Ensure the size is aligned. */
- const size_t alignment = (pool == KMemoryManager::Pool_System ? PageSize : SecureAlignment);
- R_UNLESS(util::IsAligned(size, alignment), svc::ResultInvalidSize());
-
- /* Allocate the memory. */
- const size_t num_pages = size / PageSize;
- const KPhysicalAddress paddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
- R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
-
- /* Ensure we don't leak references to the memory on error. */
- auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(paddr, num_pages); };
-
- /* If the memory isn't already secure, set it as secure. */
- if (pool != KMemoryManager::Pool_System) {
- /* Set the secure region. */
- R_UNLESS(SetSecureRegion(paddr, size), svc::ResultOutOfMemory());
- }
-
- /* We succeeded. */
- mem_guard.Cancel();
- *out = KPageTable::GetHeapVirtualAddress(paddr);
- return ResultSuccess();
- }
-
- void KSystemControl::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {
- /* Applet secure memory is handled separately. */
- if (pool == KMemoryManager::Pool_Applet) {
- return FreeSecureMemoryForApplet(address, size);
- }
-
- /* Ensure the size is aligned. */
- const size_t alignment = (pool == KMemoryManager::Pool_System ? PageSize : SecureAlignment);
- MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), alignment));
- MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, alignment));
-
- /* If the memory isn't secure system, reset the secure region. */
- if (pool != KMemoryManager::Pool_System) {
- /* Check that the size being freed is the current secure region size. */
- MESOSPHERE_ABORT_UNLESS(g_secure_region_size == size);
-
- /* Get the physical address. */
- const KPhysicalAddress paddr = KPageTable::GetHeapPhysicalAddress(address);
- MESOSPHERE_ABORT_UNLESS(paddr != Null<KPhysicalAddress>);
-
- /* Check that the memory being freed is the current secure region. */
- MESOSPHERE_ABORT_UNLESS(paddr == g_secure_region_phys_addr);
-
- /* Free the secure region. */
- MESOSPHERE_ABORT_UNLESS(SetSecureRegion(paddr, 0));
- }
-
- /* Close the secure region's pages. */
- Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize);
+ return smc::CallSecureMonitorFromUser(args);
}
}
\ No newline at end of file
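
Note on the user SMC path removed here (it reappears in KSystemControlBase::CallSecureMonitorFromUser further down): bits 9..15 of the function id in X0 flag which of X1..X7 carry user pointers, which get opened as single-page page groups and translated to physical addresses before the secure monitor is invoked. A minimal sketch of that bit convention, using a purely hypothetical function id:

    #include <cstdint>
    #include <cstdio>

    int main() {
        /* Hypothetical function id: bits 9 and 10 set, i.e. X1 and X2 hold user pointers. */
        constexpr std::uint64_t function_id = 0xC3000000 | (1ull << 9) | (1ull << 10);

        for (std::size_t reg_id = 1; reg_id <= 7; ++reg_id) {
            const bool is_pointer = (function_id & (1ull << (8 + reg_id))) != 0;
            std::printf("X%zu: %s\n", reg_id, is_pointer ? "user pointer -> translate to physical" : "passed through unchanged");
        }
        return 0;
    }
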
diff --git a/libraries/libmesosphere/source/board/qemu/virt/kern_secure_monitor.cpp b/libraries/libmesosphere/source/board/qemu/virt/kern_secure_monitor.cpp
index c4fa1c380..a97fd53b7 100644
--- a/libraries/libmesosphere/source/board/qemu/virt/kern_secure_monitor.cpp
+++ b/libraries/libmesosphere/source/board/qemu/virt/kern_secure_monitor.cpp
@@ -20,10 +20,6 @@ namespace ams::kern::board::qemu::virt::smc {
namespace {
- struct SecureMonitorArguments {
- u64 x[8];
- };
-
enum UserFunctionId : u32 {
UserFunctionId_SetConfig = 0xC3000401,
UserFunctionId_GetConfig = 0xC3000002,
@@ -44,102 +40,6 @@ namespace ams::kern::board::qemu::virt::smc {
UserFunctionId_PrepareEsCommonTitleKey = 0xC3000012,
};
- enum FunctionId : u32 {
- FunctionId_CpuSuspend = 0xC4000001,
- FunctionId_CpuOff = 0x84000002,
- FunctionId_CpuOn = 0xC4000003,
- };
-
- void CallPrivilegedSecureMonitorFunction(SecureMonitorArguments &args) {
- /* Load arguments into registers. */
- register u64 x0 asm("x0") = args.x[0];
- register u64 x1 asm("x1") = args.x[1];
- register u64 x2 asm("x2") = args.x[2];
- register u64 x3 asm("x3") = args.x[3];
- register u64 x4 asm("x4") = args.x[4];
- register u64 x5 asm("x5") = args.x[5];
- register u64 x6 asm("x6") = args.x[6];
- register u64 x7 asm("x7") = args.x[7];
-
- /* Actually make the call. */
- {
- /* Disable interrupts while making the call. */
- KScopedInterruptDisable intr_disable;
-
- {
- /* Backup the current thread pointer. */
- const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
-
- __asm__ __volatile__("smc #0"
- : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
- :
- : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
- );
-
- /* Restore the current thread pointer into X18. */
- cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
-
- /* Store arguments to output. */
- args.x[0] = x0;
- args.x[1] = x1;
- args.x[2] = x2;
- args.x[3] = x3;
- args.x[4] = x4;
- args.x[5] = x5;
- args.x[6] = x6;
- args.x[7] = x7;
- }
- }
- }
-
- void CallPrivilegedSecureMonitorFunctionForInit(SecureMonitorArguments &args) {
- /* Load arguments into registers. */
- register u64 x0 asm("x0") = args.x[0];
- register u64 x1 asm("x1") = args.x[1];
- register u64 x2 asm("x2") = args.x[2];
- register u64 x3 asm("x3") = args.x[3];
- register u64 x4 asm("x4") = args.x[4];
- register u64 x5 asm("x5") = args.x[5];
- register u64 x6 asm("x6") = args.x[6];
- register u64 x7 asm("x7") = args.x[7];
-
- /* Actually make the call. */
- __asm__ __volatile__("smc #0"
- : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
- :
- : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
- );
-
- /* Store arguments to output. */
- args.x[0] = x0;
- args.x[1] = x1;
- args.x[2] = x2;
- args.x[3] = x3;
- args.x[4] = x4;
- args.x[5] = x5;
- args.x[6] = x6;
- args.x[7] = x7;
- }
-
- /* Global lock for generate random bytes. */
- KSpinLock g_generate_random_lock;
-
- }
-
- /* SMC functionality needed for init. */
- namespace init {
-
- void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
- SecureMonitorArguments args = { FunctionId_CpuOn, core_id, entrypoint, arg };
- CallPrivilegedSecureMonitorFunctionForInit(args);
- }
-
- }
-
- void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
- SecureMonitorArguments args = { FunctionId_CpuOn, core_id, static_cast<u64>(entrypoint), static_cast<u64>(arg) };
- CallPrivilegedSecureMonitorFunction(args);
- MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
}
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
diff --git a/libraries/libmesosphere/source/board/qemu/virt/kern_secure_monitor.hpp b/libraries/libmesosphere/source/board/qemu/virt/kern_secure_monitor.hpp
index 869624007..8aeaec112 100644
--- a/libraries/libmesosphere/source/board/qemu/virt/kern_secure_monitor.hpp
+++ b/libraries/libmesosphere/source/board/qemu/virt/kern_secure_monitor.hpp
@@ -63,14 +63,6 @@ namespace ams::kern::board::qemu::virt::smc {
NotPermitted = 6,
};
- void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
-
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
- namespace init {
-
- void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
-
- }
-
}
\ No newline at end of file
diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
index 8f20de0e7..a0b4d303f 100644
--- a/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp
@@ -19,7 +19,6 @@ namespace ams::kern {
namespace {
- constexpr uintptr_t DramPhysicalAddress = 0x80000000;
constexpr size_t ReservedEarlyDramSize = 0x60000;
constexpr size_t CarveoutAlignment = 0x20000;
@@ -100,7 +99,7 @@ namespace ams::kern {
void SetupDramPhysicalMemoryRegions() {
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
- const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
+ const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress);
/* Insert blocks into the tree. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
@@ -173,16 +172,21 @@ namespace ams::kern {
InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
- /* Insert the pool management region. */
+ /* Determine final total overhead size. */
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
- const uintptr_t pool_management_start = unsafe_system_pool_start - total_overhead_size;
+
+ /* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the four UserPool regions are contiguous. */
+
+ /* Insert the system pool. */
+ const uintptr_t system_pool_start = pool_partitions_start + total_overhead_size;
+ const size_t system_pool_size = unsafe_system_pool_start - system_pool_start;
+ InsertPoolPartitionRegionIntoBothTrees(system_pool_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
+
+ /* Insert the pool management region. */
+ const uintptr_t pool_management_start = pool_partitions_start;
const size_t pool_management_size = total_overhead_size;
u32 pool_management_attr = 0;
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
-
- /* Insert the system pool. */
- const uintptr_t system_pool_size = pool_management_start - pool_partitions_start;
- InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
} else {
/* On < 5.0.0, setup a legacy 2-pool layout for backwards compatibility. */
@@ -249,14 +253,18 @@ namespace ams::kern {
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
}
- /* Insert the secure pool. */
- InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, secure_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
-
- /* Insert the pool management region. */
+ /* Validate the true overhead size. */
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= approximate_total_overhead_size);
- const uintptr_t pool_management_start = pool_partitions_start + secure_pool_size;
- const size_t pool_management_size = unsafe_memory_start - pool_management_start;
+ /* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the UserPool regions are contiguous. */
+
+ /* Insert the secure pool. */
+ const uintptr_t secure_pool_start = unsafe_memory_start - secure_pool_size;
+ InsertPoolPartitionRegionIntoBothTrees(secure_pool_start, secure_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
+
+ /* Insert the pool management region. */
+ const uintptr_t pool_management_start = pool_partitions_start;
+ const size_t pool_management_size = secure_pool_start - pool_management_start;
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= pool_management_size);
u32 pool_management_attr = 0;
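
The NOTE above is the substantive layout change in this file: the management metadata moves to the front of the pool partition area, and the system pool is placed directly after it, ending where the non-secure system pool begins. A small sketch of the resulting arithmetic, with purely hypothetical addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
        /* Hypothetical values, for illustration only. */
        constexpr std::uint64_t pool_partitions_start    = 0x180000000;
        constexpr std::uint64_t unsafe_system_pool_start = 0x182000000;
        constexpr std::uint64_t total_overhead_size      = 0x200000;

        /* Old order: [System][Management][NonSecure]... -> the system pool is cut off from the other user pools. */
        /* New order: [Management][System][NonSecure]... -> System, NonSecure, Applet, Application are contiguous. */
        const std::uint64_t pool_management_start = pool_partitions_start;
        const std::uint64_t system_pool_start     = pool_partitions_start + total_overhead_size;
        const std::uint64_t system_pool_size      = unsafe_system_pool_start - system_pool_start;

        std::printf("management: %#llx, size %#llx\n", (unsigned long long)pool_management_start, (unsigned long long)total_overhead_size);
        std::printf("system:     %#llx, size %#llx\n", (unsigned long long)system_pool_start, (unsigned long long)system_pool_size);
        return 0;
    }
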
diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.board.qemu_virt.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.board.qemu_virt.cpp
index 578761d65..34631169a 100644
--- a/libraries/libmesosphere/source/kern_k_memory_layout.board.qemu_virt.cpp
+++ b/libraries/libmesosphere/source/kern_k_memory_layout.board.qemu_virt.cpp
@@ -19,12 +19,8 @@ namespace ams::kern {
namespace {
- constexpr uintptr_t DramPhysicalAddress = 0x40000000;
constexpr size_t ReservedEarlyDramSize = 0x00080000;
- constexpr size_t CarveoutAlignment = 0x20000;
- constexpr size_t CarveoutSizeMax = 512_MB - CarveoutAlignment;
-
template<typename... T> requires (std::same_as<T, KMemoryRegionAttr> && ...)
constexpr ALWAYS_INLINE KMemoryRegionType GetMemoryRegionType(KMemoryRegionType base, T... attr) {
return util::FromUnderlying(util::ToUnderlying(base) | (util::ToUnderlying(attr) | ...));
@@ -32,6 +28,7 @@ namespace ams::kern {
void InsertPoolPartitionRegionIntoBothTrees(size_t start, size_t size, KMemoryRegionType phys_type, KMemoryRegionType virt_type, u32 &cur_attr) {
const u32 attr = cur_attr++;
+
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(start, size, phys_type, attr));
const KMemoryRegion *phys = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(phys_type, attr);
MESOSPHERE_INIT_ABORT_UNLESS(phys != nullptr);
@@ -50,7 +47,7 @@ namespace ams::kern {
void SetupDramPhysicalMemoryRegions() {
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
- const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
+ const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress);
/* Insert blocks into the tree. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
@@ -76,9 +73,6 @@ namespace ams::kern {
const KMemoryRegion *kernel_dram_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramKernelBase);
MESOSPHERE_INIT_ABORT_UNLESS(kernel_dram_region != nullptr);
- const uintptr_t kernel_dram_start = kernel_dram_region->GetAddress();
- MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(kernel_dram_start, CarveoutAlignment));
-
/* Find the start of the pool partitions region. */
const KMemoryRegion *pool_partitions_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(KMemoryRegionType_DramPoolPartition, 0);
MESOSPHERE_INIT_ABORT_UNLESS(pool_partitions_region != nullptr);
@@ -93,43 +87,56 @@ namespace ams::kern {
/* Decide on starting addresses for our pools. */
const uintptr_t application_pool_start = pool_end - application_pool_size;
const uintptr_t applet_pool_start = application_pool_start - applet_pool_size;
- const uintptr_t unsafe_system_pool_start = std::min(kernel_dram_start + CarveoutSizeMax, util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
+ const uintptr_t unsafe_system_pool_start = util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, PageSize);
const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;
/* We want to arrange application pool depending on where the middle of dram is. */
const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
u32 cur_pool_attr = 0;
size_t total_overhead_size = 0;
- if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
- InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
- total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
- } else {
- const size_t first_application_pool_size = dram_midpoint - application_pool_start;
- const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint;
- InsertPoolPartitionRegionIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
- InsertPoolPartitionRegionIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
- total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
- total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
+
+ /* Insert the application pool. */
+ if (application_pool_size > 0) {
+ if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
+ InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
+ total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
+ } else {
+ const size_t first_application_pool_size = dram_midpoint - application_pool_start;
+ const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint;
+ InsertPoolPartitionRegionIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
+ InsertPoolPartitionRegionIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
+ total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
+ total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
+ }
}
/* Insert the applet pool. */
- InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
- total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);
+ if (applet_pool_size > 0) {
+ InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
+ total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);
+ }
/* Insert the nonsecure system pool. */
- InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
- total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
+ if (unsafe_system_pool_size > 0) {
+ InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
+ total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
+ }
+
+ /* Determine final total overhead size. */
+ total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
+
+ /* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the four UserPool regions are contiguous. */
+
+ /* Insert the system pool. */
+ const uintptr_t system_pool_start = pool_partitions_start + total_overhead_size;
+ const size_t system_pool_size = unsafe_system_pool_start - system_pool_start;
+ InsertPoolPartitionRegionIntoBothTrees(system_pool_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
/* Insert the pool management region. */
- total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
- const uintptr_t pool_management_start = unsafe_system_pool_start - total_overhead_size;
+ const uintptr_t pool_management_start = pool_partitions_start;
const size_t pool_management_size = total_overhead_size;
u32 pool_management_attr = 0;
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
-
- /* Insert the system pool. */
- const uintptr_t system_pool_size = pool_management_start - pool_partitions_start;
- InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
}
}
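
One more detail visible in the hunk above: when the DRAM midpoint falls inside the application pool, the pool is inserted as two regions split at the midpoint (each insertion bumps cur_pool_attr), rather than as one region straddling it. A quick sketch of that split, with hypothetical extents:

    #include <cstdint>
    #include <cstdio>

    int main() {
        /* Hypothetical extents, for illustration only. */
        constexpr std::uint64_t dram_start = 0x40000000, dram_end = 0x140000000;
        constexpr std::uint64_t application_pool_start = 0xB0000000;
        constexpr std::uint64_t application_pool_size  = 0x80000000;

        const std::uint64_t dram_midpoint = (dram_start + dram_end) / 2; /* 0xC0000000 */
        if (dram_end <= dram_midpoint || dram_midpoint <= application_pool_start) {
            std::printf("single region: %#llx, size %#llx\n", (unsigned long long)application_pool_start, (unsigned long long)application_pool_size);
        } else {
            const std::uint64_t first_size  = dram_midpoint - application_pool_start;
            const std::uint64_t second_size = application_pool_start + application_pool_size - dram_midpoint;
            std::printf("split at midpoint: %#llx below, %#llx above\n", (unsigned long long)first_size, (unsigned long long)second_size);
        }
        return 0;
    }
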
diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp
index 4bfa6a158..5b2b509fe 100644
--- a/libraries/libmesosphere/source/kern_k_process.cpp
+++ b/libraries/libmesosphere/source/kern_k_process.cpp
@@ -238,7 +238,7 @@ namespace ams::kern {
}
/* Generate random entropy. */
- KSystemControl::GenerateRandomBytes(m_entropy, sizeof(m_entropy));
+ KSystemControl::GenerateRandom(m_entropy, util::size(m_entropy));
/* Clear remaining fields. */
m_num_running_threads = 0;
diff --git a/libraries/libmesosphere/source/kern_k_system_control_base.cpp b/libraries/libmesosphere/source/kern_k_system_control_base.cpp
new file mode 100644
index 000000000..a0cf3dfc0
--- /dev/null
+++ b/libraries/libmesosphere/source/kern_k_system_control_base.cpp
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#include <mesosphere.hpp>
+#if defined(ATMOSPHERE_ARCH_ARM64)
+#include <mesosphere/arch/arm64/kern_secure_monitor_base.hpp>
+#endif
+
+namespace ams::kern {
+
+ /* Initialization. */
+ size_t KSystemControlBase::Init::GetRealMemorySize() {
+ return ams::kern::MainMemorySize;
+ }
+
+ size_t KSystemControlBase::Init::GetIntendedMemorySize() {
+ return ams::kern::MainMemorySize;
+ }
+
+ KPhysicalAddress KSystemControlBase::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
+ const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
+ const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
+ if (intended_dram_size * 2 < real_dram_size) {
+ return base_address;
+ } else {
+ return base_address + ((real_dram_size - intended_dram_size) / 2);
+ }
+ }
+
+ void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
+ *out = {
+ .address = GetInteger(KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress)) + KSystemControl::Init::GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax,
+ ._08 = 0,
+ };
+ }
+
+
+ bool KSystemControlBase::Init::ShouldIncreaseThreadResourceLimit() {
+ return true;
+ }
+
+
+ size_t KSystemControlBase::Init::GetApplicationPoolSize() {
+ return 0;
+ }
+
+ size_t KSystemControlBase::Init::GetAppletPoolSize() {
+ return 0;
+ }
+
+ size_t KSystemControlBase::Init::GetMinimumNonSecureSystemPoolSize() {
+ return 0;
+ }
+
+ u8 KSystemControlBase::Init::GetDebugLogUartPort() {
+ return 0;
+ }
+
+ void KSystemControlBase::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
+ #if defined(ATMOSPHERE_ARCH_ARM64)
+ MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0, false>(core_id, entrypoint, arg)) == 0);
+ #else
+ AMS_INFINITE_LOOP();
+ #endif
+ }
+
+ /* Randomness for Initialization. */
+ void KSystemControlBase::Init::GenerateRandom(u64 *dst, size_t count) {
+ if (AMS_UNLIKELY(!s_initialized_random_generator)) {
+ const u64 seed = KHardwareTimer::GetTick();
+ s_random_generator.Initialize(reinterpret_cast<const u32 *>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
+ s_initialized_random_generator = true;
+ }
+
+ for (size_t i = 0; i < count; ++i) {
+ dst[i] = s_random_generator.GenerateRandomU64();
+ }
+ }
+
+ u64 KSystemControlBase::Init::GenerateRandomRange(u64 min, u64 max) {
+ if (AMS_UNLIKELY(!s_initialized_random_generator)) {
+ const u64 seed = KHardwareTimer::GetTick();
+ s_random_generator.Initialize(reinterpret_cast<const u32 *>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
+ s_initialized_random_generator = true;
+ }
+
+ return KSystemControlBase::GenerateUniformRange(min, max, [] ALWAYS_INLINE_LAMBDA () -> u64 { return s_random_generator.GenerateRandomU64(); });
+ }
+
+ /* System Initialization. */
+ void KSystemControlBase::InitializePhase1(bool skip_target_system) {
+ /* Initialize the rng, if we somehow haven't already. */
+ if (AMS_UNLIKELY(!s_initialized_random_generator)) {
+ const u64 seed = KHardwareTimer::GetTick();
+ s_random_generator.Initialize(reinterpret_cast<const u32 *>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
+ s_initialized_random_generator = true;
+ }
+
+ /* Configure KTargetSystem, if we haven't already by an implementation SystemControl. */
+ if (!skip_target_system) {
+ /* Set IsDebugMode. */
+ {
+ KTargetSystem::SetIsDebugMode(true);
+
+ /* If debug mode, we want to initialize uart logging. */
+ KTargetSystem::EnableDebugLogging(true);
+ KDebugLog::Initialize();
+ }
+
+ /* Set Kernel Configuration. */
+ {
+ KTargetSystem::EnableDebugMemoryFill(false);
+ KTargetSystem::EnableUserExceptionHandlers(true);
+ KTargetSystem::EnableDynamicResourceLimits(true);
+ KTargetSystem::EnableUserPmuAccess(false);
+ }
+
+ /* Set Kernel Debugging. */
+ {
+ /* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */
+ /* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */
+ KTargetSystem::EnableKernelDebugging(true);
+ }
+ }
+
+ /* System ResourceLimit initialization. */
+ {
+ /* Construct the resource limit object. */
+ KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit();
+ KAutoObject::Create(std::addressof(sys_res_limit));
+ sys_res_limit.Initialize();
+
+ /* Set the initial limits. */
+ const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes();
+ const auto &slab_counts = init::GetSlabResourceCounts();
+ MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size));
+ MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread));
+ MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent));
+ MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory));
+ MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession));
+
+ /* Reserve system memory. */
+ MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size));
+ }
+ }
+
+ void KSystemControlBase::InitializePhase2() {
+ /* Initialize KTrace. */
+ if constexpr (IsKTraceEnabled) {
+ const auto &ktrace = KMemoryLayout::GetKernelTraceBufferRegion();
+ KTrace::Initialize(ktrace.GetAddress(), ktrace.GetSize());
+ }
+ }
+
+ u32 KSystemControlBase::GetCreateProcessMemoryPool() {
+ return KMemoryManager::Pool_System;
+ }
+
+ /* Privileged Access. */
+ void KSystemControlBase::ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
+ /* TODO */
+ MESOSPHERE_UNUSED(out, address, mask, value);
+ MESOSPHERE_UNIMPLEMENTED();
+ }
+
+ Result KSystemControlBase::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
+ MESOSPHERE_UNUSED(out, address, mask, value);
+ return svc::ResultNotImplemented();
+ }
+
+ /* Randomness. */
+ void KSystemControlBase::GenerateRandom(u64 *dst, size_t count) {
+ KScopedInterruptDisable intr_disable;
+ KScopedSpinLock lk(s_random_lock);
+
+ for (size_t i = 0; i < count; ++i) {
+ dst[i] = s_random_generator.GenerateRandomU64();
+ }
+ }
+
+ u64 KSystemControlBase::GenerateRandomRange(u64 min, u64 max) {
+ KScopedInterruptDisable intr_disable;
+ KScopedSpinLock lk(s_random_lock);
+
+ return KSystemControlBase::GenerateUniformRange(min, max, [] ALWAYS_INLINE_LAMBDA () -> u64 { return s_random_generator.GenerateRandomU64(); });
+ }
+
+ u64 KSystemControlBase::GenerateRandomU64() {
+ KScopedInterruptDisable intr_disable;
+ KScopedSpinLock lk(s_random_lock);
+
+ return s_random_generator.GenerateRandomU64();
+ }
+
+ void KSystemControlBase::SleepSystem() {
+ MESOSPHERE_LOG("SleepSystem() was called\n");
+ }
+
+ void KSystemControlBase::StopSystem(void *) {
+ MESOSPHERE_LOG("KSystemControlBase::StopSystem\n");
+ AMS_INFINITE_LOOP();
+ }
+
+ /* User access. */
+ #if defined(ATMOSPHERE_ARCH_ARM64)
+ void KSystemControlBase::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
+ /* Get the function id for the current call. */
+ u64 function_id = args->r[0];
+
+ /* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */
+ auto &page_table = GetCurrentProcess().GetPageTable();
+ auto *bim = page_table.GetBlockInfoManager();
+
+ constexpr size_t MaxMappedRegisters = 7;
+ std::array<KPageGroup, MaxMappedRegisters> page_groups = { KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), };
+
+ for (size_t i = 0; i < MaxMappedRegisters; i++) {
+ const size_t reg_id = i + 1;
+ if (function_id & (1ul << (8 + reg_id))) {
+ /* Create and open a new page group for the address. */
+ KVirtualAddress virt_addr = args->r[reg_id];
+
+ if (R_SUCCEEDED(page_table.MakeAndOpenPageGroup(std::addressof(page_groups[i]), util::AlignDown(GetInteger(virt_addr), PageSize), 1, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None))) {
+ /* Translate the virtual address to a physical address. */
+ const auto it = page_groups[i].begin();
+ MESOSPHERE_ASSERT(it != page_groups[i].end());
+ MESOSPHERE_ASSERT(it->GetNumPages() == 1);
+
+ args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
+ } else {
+ /* If we couldn't map, we should clear the address. */
+ args->r[reg_id] = 0;
+ }
+ }
+ }
+
+ /* Invoke the secure monitor. */
+ KSystemControl::CallSecureMonitorFromUserImpl(args);
+
+ /* Make sure that we close any pages that we opened. */
+ for (size_t i = 0; i < MaxMappedRegisters; i++) {
+ page_groups[i].Close();
+ }
+ }
+
+ void KSystemControlBase::CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
+ /* By default, we don't actually support secure monitor, so just set args to a failure code. */
+ args->r[0] = 1;
+ }
+ #endif
+
+ /* Secure Memory. */
+ size_t KSystemControlBase::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
+ MESOSPHERE_UNUSED(pool);
+ return size;
+ }
+
+ Result KSystemControlBase::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
+ /* Ensure the size is aligned. */
+ constexpr size_t Alignment = PageSize;
+ R_UNLESS(util::IsAligned(size, Alignment), svc::ResultInvalidSize());
+
+ /* Allocate the memory. */
+ const size_t num_pages = size / PageSize;
+ const KPhysicalAddress paddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, Alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
+ R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
+
+ *out = KPageTable::GetHeapVirtualAddress(paddr);
+ return ResultSuccess();
+ }
+
+ void KSystemControlBase::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {
+ /* Ensure the size is aligned. */
+ constexpr size_t Alignment = PageSize;
+ MESOSPHERE_UNUSED(pool);
+ MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), Alignment));
+ MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, Alignment));
+
+ /* Close the secure region's pages. */
+ Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize);
+ }
+
+}
\ No newline at end of file
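
GenerateRandomRange above funnels into KSystemControlBase::GenerateUniformRange with a generator lambda; that helper lives in the header and is not part of this diff. For reference, a sketch of the usual rejection-sampling shape such a helper takes (structure assumed here, not quoted from the header):

    #include <cstdint>
    #include <limits>

    /* Sketch: uniform value in [min, max] from a u64 generator, rejecting the
     * biased tail of the generator's output instead of taking a plain modulo. */
    template<typename F>
    std::uint64_t GenerateUniformRangeSketch(std::uint64_t min, std::uint64_t max, F generate) {
        /* Degenerate case: the whole 64-bit range. */
        if (min == std::numeric_limits<std::uint64_t>::min() && max == std::numeric_limits<std::uint64_t>::max()) {
            return generate();
        }

        const std::uint64_t range_size    = (max - min) + 1;
        const std::uint64_t effective_max = (std::numeric_limits<std::uint64_t>::max() / range_size) * range_size;

        /* Draw until we land below the largest multiple of range_size, then reduce. */
        while (true) {
            if (const std::uint64_t rnd = generate(); rnd < effective_max) {
                return min + (rnd % range_size);
            }
        }
    }
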
diff --git a/libraries/libmesosphere/source/kern_kernel.cpp b/libraries/libmesosphere/source/kern_kernel.cpp
index 090d5028b..a6e9b15a5 100644
--- a/libraries/libmesosphere/source/kern_kernel.cpp
+++ b/libraries/libmesosphere/source/kern_kernel.cpp
@@ -139,14 +139,20 @@ namespace ams::kern {
PrintMemoryRegion(" InitPageTable", KMemoryLayout::GetKernelInitPageTableRegionPhysicalExtents());
PrintMemoryRegion(" MemoryPoolRegion", KMemoryLayout::GetKernelPoolPartitionRegionPhysicalExtents());
if (GetTargetFirmware() >= TargetFirmware_5_0_0) {
- PrintMemoryRegion(" System", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
PrintMemoryRegion(" Management", KMemoryLayout::GetKernelPoolManagementRegionPhysicalExtents());
- PrintMemoryRegion(" SystemUnsafe", KMemoryLayout::GetKernelSystemNonSecurePoolRegionPhysicalExtents());
- PrintMemoryRegion(" Applet", KMemoryLayout::GetKernelAppletPoolRegionPhysicalExtents());
- PrintMemoryRegion(" Application", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
+ PrintMemoryRegion(" System", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
+ if (KMemoryLayout::HasKernelSystemNonSecurePoolRegion()) {
+ PrintMemoryRegion(" SystemUnsafe", KMemoryLayout::GetKernelSystemNonSecurePoolRegionPhysicalExtents());
+ }
+ if (KMemoryLayout::HasKernelAppletPoolRegion()) {
+ PrintMemoryRegion(" Applet", KMemoryLayout::GetKernelAppletPoolRegionPhysicalExtents());
+ }
+ if (KMemoryLayout::HasKernelApplicationPoolRegion()) {
+ PrintMemoryRegion(" Application", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
+ }
} else {
- PrintMemoryRegion(" Secure", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
PrintMemoryRegion(" Management", KMemoryLayout::GetKernelPoolManagementRegionPhysicalExtents());
+ PrintMemoryRegion(" Secure", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
PrintMemoryRegion(" Unsafe", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
}
if constexpr (IsKTraceEnabled) {
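
The Has...PoolRegion() guards above keep the boot-time layout dump from touching pools a board never defined (the generic KSystemControlBase::Init reports zero application/applet/non-secure pool sizes). A plausible shape for such a predicate, assuming it probes the physical region tree the same way the layout code in this diff does; the actual helper in the headers may differ:

    /* Sketch only; assumes the FindFirstDerived API used elsewhere in this diff. */
    static ALWAYS_INLINE bool HasKernelAppletPoolRegion() {
        return KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramAppletPool) != nullptr;
    }
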
diff --git a/libraries/libmesosphere/source/svc/kern_svc_cache.cpp b/libraries/libmesosphere/source/svc/kern_svc_cache.cpp
index 07576c889..fe76b399b 100644
--- a/libraries/libmesosphere/source/svc/kern_svc_cache.cpp
+++ b/libraries/libmesosphere/source/svc/kern_svc_cache.cpp
@@ -36,7 +36,7 @@ namespace ams::kern::svc {
size_t remaining = size;
while (remaining > 0) {
/* Get a contiguous range to operate on. */
- KPageTableBase::MemoryRange contig_range = {};
+ KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
R_TRY(page_table.OpenMemoryRangeForProcessCacheOperation(std::addressof(contig_range), cur_address, aligned_end - cur_address));
/* Close the range when we're done operating on it. */