diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp
index f155cebc1..135b6b5a6 100644
--- a/libraries/libmesosphere/include/mesosphere.hpp
+++ b/libraries/libmesosphere/include/mesosphere.hpp
@@ -36,3 +36,6 @@
/* Core functionality. */
#include "mesosphere/kern_select_interrupts.hpp"
#include "mesosphere/kern_select_k_system_control.hpp"
+
+/* Supervisor Calls. */
+#include "mesosphere/kern_svc.hpp"
diff --git a/libraries/libmesosphere/include/mesosphere/kern_svc.hpp b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp
new file mode 100644
index 000000000..6eaa75f9a
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include "svc/kern_svc_k_user_pointer.hpp"
+#include "svc/kern_svc_prototypes.hpp"
+#include "svc/kern_svc_tables.hpp"
diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp
new file mode 100644
index 000000000..368ff39dd
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <vapours.hpp>
+
+namespace ams::kern::svc {
+
+    /* TODO: Actually implement this type. */
+    template<typename T>
+    struct KUserPointer : impl::KUserPointerTag {
+        public:
+            static_assert(std::is_pointer<T>::value);
+            static constexpr bool IsInput = std::is_const<typename std::remove_pointer<T>::type>::value;
+        private:
+            T pointer;
+    };
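+
+    /* For illustration: KUserPointer<const char *> is an input pointer (IsInput == true), */
+    /* while KUserPointer<char *> is an output pointer (IsInput == false). */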
+
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp
new file mode 100644
index 000000000..3412373ba
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <vapours.hpp>
+#include "kern_svc_k_user_pointer.hpp"
+
+namespace ams::kern::svc {
+
+ #define AMS_KERN_SVC_DECLARE_PROTOTYPE_64(ID, RETURN_TYPE, NAME, ...) \
+ RETURN_TYPE NAME##64(__VA_ARGS__);
+ #define AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32(ID, RETURN_TYPE, NAME, ...) \
+ RETURN_TYPE NAME##64From32(__VA_ARGS__);
+
+ AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_PROTOTYPE_64, lp64)
+ AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32, ilp32)
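+
+    /* For illustration, the SetHeapSize entry expands roughly to: */
+    /*   Result SetHeapSize64(::ams::svc::lp64::Address *out_address, ::ams::svc::lp64::Size size); */
+    /*   Result SetHeapSize64From32(::ams::svc::ilp32::Address *out_address, ::ams::svc::ilp32::Size size); */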
+
+ /* TODO: Support _32 ABI */
+
+ #undef AMS_KERN_SVC_DECLARE_PROTOTYPE_64
+ #undef AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32
+
+
+}
diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp
new file mode 100644
index 000000000..42b4a62bd
--- /dev/null
+++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <vapours.hpp>
+
+namespace ams::kern::svc {
+
+ static constexpr size_t NumSupervisorCalls = 0x80;
+ using SvcTableEntry = void (*)();
+
+ /* TODO: 32-bit ABI */
+
+    extern const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64From32;
+    extern const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64;
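+
+    /* Each entry is a generated assembly trampoline for the SVC with that ID; */
+    /* IDs with no kernel definition are left as null entries by the table initializers. */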
+
+}
diff --git a/libraries/libmesosphere/source/svc/kern_svc_tables.cpp b/libraries/libmesosphere/source/svc/kern_svc_tables.cpp
new file mode 100644
index 000000000..a2fd6a28d
--- /dev/null
+++ b/libraries/libmesosphere/source/svc/kern_svc_tables.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+#include <vapours/svc/svc_codegen.hpp>
+
+namespace ams::kern::svc {
+
+ namespace {
+
+ #define DECLARE_SVC_STRUCT(ID, RETURN_TYPE, NAME, ...) \
+ class NAME { \
+ private: \
+ using Impl = ::ams::svc::codegen::KernelSvcWrapper<::ams::kern::svc::NAME##64, ::ams::kern::svc::NAME##64From32>; \
+ public: \
+ static NOINLINE void Call64() { return Impl::Call64(); } \
+ static NOINLINE void Call64From32() { return Impl::Call64From32(); } \
+ };
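+
+        /* For illustration: DECLARE_SVC_STRUCT(ID, Result, SetHeapSize, ...) defines a class */
+        /* SetHeapSize whose Call64/Call64From32 emit the generated ABI-translation code */
+        /* around ::ams::kern::svc::SetHeapSize64 / ::ams::kern::svc::SetHeapSize64From32. */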
+
+
+
+ /* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. */
+ #pragma GCC push_options
+ #pragma GCC optimize ("omit-frame-pointer")
+
+ AMS_SVC_FOREACH_KERN_DEFINITION(DECLARE_SVC_STRUCT, _)
+
+ #pragma GCC pop_options
+
+ }
+
+ /* TODO: 32-bit ABI */
+    const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64From32 = [] {
+        std::array<SvcTableEntry, NumSupervisorCalls> table = {};
+
+ #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \
+ table[ID] = NAME::Call64From32;
+ AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _)
+ #undef AMS_KERN_SVC_SET_TABLE_ENTRY
+
+ return table;
+ }();
+
+    const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64 = [] {
+        std::array<SvcTableEntry, NumSupervisorCalls> table = {};
+
+ #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \
+ table[ID] = NAME::Call64;
+ AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _)
+ #undef AMS_KERN_SVC_SET_TABLE_ENTRY
+
+ return table;
+ }();
+
+}
diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp
new file mode 100644
index 000000000..6a13d93e0
--- /dev/null
+++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include "svc_codegen_impl_common.hpp"
+
+namespace ams::svc::codegen::impl {
+
+ #define SVC_CODEGEN_FOR_I_FROM_0_TO_64(HANDLER, ...) \
+ HANDLER( 0, ## __VA_ARGS__); HANDLER( 1, ## __VA_ARGS__); HANDLER( 2, ## __VA_ARGS__); HANDLER( 3, ## __VA_ARGS__); \
+ HANDLER( 4, ## __VA_ARGS__); HANDLER( 5, ## __VA_ARGS__); HANDLER( 6, ## __VA_ARGS__); HANDLER( 7, ## __VA_ARGS__); \
+ HANDLER( 8, ## __VA_ARGS__); HANDLER( 9, ## __VA_ARGS__); HANDLER(10, ## __VA_ARGS__); HANDLER(11, ## __VA_ARGS__); \
+ HANDLER(12, ## __VA_ARGS__); HANDLER(13, ## __VA_ARGS__); HANDLER(14, ## __VA_ARGS__); HANDLER(15, ## __VA_ARGS__); \
+ HANDLER(16, ## __VA_ARGS__); HANDLER(17, ## __VA_ARGS__); HANDLER(18, ## __VA_ARGS__); HANDLER(19, ## __VA_ARGS__); \
+ HANDLER(20, ## __VA_ARGS__); HANDLER(21, ## __VA_ARGS__); HANDLER(22, ## __VA_ARGS__); HANDLER(23, ## __VA_ARGS__); \
+ HANDLER(24, ## __VA_ARGS__); HANDLER(25, ## __VA_ARGS__); HANDLER(26, ## __VA_ARGS__); HANDLER(27, ## __VA_ARGS__); \
+ HANDLER(28, ## __VA_ARGS__); HANDLER(29, ## __VA_ARGS__); HANDLER(30, ## __VA_ARGS__); HANDLER(31, ## __VA_ARGS__); \
+ HANDLER(32, ## __VA_ARGS__); HANDLER(33, ## __VA_ARGS__); HANDLER(34, ## __VA_ARGS__); HANDLER(35, ## __VA_ARGS__); \
+ HANDLER(36, ## __VA_ARGS__); HANDLER(37, ## __VA_ARGS__); HANDLER(38, ## __VA_ARGS__); HANDLER(39, ## __VA_ARGS__); \
+ HANDLER(40, ## __VA_ARGS__); HANDLER(41, ## __VA_ARGS__); HANDLER(42, ## __VA_ARGS__); HANDLER(43, ## __VA_ARGS__); \
+ HANDLER(44, ## __VA_ARGS__); HANDLER(45, ## __VA_ARGS__); HANDLER(46, ## __VA_ARGS__); HANDLER(47, ## __VA_ARGS__); \
+ HANDLER(48, ## __VA_ARGS__); HANDLER(49, ## __VA_ARGS__); HANDLER(50, ## __VA_ARGS__); HANDLER(51, ## __VA_ARGS__); \
+ HANDLER(52, ## __VA_ARGS__); HANDLER(53, ## __VA_ARGS__); HANDLER(54, ## __VA_ARGS__); HANDLER(55, ## __VA_ARGS__); \
+ HANDLER(56, ## __VA_ARGS__); HANDLER(57, ## __VA_ARGS__); HANDLER(58, ## __VA_ARGS__); HANDLER(59, ## __VA_ARGS__); \
+ HANDLER(60, ## __VA_ARGS__); HANDLER(61, ## __VA_ARGS__); HANDLER(62, ## __VA_ARGS__); HANDLER(63, ## __VA_ARGS__);
+
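+    /* This emulates a compile-time for-loop: HANDLER(i) is expanded for every i in [0, 64), */
+    /* and each expansion is expected to guard itself with an if constexpr on i, so */
+    /* out-of-range indices compile to nothing. */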
+
+ class Aarch64CodeGenerator {
+ private:
+ struct RegisterPair {
+ size_t First;
+ size_t Second;
+ };
+
+            template<size_t... Registers>
+            struct RegisterPairHelper;
+
+            template<size_t First, size_t Second, size_t... Rest>
+            struct RegisterPairHelper<First, Second, Rest...> {
+                static constexpr size_t PairCount = 1 + RegisterPairHelper<Rest...>::PairCount;
+                static constexpr std::array<RegisterPair, PairCount> Pairs = [] {
+                    std::array<RegisterPair, PairCount> pairs = {};
+                    pairs[0] = RegisterPair{First, Second};
+                    if constexpr (RegisterPairHelper<Rest...>::PairCount) {
+                        for (size_t i = 0; i < RegisterPairHelper<Rest...>::PairCount; i++) {
+                            pairs[1+i] = RegisterPairHelper<Rest...>::Pairs[i];
+                        }
+                    }
+                    return pairs;
+                }();
+            };
+
+            template<size_t First, size_t Second>
+            struct RegisterPairHelper<First, Second> {
+                static constexpr size_t PairCount = 1;
+                static constexpr std::array<RegisterPair, PairCount> Pairs = { RegisterPair{First, Second} };
+            };
+
+            template<size_t First>
+            struct RegisterPairHelper<First> {
+                static constexpr size_t PairCount = 0;
+                static constexpr std::array<RegisterPair, 0> Pairs = {};
+            };
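+
+            /* For illustration: RegisterPairHelper<0, 1, 2>::Pairs is { {0, 1} }; the helpers */
+            /* only emit full pairs, and Save/RestoreRegistersImpl handle any odd register. */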
+
+            template<size_t Reg>
+            static ALWAYS_INLINE void ClearRegister() {
+                __asm__ __volatile__("mov x%c[r], xzr" :: [r]"i"(Reg) : "memory");
+            }
+
+            template<size_t Reg>
+            static ALWAYS_INLINE void SaveRegister() {
+                __asm__ __volatile__("str x%c[r], [sp, -16]!" :: [r]"i"(Reg) : "memory");
+            }
+
+            template<size_t Reg>
+            static ALWAYS_INLINE void RestoreRegister() {
+                __asm__ __volatile__("ldr x%c[r], [sp], 16" :: [r]"i"(Reg) : "memory");
+            }
+
+            template<size_t Reg0, size_t Reg1>
+            static ALWAYS_INLINE void SaveRegisterPair() {
+                __asm__ __volatile__("stp x%c[r0], x%c[r1], [sp, -16]!" :: [r0]"i"(Reg0), [r1]"i"(Reg1) : "memory");
+            }
+
+            template<size_t Reg0, size_t Reg1>
+            static ALWAYS_INLINE void RestoreRegisterPair() {
+                __asm__ __volatile__("ldp x%c[r0], x%c[r1], [sp], 16" :: [r0]"i"(Reg0), [r1]"i"(Reg1) : "memory");
+            }
+
+            template<size_t First, size_t... Rest>
+            static ALWAYS_INLINE void SaveRegistersImpl() {
+                #define SVC_CODEGEN_HANDLER(n) \
+                    do { if constexpr ((63 - n) < Pairs.size()) { SaveRegisterPair<Pairs[(63 - n)].First, Pairs[(63 - n)].Second>(); } } while (0)
+
+                if constexpr (sizeof...(Rest) % 2 == 1) {
+                    /* Even number of registers. */
+                    constexpr auto Pairs = RegisterPairHelper<First, Rest...>::Pairs;
+                    static_assert(Pairs.size() <= 8);
+                    SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER)
+                } else if constexpr (sizeof...(Rest) > 0) {
+                    /* Odd number of registers. */
+                    constexpr auto Pairs = RegisterPairHelper<Rest...>::Pairs;
+                    static_assert(Pairs.size() <= 8);
+                    SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER)
+
+                    SaveRegister<First>();
+                } else {
+                    /* Only one register. */
+                    SaveRegister<First>();
+                }
+
+ #undef SVC_CODEGEN_HANDLER
+ }
+
+            template<size_t First, size_t... Rest>
+            static ALWAYS_INLINE void RestoreRegistersImpl() {
+                #define SVC_CODEGEN_HANDLER(n) \
+                    do { if constexpr (n < Pairs.size()) { RestoreRegisterPair<Pairs[n].First, Pairs[n].Second>(); } } while (0)
+
+                if constexpr (sizeof...(Rest) % 2 == 1) {
+                    /* Even number of registers. */
+                    constexpr auto Pairs = RegisterPairHelper<First, Rest...>::Pairs;
+                    static_assert(Pairs.size() <= 8);
+                    SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER)
+                } else if constexpr (sizeof...(Rest) > 0) {
+                    /* Odd number of registers. */
+                    RestoreRegister<First>();
+
+                    constexpr auto Pairs = RegisterPairHelper<Rest...>::Pairs;
+                    static_assert(Pairs.size() <= 8);
+                    SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER)
+                } else {
+                    /* Only one register. */
+                    RestoreRegister<First>();
+                }
+
+ #undef SVC_CODEGEN_HANDLER
+ }
+
+ public:
+            template<size_t... Registers>
+            static ALWAYS_INLINE void SaveRegisters() {
+                if constexpr (sizeof...(Registers) > 0) {
+                    SaveRegistersImpl<Registers...>();
+                }
+            }
+
+            template<size_t... Registers>
+            static ALWAYS_INLINE void RestoreRegisters() {
+                if constexpr (sizeof...(Registers) > 0) {
+                    RestoreRegistersImpl<Registers...>();
+                }
+            }
+
+            template<size_t... Registers>
+            static ALWAYS_INLINE void ClearRegisters() {
+                static_assert(sizeof...(Registers) <= 8);
+                (ClearRegister<Registers>(), ...);
+ }
+
+            template<size_t Size>
+ static ALWAYS_INLINE void AllocateStackSpace() {
+ if constexpr (Size > 0) {
+ __asm__ __volatile__("sub sp, sp, %c[size]" :: [size]"i"(util::AlignUp(Size, 16)) : "memory");
+ }
+ }
+
+            template<size_t Size>
+ static ALWAYS_INLINE void FreeStackSpace() {
+ if constexpr (Size > 0) {
+ __asm__ __volatile__("add sp, sp, %c[size]" :: [size]"i"(util::AlignUp(Size, 16)) : "memory");
+ }
+ }
+
+            template<size_t Dst, size_t Src>
+ static ALWAYS_INLINE void MoveRegister() {
+ __asm__ __volatile__("mov x%c[dst], x%c[src]" :: [dst]"i"(Dst), [src]"i"(Src) : "memory");
+ }
+
+            template<size_t Reg, size_t Offset, size_t Size>
+ static ALWAYS_INLINE void LoadFromStack() {
+ if constexpr (Size == 4) {
+ __asm__ __volatile__("ldr w%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory");
+ } else if constexpr (Size == 8) {
+ __asm__ __volatile__("ldr x%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory");
+ } else {
+ static_assert(Size != Size);
+ }
+ }
+
+            template<size_t Reg0, size_t Reg1, size_t Offset, size_t Size>
+ static ALWAYS_INLINE void LoadPairFromStack() {
+ if constexpr (Size == 4) {
+ __asm__ __volatile__("ldp w%c[r0], w%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory");
+ } else if constexpr (Size == 8) {
+ __asm__ __volatile__("ldp x%c[r0], x%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory");
+ } else {
+ static_assert(Size != Size);
+ }
+ }
+
+            template<size_t Reg, size_t Offset, size_t Size>
+ static ALWAYS_INLINE void StoreToStack() {
+ if constexpr (Size == 4) {
+ __asm__ __volatile__("str w%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory");
+ } else if constexpr (Size == 8) {
+ __asm__ __volatile__("str x%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory");
+ } else {
+ static_assert(Size != Size);
+ }
+ }
+
+            template<size_t Reg0, size_t Reg1, size_t Offset, size_t Size>
+ static ALWAYS_INLINE void StorePairToStack() {
+ if constexpr (Size == 4) {
+ __asm__ __volatile__("stp w%c[r0], w%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory");
+ } else if constexpr (Size == 8) {
+ __asm__ __volatile__("stp x%c[r0], x%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory");
+ } else {
+ static_assert(Size != Size);
+ }
+ }
+
+            template<size_t Dst, size_t Low, size_t High>
+ static ALWAYS_INLINE void Pack() {
+ __asm__ __volatile__("orr x%c[dst], x%c[low], x%c[high], lsl #32" :: [dst]"i"(Dst), [low]"i"(Low), [high]"i"(High) : "memory");
+ }
+
+            template<size_t Low, size_t High, size_t Src>
+ static ALWAYS_INLINE void Unpack() {
+ if constexpr (Src != Low) {
+                    MoveRegister<Low, Src>();
+ }
+
+ __asm__ __volatile__("lsr x%c[high], x%c[src], #32" :: [high]"i"(High), [src]"i"(Src) : "memory");
+ }
+
+            template<size_t Dst, size_t Offset>
+ static ALWAYS_INLINE void LoadStackAddress() {
+ if constexpr (Offset > 0) {
+ __asm__ __volatile__("add x%c[dst], sp, %c[offset]" :: [dst]"i"(Dst), [offset]"i"(Offset) : "memory");
+ } else if constexpr (Offset == 0) {
+ __asm__ __volatile__("mov x%c[dst], sp" :: [dst]"i"(Dst) : "memory");
+ }
+ }
+ };
+
+ class Aarch32CodeGenerator {
+ /* TODO */
+ };
+
+    template<typename CodeGenerator, typename MetaCodeHolder>
+ static ALWAYS_INLINE void GenerateCodeForMetaCode(MetaCodeHolder) {
+ constexpr auto MetaCode = UNWRAP_TEMPLATE_CONSTANT(MetaCodeHolder);
+ constexpr size_t NumOperations = MetaCode.GetNumOperations();
+ static_assert(NumOperations <= 64);
+        #define SVC_CODEGEN_HANDLER(n) do { if constexpr (n < NumOperations) { constexpr auto Operation = MetaCode.GetOperation(n); GenerateCodeForOperation<CodeGenerator>(WRAP_TEMPLATE_CONSTANT(Operation)); } } while (0)
+ SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER)
+ #undef SVC_CODEGEN_HANDLER
+ }
+
+ #undef SVC_CODEGEN_FOR_I_FROM_0_TO_64
+
+}
\ No newline at end of file
diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp
new file mode 100644
index 000000000..c87b4e7c3
--- /dev/null
+++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+namespace ams::svc::codegen::impl {
+
+    template<typename T>
+    constexpr inline bool IsIntegral = std::is_integral<T>::value;
+
+    template<>
+    constexpr inline bool IsIntegral<::ams::svc::Address> = true;
+
+    template<>
+    constexpr inline bool IsIntegral<::ams::svc::Size> = true;
+
+    template<typename T>
+    constexpr inline bool IsKUserPointer = std::is_base_of<ams::kern::svc::impl::KUserPointerTag, T>::value;
+
+    template<typename T>
+    constexpr inline bool IsIntegralOrUserPointer = IsIntegral<T> || IsUserPointer<T> || IsKUserPointer<T>;
+
+    template<size_t... Is1, size_t... Is2>
+    constexpr std::index_sequence<Is1..., Is2...> IndexSequenceCat(std::index_sequence<Is1...>, std::index_sequence<Is2...>) {
+        return std::index_sequence<Is1..., Is2...>{};
+    }
+
+    template<size_t... Is>
+    constexpr inline std::array<size_t, sizeof...(Is)> ConvertToArray(std::index_sequence<Is...>) {
+        return std::array<size_t, sizeof...(Is)>{ Is... };
+    }
+
+    template<auto Function>
+    class FunctionTraits {
+        private:
+            template<typename R, typename... A>
+            static R GetReturnTypeImpl(R(*)(A...));
+
+            template<typename R, typename... A>
+            static std::tuple<A...> GetArgsImpl(R(*)(A...));
+        public:
+            using ReturnType = decltype(GetReturnTypeImpl(Function));
+            using ArgsType   = decltype(GetArgsImpl(Function));
+    };
+
+ enum class CodeGenerationKind {
+ SvcInvocationToKernelProcedure,
+ PrepareForKernelProcedureToSvcInvocation,
+ KernelProcedureToSvcInvocation,
+ Invalid,
+ };
+
+ enum class ArgumentType {
+ In,
+ Out,
+ InUserPointer,
+ OutUserPointer,
+ Invalid,
+ };
+
+    template<typename T>
+    constexpr inline ArgumentType GetArgumentType = [] {
+        static_assert(!std::is_reference<T>::value, "SVC ABI: Reference types not allowed.");
+        static_assert(sizeof(T) <= sizeof(uint64_t), "SVC ABI: Type too large");
+        if constexpr (std::is_pointer<T>::value) {
+            static_assert(!std::is_const<typename std::remove_pointer<T>::type>::value, "SVC ABI: Output (T*) must not be const");
+            return ArgumentType::Out;
+        } else if constexpr (IsUserPointer<T> || IsKUserPointer<T>) {
+ if constexpr (T::IsInput) {
+ return ArgumentType::InUserPointer;
+ } else {
+ return ArgumentType::OutUserPointer;
+ }
+ } else {
+ return ArgumentType::In;
+ }
+ }();
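+
+    /* For illustration: uint64_t maps to ArgumentType::In, uint32_t * to ArgumentType::Out, */
+    /* and KUserPointer<const char *> / KUserPointer<char *> to In/OutUserPointer respectively. */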
+
+    template<size_t RS, size_t RC, size_t ARC, size_t PC>
+ struct AbiType {
+ static constexpr size_t RegisterSize = RS;
+ static constexpr size_t RegisterCount = RC;
+ static constexpr size_t ArgumentRegisterCount = ARC;
+ static constexpr size_t PointerSize = PC;
+
+        template<typename T>
+        static constexpr size_t GetSize() {
+            if constexpr (std::is_same<T, ::ams::svc::Address>::value || std::is_same<T, ::ams::svc::Size>::value || IsUserPointer<T> || IsKUserPointer<T>) {
+                return PointerSize;
+            } else if constexpr(std::is_pointer<T>::value) {
+                /* Out parameter. */
+                return GetSize<typename std::remove_pointer<T>::type>();
+            } else if constexpr (std::is_same<T, void>::value) {
+ return 0;
+ } else {
+ return sizeof(T);
+ }
+ }
+
+        template<typename T>
+        static constexpr inline size_t Size = GetSize<T>();
+ };
+
+ using Aarch64Lp64Abi = AbiType<8, 8, 8, 8>;
+ using Aarch64Ilp32Abi = AbiType<8, 8, 8, 4>;
+ using Aarch32Ilp32Abi = AbiType<4, 4, 4, 4>;
+
+ using Aarch64SvcInvokeAbi = AbiType<8, 8, 8, 8>;
+ using Aarch32SvcInvokeAbi = AbiType<4, 8, 4, 4>;
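+
+    /* Note that Aarch32SvcInvokeAbi has RegisterCount 8 but ArgumentRegisterCount 4: the */
+    /* 32-bit SVC interface can use r0-r7 for inputs/outputs, while AAPCS32 passes arguments in r0-r3. */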
+
+ struct Abi {
+ size_t register_size;
+ size_t register_count;
+ size_t pointer_size;
+
+        template<typename AbiType>
+ static constexpr Abi Convert() { return { AbiType::RegisterSize, AbiType::RegisterCount, AbiType::PointerSize }; }
+ };
+
+    template<typename AbiType, typename T>
+    constexpr inline bool IsPassedByPointer = [] {
+        if (GetArgumentType<T> != ArgumentType::In) {
+            return true;
+        }
+
+        return (!IsIntegral<T> && AbiType::template Size<T> > AbiType::RegisterSize);
+    }();
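+
+    /* That is: every non-In argument is passed by pointer, as is any non-integral input */
+    /* type too large for a register (e.g. an 8-byte struct under 4-byte registers). */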
+
+    template<size_t N>
+    class RegisterAllocator {
+        private:
+            std::array<bool, N> map;
+ public:
+ constexpr explicit RegisterAllocator() : map() { /* ... */ }
+
+ constexpr bool IsAllocated(size_t i) const { return this->map[i]; }
+ constexpr bool IsFree(size_t i) const { return !this->IsAllocated(i); }
+
+ constexpr void Allocate(size_t i) {
+ if (this->IsAllocated(i)) {
+ std::abort();
+ }
+
+ this->map[i] = true;
+ }
+
+ constexpr bool TryAllocate(size_t i) {
+ if (this->IsAllocated(i)) {
+ return false;
+ }
+
+ this->map[i] = true;
+ return true;
+ }
+
+ constexpr size_t AllocateFirstFree() {
+ for (size_t i = 0; i < N; i++) {
+ if (!this->IsAllocated(i)) {
+ this->map[i] = true;
+ return i;
+ }
+ }
+
+ std::abort();
+ }
+
+ constexpr void Free(size_t i) {
+ if (!this->IsAllocated(i)) {
+ std::abort();
+ }
+
+ this->map[i] = false;
+ }
+
+ constexpr size_t GetRegisterCount() const {
+ return N;
+ }
+ };
+
+
+}
\ No newline at end of file
diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp
new file mode 100644
index 000000000..3fffe60fa
--- /dev/null
+++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp
@@ -0,0 +1,547 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include "svc_codegen_impl_common.hpp"
+#include "svc_codegen_impl_parameter.hpp"
+#include "svc_codegen_impl_layout.hpp"
+#include "svc_codegen_impl_meta_code.hpp"
+#include "svc_codegen_impl_layout_conversion.hpp"
+#include "svc_codegen_impl_code_generator.hpp"
+
+namespace ams::svc::codegen::impl {
+
+    template<typename _SvcAbiType, typename _UserAbiType, typename _KernelAbiType, typename ReturnType, typename ArgumentsType>
+    class KernelSvcWrapperHelperImpl;
+
+    template<typename _SvcAbiType, typename _UserAbiType, typename _KernelAbiType, typename ReturnType, typename... ArgumentTypes>
+    class KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, ReturnType, std::tuple<ArgumentTypes...>> {
+        private:
+            static constexpr bool TryToPerformCoalescingOptimizations = true;
+
+            template<MetaCode::OperationKind PairKind, MetaCode::OperationKind SingleKind, size_t InvalidRegisterId, size_t N>
+            static constexpr void CoalesceOperations(MetaCodeGenerator &out_mcg, const std::array<size_t, N> stack_modified, size_t stack_top) {
+ enum class State { WaitingForRegister, ParsingRegister, ParsedRegister, EmittingCode };
+ State cur_state = State::WaitingForRegister;
+ size_t num_regs = 0;
+ size_t registers[2] = { InvalidRegisterId, InvalidRegisterId };
+ size_t widths[2] = {};
+ size_t index = 0;
+ size_t store_base = 0;
+ while (index < stack_top) {
+ if (cur_state == State::WaitingForRegister) {
+ while (stack_modified[index] == InvalidRegisterId && index < stack_top) {
+ index++;
+ }
+ cur_state = State::ParsingRegister;
+ } else if (cur_state == State::ParsingRegister) {
+ const size_t start_index = index;
+ if (num_regs == 0) {
+ store_base = start_index;
+ }
+ const size_t reg = stack_modified[index];
+ registers[num_regs] = reg;
+ while (index < stack_top && index < start_index + KernelAbiType::RegisterSize && stack_modified[index] == reg) {
+ widths[num_regs]++;
+ index++;
+ }
+ num_regs++;
+ cur_state = State::ParsedRegister;
+ } else if (cur_state == State::ParsedRegister) {
+ if (num_regs == 2 || stack_modified[index] == InvalidRegisterId) {
+ cur_state = State::EmittingCode;
+ } else {
+ cur_state = State::ParsingRegister;
+ }
+ } else if (cur_state == State::EmittingCode) {
+ /* Emit an operation! */
+ MetaCode::Operation st_op = {};
+
+ if (num_regs == 2) {
+ if (registers[0] == registers[1]) {
+ std::abort();
+ }
+ if (widths[0] == widths[1]) {
+ st_op.kind = PairKind;
+ st_op.num_parameters = 4;
+ st_op.parameters[0] = registers[0];
+ st_op.parameters[1] = registers[1];
+ st_op.parameters[2] = store_base;
+ st_op.parameters[3] = widths[0];
+ } else {
+ std::abort();
+ }
+ } else if (num_regs == 1) {
+ st_op.kind = SingleKind;
+ st_op.num_parameters = 3;
+ st_op.parameters[0] = registers[0];
+ st_op.parameters[1] = store_base;
+ st_op.parameters[2] = widths[0];
+ } else {
+ std::abort();
+ }
+
+ out_mcg.AddOperationDirectly(st_op);
+
+ /* Go back to beginning of parse. */
+ for (size_t i = 0; i < num_regs; i++) {
+ registers[i] = InvalidRegisterId;
+ widths[i] = 0;
+ }
+ num_regs = 0;
+ cur_state = State::WaitingForRegister;
+ } else {
+ std::abort();
+ }
+ }
+
+ if (cur_state == State::ParsedRegister) {
+ /* Emit an operation! */
+ if (num_regs == 2 && widths[0] == widths[1]) {
+ MetaCode::Operation st_op = {};
+ st_op.kind = PairKind;
+ st_op.num_parameters = 4;
+ st_op.parameters[0] = registers[0];
+ st_op.parameters[1] = registers[1];
+ st_op.parameters[2] = store_base;
+ st_op.parameters[3] = widths[0];
+ out_mcg.AddOperationDirectly(st_op);
+ } else {
+ for (size_t i = 0; i < num_regs; i++) {
+ MetaCode::Operation st_op = {};
+ st_op.kind = SingleKind;
+ st_op.num_parameters = 3;
+ st_op.parameters[0] = registers[i];
+ st_op.parameters[1] = store_base;
+ st_op.parameters[2] = widths[i];
+
+ store_base += widths[i];
+ out_mcg.AddOperationDirectly(st_op);
+ }
+ }
+ }
+ }
+
+ /* Basic optimization of store coalescing. */
+            template<size_t N, typename... Operations>
+            static constexpr bool TryPrepareForKernelProcedureToSvcInvocationCoalescing(std::tuple<Operations...>, MetaCodeGenerator &out_mcg, RegisterAllocator<N> &out_allocator) {
+ /* For debugging, allow ourselves to disable these optimizations. */
+ if constexpr (!TryToPerformCoalescingOptimizations) {
+ return false;
+ }
+
+ /* Generate expected code. */
+ MetaCodeGenerator mcg;
+                RegisterAllocator<N> allocator = out_allocator;
+                (Conversion::template GenerateCode<Operations, CodeGenerationKind::PrepareForKernelProcedureToSvcInvocation>(mcg, allocator), ...);
+ MetaCode mc = mcg.GetMetaCode();
+
+ /* This is a naive optimization pass. */
+ /* We want to reorder code of the form: */
+ /* - Store to Stack sequence 0... */
+ /* - Load Stack Address 0 */
+ /* - Store to Stack 1... */
+ /* - Load Stack Address 1 */
+ /* Into the form: */
+ /* - Store to stack Sequence 0 + 1... */
+ /* - Load Stack Address 0 + 1... */
+ /* But only if they are semantically equivalent. */
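+                /* For illustration (register numbers hypothetical), a sequence like: */
+                /*   str x2, [sp, #0x00];  add x0, sp, #0x00;  str x3, [sp, #0x08];  add x1, sp, #0x08 */
+                /* can be rewritten as: */
+                /*   stp x2, x3, [sp, #0x00];  add x0, sp, #0x00;  add x1, sp, #0x08 */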
+
+                /* We'll do a simple, naive pass to check that no register stored to the stack is subsequently modified. */
+ /* This shouldn't happen in any cases we care about, so we can probably get away with it. */
+ /* TODO: Eventually this should be e.g. operation.ModifiesRegister() / operation.CanReorderBefore() */
+ /* However, this will be more work, and if it's not necessary it can be put off until it is. */
+ constexpr size_t MaxStackIndex = 0x100;
+ constexpr size_t InvalidRegisterId = N;
+ bool register_modified[N] = {};
+                std::array<size_t, N> stack_address_loaded = {};
+ for (size_t i = 0; i < N; i++) { stack_address_loaded[i] = MaxStackIndex; }
+                std::array<size_t, MaxStackIndex> stack_modified = {};
+ for (size_t i = 0; i < MaxStackIndex; i++) { stack_modified[i] = InvalidRegisterId; }
+ size_t stack_top = 0;
+ for (size_t i = 0; i < mc.GetNumOperations(); i++) {
+ const auto mco = mc.GetOperation(i);
+ if (mco.kind == MetaCode::OperationKind::StoreToStack) {
+ if (register_modified[mco.parameters[0]]) {
+ return false;
+ }
+ const size_t offset = mco.parameters[1];
+ const size_t width = mco.parameters[2] == 0 ? KernelAbiType::RegisterSize : mco.parameters[2];
+ for (size_t j = 0; j < width; j++) {
+ const size_t index = offset + j;
+ if (index >= MaxStackIndex) {
+ std::abort();
+ }
+ if (stack_modified[index] != InvalidRegisterId) {
+ return false;
+ }
+ stack_modified[index] = mco.parameters[0];
+ stack_top = std::max(index + 1, stack_top);
+ }
+ } else if (mco.kind == MetaCode::OperationKind::LoadStackAddress) {
+ if (stack_address_loaded[mco.parameters[0]] != MaxStackIndex) {
+ return false;
+ }
+ if (register_modified[mco.parameters[0]]) {
+ return false;
+ }
+ if (mco.parameters[1] >= MaxStackIndex) {
+ std::abort();
+ }
+ stack_address_loaded[mco.parameters[0]] = mco.parameters[1];
+ register_modified[mco.parameters[0]] = true;
+ } else {
+ /* TODO: Better operation reasoning process. */
+ return false;
+ }
+ }
+
+ /* Looks like we can reorder! */
+ /* Okay, let's do this the naive way, too. */
+ constexpr auto PairKind = MetaCode::OperationKind::StorePairToStack;
+ constexpr auto SingleKind = MetaCode::OperationKind::StoreToStack;
+                CoalesceOperations<PairKind, SingleKind, InvalidRegisterId>(out_mcg, stack_modified, stack_top);
+ for (size_t i = 0; i < N; i++) {
+ if (stack_address_loaded[i] != MaxStackIndex) {
+ MetaCode::Operation load_op = {};
+ load_op.kind = MetaCode::OperationKind::LoadStackAddress;
+ load_op.num_parameters = 2;
+ load_op.parameters[0] = i;
+ load_op.parameters[1] = stack_address_loaded[i];
+ out_mcg.AddOperationDirectly(load_op);
+ }
+ }
+
+ /* Ensure the out allocator state is correct. */
+ out_allocator = allocator;
+
+ return true;
+ }
+
+ /* Basic optimization of load coalescing. */
+            template<size_t N, typename... Operations>
+            static constexpr bool TryKernelProcedureToSvcInvocationCoalescing(std::tuple<Operations...>, MetaCodeGenerator &out_mcg, RegisterAllocator<N> &out_allocator) {
+ /* For debugging, allow ourselves to disable these optimizations. */
+ if constexpr (!TryToPerformCoalescingOptimizations) {
+ return false;
+ }
+
+ /* Generate expected code. */
+ MetaCodeGenerator mcg;
+                RegisterAllocator<N> allocator = out_allocator;
+                (Conversion::template GenerateCode<Operations, CodeGenerationKind::KernelProcedureToSvcInvocation>(mcg, allocator), ...);
+ MetaCode mc = mcg.GetMetaCode();
+
+ /* This is a naive optimization pass. */
+ /* We want to coalesce all sequential stack loads, if possible. */
+ /* But only if they are semantically equivalent. */
+
+                /* We'll do a simple, naive pass to check that no register is used again after being loaded from the stack. */
+ /* This shouldn't happen in any cases we care about, so we can probably get away with it. */
+ /* TODO: Eventually this should be e.g. operation.ModifiesRegister() / operation.CanReorderBefore() */
+ /* However, this will be more work, and if it's not necessary it can be put off until it is. */
+ constexpr size_t MaxStackIndex = 0x100;
+ constexpr size_t InvalidRegisterId = N;
+ bool register_modified[N] = {};
+                std::array<size_t, N> stack_offset_loaded = {};
+ for (size_t i = 0; i < N; i++) { stack_offset_loaded[i] = MaxStackIndex; }
+                std::array<size_t, MaxStackIndex> stack_modified = {};
+ for (size_t i = 0; i < MaxStackIndex; i++) { stack_modified[i] = InvalidRegisterId; }
+ size_t stack_top = 0;
+ for (size_t i = 0; i < mc.GetNumOperations(); i++) {
+ const auto mco = mc.GetOperation(i);
+ if (mco.kind == MetaCode::OperationKind::Unpack) {
+ if (register_modified[mco.parameters[0]] || register_modified[mco.parameters[1]] || register_modified[mco.parameters[2]]) {
+ return false;
+ }
+ register_modified[mco.parameters[0]] = true;
+ register_modified[mco.parameters[1]] = true;
+ } else if (mco.kind == MetaCode::OperationKind::LoadFromStack) {
+ if (stack_offset_loaded[mco.parameters[0]] != MaxStackIndex) {
+ return false;
+ }
+ if (register_modified[mco.parameters[0]] != false) {
+ return false;
+ }
+ if (mco.parameters[1] >= MaxStackIndex) {
+ std::abort();
+ }
+ stack_offset_loaded[mco.parameters[0]] = mco.parameters[1];
+ register_modified[mco.parameters[0]] = true;
+
+ const size_t offset = mco.parameters[1];
+ const size_t width = mco.parameters[2] == 0 ? KernelAbiType::RegisterSize : mco.parameters[2];
+ for (size_t j = 0; j < width; j++) {
+ const size_t index = offset + j;
+ if (index >= MaxStackIndex) {
+ std::abort();
+ }
+ if (stack_modified[index] != InvalidRegisterId) {
+ return false;
+ }
+ stack_modified[index] = mco.parameters[0];
+ stack_top = std::max(index + 1, stack_top);
+ }
+ } else {
+ /* TODO: Better operation reasoning process. */
+ return false;
+ }
+ }
+
+ /* Any operations that don't load from stack, we can just re-add. */
+ for (size_t i = 0; i < mc.GetNumOperations(); i++) {
+ const auto mco = mc.GetOperation(i);
+ if (mco.kind != MetaCode::OperationKind::LoadFromStack) {
+ out_mcg.AddOperationDirectly(mco);
+ }
+ }
+ constexpr auto PairKind = MetaCode::OperationKind::LoadPairFromStack;
+ constexpr auto SingleKind = MetaCode::OperationKind::LoadFromStack;
+                CoalesceOperations<PairKind, SingleKind, InvalidRegisterId>(out_mcg, stack_modified, stack_top);
+
+ /* Ensure the out allocator state is correct. */
+ out_allocator = allocator;
+
+ return true;
+ }
+
+            template<typename... OperationTypes>
+            struct TypeIndexFilter {
+                template<typename UseArrayHolder, typename HeadType, typename... TailType, size_t HeadIndex, size_t... TailIndex>
+                static constexpr auto GetFilteredTupleImpl(UseArrayHolder, std::tuple<HeadType, TailType...>, std::index_sequence<HeadIndex, TailIndex...>) {
+ constexpr auto UseArray = UNWRAP_TEMPLATE_CONSTANT(UseArrayHolder);
+ static_assert(sizeof...(TailType) == sizeof...(TailIndex));
+ static_assert(HeadIndex <= UseArray.size());
+
+ if constexpr (sizeof...(TailType) == 0) {
+ if constexpr (!UseArray[HeadIndex]) {
+                            return std::tuple<HeadType>{};
+ } else {
+ return std::tuple<>{};
+ }
+ } else {
+                        auto tail_tuple = GetFilteredTupleImpl(UseArrayHolder{}, std::tuple<TailType...>{}, std::index_sequence<TailIndex...>{});
+ if constexpr (!UseArray[HeadIndex]) {
+                            return std::tuple_cat(std::tuple<HeadType>{}, tail_tuple);
+ } else {
+ return std::tuple_cat(std::tuple<>{}, tail_tuple);
+ }
+ }
+ }
+
+                template<typename UseArrayHolder>
+                static constexpr auto GetFilteredTuple(UseArrayHolder) {
+                    return GetFilteredTupleImpl(UseArrayHolder{}, std::tuple<OperationTypes...>{}, std::make_index_sequence<sizeof...(OperationTypes)>());
+ }
+ };
+
+            template<typename AllocatorHolder, typename FirstOperation, typename... OtherOperations>
+            static constexpr auto GetModifiedOperations(AllocatorHolder, std::tuple<FirstOperation, OtherOperations...> ops) {
+ constexpr size_t ModifyRegister = [] {
+ auto allocator = UNWRAP_TEMPLATE_CONSTANT(AllocatorHolder);
+ return allocator.AllocateFirstFree();
+ }();
+
+                using ModifiedFirstOperation = typename FirstOperation::template ModifiedType<ModifyRegister>;
+                using NewMoveOperation = typename LayoutConversionBase::template OperationMove<KernelAbiType::RegisterSize, KernelAbiType::RegisterSize, 0, FirstOperation::ProcedureIndex, ModifyRegister>;
+                return std::tuple<ModifiedFirstOperation, OtherOperations..., NewMoveOperation>{};
+ }
+
+            template<typename AllocatorHolder, typename FirstOperation, typename... OtherOperations>
+            static constexpr auto GenerateBeforeOperations(MetaCodeGenerator &mcg, AllocatorHolder, std::tuple<FirstOperation, OtherOperations...> ops) -> RegisterAllocator<SvcAbiType::RegisterCount> {
+                constexpr size_t NumOperations = 1 + sizeof...(OtherOperations);
+                using OperationsTuple = decltype(ops);
+                using FilterHelper = TypeIndexFilter<FirstOperation, OtherOperations...>;
+
+                constexpr auto ProcessOperation = []<typename Operation>(MetaCodeGenerator &pr_mcg, auto &allocator, Operation) {
+                    if (Conversion::template CanGenerateCode<Operation, CodeGenerationKind::SvcInvocationToKernelProcedure>(allocator)) {
+                        Conversion::template GenerateCode<Operation, CodeGenerationKind::SvcInvocationToKernelProcedure>(pr_mcg, allocator);
+ return true;
+ }
+ return false;
+ };
+
+                constexpr auto ProcessResults = [ProcessOperation]<typename... Operations>(std::tuple<Operations...>) {
+ auto allocator = UNWRAP_TEMPLATE_CONSTANT(AllocatorHolder);
+ MetaCodeGenerator pr_mcg;
+ auto use_array = std::array{ ProcessOperation(pr_mcg, allocator, Operations{})... };
+ return std::make_tuple(use_array, allocator, pr_mcg);
+ }(OperationsTuple{});
+
+ constexpr auto CanGenerate = std::get<0>(ProcessResults);
+ constexpr auto AfterAllocator = std::get<1>(ProcessResults);
+ constexpr auto GeneratedCode = std::get<2>(ProcessResults).GetMetaCode();
+
+ for (size_t i = 0; i < GeneratedCode.GetNumOperations(); i++) {
+ mcg.AddOperationDirectly(GeneratedCode.GetOperation(i));
+ }
+
+ constexpr auto FilteredOperations = FilterHelper::template GetFilteredTuple(WRAP_TEMPLATE_CONSTANT(CanGenerate));
+                static_assert(std::tuple_size<decltype(FilteredOperations)>::value <= NumOperations);
+                if constexpr (std::tuple_size<decltype(FilteredOperations)>::value > 0) {
+                    if constexpr (std::tuple_size<decltype(FilteredOperations)>::value != NumOperations) {
+ return GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(AfterAllocator), FilteredOperations);
+ } else {
+ /* No progress was made, so we need to make a change. */
+ constexpr auto ModifiedOperations = GetModifiedOperations(WRAP_TEMPLATE_CONSTANT(AfterAllocator), FilteredOperations);
+ return GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(AfterAllocator), ModifiedOperations);
+ }
+ } else {
+ return AfterAllocator;
+ }
+ }
+
+ static constexpr MetaCode GenerateOriginalBeforeMetaCode() {
+ MetaCodeGenerator mcg;
+                RegisterAllocator<SvcAbiType::RegisterCount> allocator;
+ static_assert(SvcAbiType::RegisterCount == KernelAbiType::RegisterCount);
+
+ /* Reserve registers used by the input layout. */
+ constexpr auto InitialAllocator = [] {
+                    RegisterAllocator<SvcAbiType::RegisterCount> initial_allocator;
+ for (size_t i = 0; i < SvcAbiType::RegisterCount; i++) {
+ if (Conversion::LayoutForSvc.GetInputLayout().UsesRegister(i)) {
+ initial_allocator.Allocate(i);
+ }
+ }
+ return initial_allocator;
+ }();
+
+ /* Save every register that needs to be preserved to the stack. */
+ if constexpr (Conversion::NumPreserveRegisters > 0) {
+                    [&mcg]<size_t... Is>(std::index_sequence<Is...>) {
+                        mcg.template SaveRegisters<Is...>();
+ }(typename Conversion::PreserveRegisters{});
+ }
+
+ /* Allocate space on the stack for parameters that need it. */
+ if constexpr (UsedStackSpace > 0) {
+                    mcg.template AllocateStackSpace<UsedStackSpace>();
+ }
+
+ /* Generate code for before operations. */
+ if constexpr (Conversion::NumBeforeOperations > 0) {
+ allocator = GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(InitialAllocator), typename Conversion::BeforeOperations{});
+ } else {
+ allocator = InitialAllocator;
+ }
+
+ /* Generate code for after operations. */
+ if constexpr (Conversion::NumAfterOperations > 0) {
+ if (!TryPrepareForKernelProcedureToSvcInvocationCoalescing(typename Conversion::AfterOperations{}, mcg, allocator)) {
+ /* We're not eligible for the straightforward optimization. */
+                        [&mcg, &allocator]<size_t... Is>(std::index_sequence<Is...>) {
+                            (Conversion::template GenerateCode<typename std::tuple_element<Is, typename Conversion::AfterOperations>::type, CodeGenerationKind::PrepareForKernelProcedureToSvcInvocation>(mcg, allocator), ...);
+                        }(std::make_index_sequence<Conversion::NumAfterOperations>());
+ }
+ }
+
+ return mcg.GetMetaCode();
+ }
+ public:
+ using SvcAbiType = _SvcAbiType;
+ using UserAbiType = _UserAbiType;
+ using KernelAbiType = _KernelAbiType;
+
+            using Conversion = LayoutConversion<SvcAbiType, UserAbiType, KernelAbiType, ReturnType, ArgumentTypes...>;
+
+ static constexpr size_t UsedStackSpace = Conversion::NonAbiUsedStackIndices * KernelAbiType::RegisterSize;
+
+ static constexpr MetaCode OriginalBeforeMetaCode = [] {
+ return GenerateOriginalBeforeMetaCode();
+ }();
+
+ static constexpr MetaCode OriginalAfterMetaCode = [] {
+ MetaCodeGenerator mcg;
+                RegisterAllocator<SvcAbiType::RegisterCount> allocator;
+ static_assert(SvcAbiType::RegisterCount == KernelAbiType::RegisterCount);
+
+ /* Generate code for after operations. */
+ if constexpr (Conversion::NumAfterOperations > 0) {
+ if (!TryKernelProcedureToSvcInvocationCoalescing(typename Conversion::AfterOperations{}, mcg, allocator)) {
+                    [&mcg, &allocator]<size_t... Is>(std::index_sequence<Is...>) {
+                        (Conversion::template GenerateCode<typename std::tuple_element<Is, typename Conversion::AfterOperations>::type, CodeGenerationKind::KernelProcedureToSvcInvocation>(mcg, allocator), ...);
+                    }(std::make_index_sequence<Conversion::NumAfterOperations>());
+ }
+ }
+
+                /* Free the stack space allocated for parameters. */
+                if constexpr (UsedStackSpace > 0) {
+                    mcg.template FreeStackSpace<UsedStackSpace>();
+ }
+
+ if constexpr (Conversion::NumClearRegisters > 0) {
+                    [&mcg]<size_t... Is>(std::index_sequence<Is...>) {
+                        mcg.template ClearRegisters<Is...>();
+ }(typename Conversion::ClearRegisters{});
+ }
+
+ /* Restore registers we previously saved to the stack. */
+ if constexpr (Conversion::NumPreserveRegisters > 0) {
+                    [&mcg]<size_t... Is>(std::index_sequence<Is...>) {
+                        mcg.template RestoreRegisters<Is...>();
+ }(typename Conversion::PreserveRegisters{});
+ }
+
+ return mcg.GetMetaCode();
+ }();
+
+ /* TODO: Implement meta code optimization via separate layer. */
+ /* Right now some basic optimizations are just implemented by the above generators. */
+ static constexpr MetaCode OptimizedBeforeMetaCode = OriginalBeforeMetaCode;
+ static constexpr MetaCode OptimizedAfterMetaCode = OriginalAfterMetaCode;
+ };
+
+    template<typename _SvcAbiType, typename _UserAbiType, typename _KernelAbiType, auto Function>
+ class KernelSvcWrapperHelper {
+ private:
+            using Traits = FunctionTraits<Function>;
+ public:
+ using Impl = KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, typename Traits::ReturnType, typename Traits::ArgsType>;
+
+ static constexpr bool IsAarch64Kernel = std::is_same<_KernelAbiType, Aarch64Lp64Abi>::value;
+ static constexpr bool IsAarch32Kernel = std::is_same<_KernelAbiType, Aarch32Ilp32Abi>::value;
+ static_assert(IsAarch64Kernel || IsAarch32Kernel);
+
+            using CodeGenerator = typename std::conditional<IsAarch64Kernel, Aarch64CodeGenerator, Aarch32CodeGenerator>::type;
+
+ static constexpr auto BeforeMetaCode = Impl::OptimizedBeforeMetaCode;
+ static constexpr auto AfterMetaCode = Impl::OptimizedAfterMetaCode;
+
+
+/* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. */
+#pragma GCC push_options
+#pragma GCC optimize ("omit-frame-pointer")
+
+ static ALWAYS_INLINE void WrapSvcFunction() {
+ /* Generate appropriate assembly. */
+                GenerateCodeForMetaCode<CodeGenerator>(WRAP_TEMPLATE_CONSTANT(BeforeMetaCode));
+                ON_SCOPE_EXIT { GenerateCodeForMetaCode<CodeGenerator>(WRAP_TEMPLATE_CONSTANT(AfterMetaCode)); };
+
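+                /* By this point, the generated before-code has already placed the arguments in */
+                /* the registers/stack slots the kernel procedure expects, so the function can */
+                /* be invoked through a plain void (*)() cast; the after-code converts back. */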
+                return reinterpret_cast<void (*)()>(Function)();
+ }
+
+#pragma GCC pop_options
+ };
+
+
+}
\ No newline at end of file
diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp
new file mode 100644
index 000000000..132b13ae1
--- /dev/null
+++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include "svc_codegen_impl_common.hpp"
+#include "svc_codegen_impl_parameter.hpp"
+
+namespace ams::svc::codegen::impl {
+
+ class ParameterLayout {
+ public:
+ static constexpr size_t MaxParameters = 8;
+ private:
+            static constexpr size_t InvalidIndex = std::numeric_limits<size_t>::max();
+ private:
+ /* ABI parameters. */
+ Abi abi;
+
+ /* Parameter storage. */
+ size_t num_parameters;
+ Parameter parameters[MaxParameters];
+ public:
+ constexpr explicit ParameterLayout(Abi a)
+ : abi(a), num_parameters(0), parameters()
+ { /* ... */ }
+
+ constexpr void AddSingle(Parameter::Identifier id, ArgumentType type, size_t ts, size_t ps, bool p, Storage s, size_t idx) {
+ for (size_t i = 0; i < this->num_parameters; i++) {
+ if (this->parameters[i].Is(id)) {
+ this->parameters[i].AddLocation(Location(s, idx));
+ return;
+ }
+ }
+ this->parameters[this->num_parameters++] = Parameter(id, type, ts, ps, p, Location(s, idx));
+ }
+
+ constexpr size_t Add(Parameter::Identifier id, ArgumentType type, size_t ts, size_t ps, bool p, Storage s, size_t i) {
+ size_t required_registers = 0;
+
+ while (required_registers * this->abi.register_size < ps) {
+ this->AddSingle(id, type, ts, ps, p, s, i++);
+ required_registers++;
+ }
+
+ return required_registers;
+ }
+
+ constexpr bool UsesLocation(Location l) const {
+ for (size_t i = 0; i < this->num_parameters; i++) {
+ if (this->parameters[i].UsesLocation(l)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ constexpr bool UsesRegister(size_t i) const {
+ return this->UsesLocation(Location(Storage::Register, i));
+ }
+
+ constexpr bool IsRegisterFree(size_t i) const {
+ return !(this->UsesRegister(i));
+ }
+
+ constexpr size_t GetNumParameters() const {
+ return this->num_parameters;
+ }
+
+ constexpr Parameter GetParameter(size_t i) const {
+ return this->parameters[i];
+ }
+
+ constexpr bool HasParameter(Parameter::Identifier id) const {
+ for (size_t i = 0; i < this->num_parameters; i++) {
+ if (this->parameters[i].Is(id)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ constexpr Parameter GetParameter(Parameter::Identifier id) const {
+ for (size_t i = 0; i < this->num_parameters; i++) {
+ if (this->parameters[i].Is(id)) {
+ return this->parameters[i];
+ }
+ }
+ std::abort();
+ }
+ };
+
+ class ProcedureLayout {
+ private:
+ Abi abi;
+ ParameterLayout input;
+ ParameterLayout output;
+ private:
+            template<typename AbiType, typename ParameterType>
+            constexpr void ProcessArgument(size_t i, size_t &NGRN, size_t &NSAA) {
+ /* We currently don't implement support for floating point types. */
+                static_assert(!std::is_floating_point<ParameterType>::value);
+                static_assert(!std::is_same<ParameterType, void>::value);
+
+                constexpr size_t ArgumentTypeSize = AbiType::template Size<ParameterType>;
+                constexpr bool PassedByPointer = IsPassedByPointer<AbiType, ParameterType>;
+ constexpr size_t ArgumentPassSize = PassedByPointer ? AbiType::PointerSize : ArgumentTypeSize;
+
+ /* TODO: Is there ever a case where this is not the correct alignment? */
+ constexpr size_t ArgumentAlignment = ArgumentPassSize;
+
+ /* Ensure NGRN is aligned. */
+ if constexpr (ArgumentAlignment > AbiType::RegisterSize) {
+ NGRN += (NGRN & 1);
+ }
+
+ /* TODO: We don't support splitting arguments between registers and stack, but AAPCS32 does. */
+ /* Is this a problem? Nintendo seems to not ever do this. */
+
+ auto id = Parameter::Identifier("FunctionParameter", i);
+
+ /* Allocate integral types specially per aapcs. */
+                constexpr ArgumentType Type = GetArgumentType<ParameterType>;
+ const size_t registers_available = AbiType::RegisterCount - NGRN;
+                if constexpr (!PassedByPointer && IsIntegralOrUserPointer<ParameterType> && ArgumentTypeSize > AbiType::RegisterSize) {
+ if (registers_available >= 2) {
+ this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Register, NGRN);
+ NGRN += 2;
+ } else {
+ /* Argument went on stack, so stop allocating arguments in registers. */
+ NGRN = AbiType::RegisterCount;
+
+ NSAA += (NSAA & 1);
+ this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Stack, NSAA);
+ NSAA += 2;
+ }
+ } else {
+ if (ArgumentPassSize <= AbiType::RegisterSize * registers_available) {
+ NGRN += this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Register, NGRN);
+ } else {
+ /* Argument went on stack, so stop allocating arguments in registers. */
+ NGRN = AbiType::RegisterCount;
+
+ /* TODO: Stack pointer alignment is only ensured for aapcs64. */
+ /* What should we do here? */
+
+ NSAA += this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Stack, NSAA);
+ }
+ }
+ }
+ public:
+ constexpr explicit ProcedureLayout(Abi a) : abi(a), input(a), output(a) { /* ... */ }
+
+            template<typename AbiType, typename ReturnType, typename... ArgumentTypes>
+            static constexpr ProcedureLayout Create() {
+                ProcedureLayout layout(Abi::Convert<AbiType>());
+
+ /* 1. The Next General-purpose Register Number (NGRN) is set to zero. */
+ [[maybe_unused]] size_t NGRN = 0;
+
+ /* 2. The next stacked argument address (NSAA) is set to the current stack-pointer value (SP). */
+ [[maybe_unused]] size_t NSAA = 0; /* Should be considered an offset from stack pointer. */
+
+ /* 3. Handle the return type. */
+ /* TODO: It's unclear how to handle the non-integral and too-large case. */
+                if constexpr (!std::is_same<ReturnType, void>::value) {
+                    constexpr size_t ReturnTypeSize = AbiType::template Size<ReturnType>;
+                    layout.output.Add(Parameter::Identifier("ReturnType"), ArgumentType::Invalid, ReturnTypeSize, ReturnTypeSize, false, Storage::Register, 0);
+                    static_assert(IsIntegral<ReturnType> || ReturnTypeSize <= AbiType::RegisterSize);
+ }
+
+ /* Process all arguments, in order. */
+ size_t i = 0;
+                (layout.ProcessArgument<AbiType, ArgumentTypes>(i++, NGRN, NSAA), ...);
+
+ return layout;
+ }
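+
+            /* For illustration (Aarch64Lp64Abi, names hypothetical): for Result f(u64 *out, u64 v), */
+            /* the Result maps to output register 0, out to input register 0 (it is an Out */
+            /* pointer argument), and v to input register 1. */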
+
+ constexpr ParameterLayout GetInputLayout() const {
+ return this->input;
+ }
+
+ constexpr ParameterLayout GetOutputLayout() const {
+ return this->output;
+ }
+
+ constexpr Parameter GetParameter(Parameter::Identifier id) const {
+ if (this->input.HasParameter(id)) {
+ return this->input.GetParameter(id);
+ } else {
+ return this->output.GetParameter(id);
+ }
+ }
+ };
+
+ class SvcInvocationLayout {
+ private:
+ Abi abi;
+ ParameterLayout input;
+ ParameterLayout output;
+ private:
+            template<typename F>
+ constexpr void ForEachInputArgument(ParameterLayout param_layout, F f) {
+ /* We want to iterate over the parameters in sorted order. */
+                std::array<size_t, ParameterLayout::MaxParameters> map = {};
+ const size_t num_parameters = param_layout.GetNumParameters();
+ for (size_t i = 0; i < num_parameters; i++) {
+ map[i] = i;
+ }
+ for (size_t i = 1; i < num_parameters; i++) {
+ for (size_t j = i; j > 0 && param_layout.GetParameter(map[j-1]).GetLocation(0) > param_layout.GetParameter(map[j]).GetLocation(0); j--) {
+ /* std::swap is not constexpr until c++20 :( */
+ /* TODO: std::swap(map[j], map[j-1]); */
+ const size_t tmp = map[j];
+ map[j] = map[j-1];
+ map[j-1] = tmp;
+ }
+ }
+
+ for (size_t i = 0; i < param_layout.GetNumParameters(); i++) {
+ const auto Parameter = param_layout.GetParameter(map[i]);
+ if (Parameter.GetArgumentType() == ArgumentType::In && !Parameter.IsPassedByPointer()) {
+ f(Parameter);
+ }
+ }
+ for (size_t i = 0; i < param_layout.GetNumParameters(); i++) {
+ const auto Parameter = param_layout.GetParameter(map[i]);
+ if (Parameter.GetArgumentType() == ArgumentType::InUserPointer) {
+ f(Parameter);
+ }
+ }
+ for (size_t i = 0; i < param_layout.GetNumParameters(); i++) {
+ const auto Parameter = param_layout.GetParameter(map[i]);
+ if (Parameter.GetArgumentType() == ArgumentType::OutUserPointer) {
+ f(Parameter);
+ }
+ }
+ }
+
+            template<typename F>
+ constexpr void ForEachInputPointerArgument(ParameterLayout param_layout, F f) {
+ for (size_t i = 0; i < param_layout.GetNumParameters(); i++) {
+ const auto Parameter = param_layout.GetParameter(i);
+ if (Parameter.GetArgumentType() == ArgumentType::In && Parameter.IsPassedByPointer()) {
+ f(Parameter);
+ }
+ }
+ }
+
+            template<typename F>
+ constexpr void ForEachOutputArgument(ParameterLayout param_layout, F f) {
+ for (size_t i = 0; i < param_layout.GetNumParameters(); i++) {
+ const auto Parameter = param_layout.GetParameter(i);
+ if (Parameter.GetArgumentType() == ArgumentType::Out) {
+ f(Parameter);
+ }
+ }
+ }
+
+            template<size_t N>
+            static constexpr void AddRegisterParameter(ParameterLayout &dst_layout, RegisterAllocator<N> &reg_allocator, Parameter param) {
+ for (size_t i = 0; i < param.GetNumLocations(); i++) {
+ const auto location = param.GetLocation(i);
+ if (location.GetStorage() == Storage::Register) {
+ reg_allocator.Allocate(location.GetIndex());
+ dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), param.GetTypeSize(), param.GetPassedSize(), param.IsPassedByPointer(), Storage::Register, location.GetIndex());
+ }
+ }
+ }
+
+            template<size_t N>
+            static constexpr void AddStackParameter(ParameterLayout &dst_layout, RegisterAllocator<N> &reg_allocator, Parameter param) {
+ for (size_t i = 0; i < param.GetNumLocations(); i++) {
+ const auto location = param.GetLocation(i);
+ if (location.GetStorage() == Storage::Stack) {
+ const size_t free_reg = reg_allocator.AllocateFirstFree();
+ dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), param.GetTypeSize(), param.GetPassedSize(), param.IsPassedByPointer(), Storage::Register, free_reg);
+ }
+ }
+ }
+
+            template<typename AbiType, size_t N>
+            static constexpr void AddIndirectParameter(ParameterLayout &dst_layout, RegisterAllocator<N> &reg_allocator, Parameter param) {
+ const size_t type_size = param.GetTypeSize();
+ for (size_t sz = 0; sz < type_size; sz += AbiType::RegisterSize) {
+ const size_t free_reg = reg_allocator.AllocateFirstFree();
+ dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), type_size, type_size, false, Storage::Register, free_reg);
+ }
+ }
+ public:
+ constexpr explicit SvcInvocationLayout(Abi a) : abi(a), input(a), output(a) { /* ... */ }
+
+            template<typename AbiType>
+            static constexpr SvcInvocationLayout Create(ProcedureLayout procedure_layout) {
+                SvcInvocationLayout layout(Abi::Convert<AbiType>());
+                RegisterAllocator<AbiType::RegisterCount> input_register_allocator, output_register_allocator;
+
+ /* Input first wants to map in register -> register */
+ layout.ForEachInputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) {
+ AddRegisterParameter(layout.input, input_register_allocator, parameter);
+ });
+
+ /* And then input wants to map in stack -> stack */
+ layout.ForEachInputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) {
+ AddStackParameter(layout.input, input_register_allocator, parameter);
+ });
+
+ /* And then input wants to map in indirects -> register */
+ layout.ForEachInputPointerArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) {
+                    AddIndirectParameter<AbiType>(layout.input, input_register_allocator, parameter);
+ });
+
+ /* Handle the return type. */
+ if (procedure_layout.GetOutputLayout().GetNumParameters() > 0) {
+ if (procedure_layout.GetOutputLayout().GetNumParameters() != 1) {
+ std::abort();
+ }
+ const auto return_param = procedure_layout.GetOutputLayout().GetParameter(0);
+ if (return_param.GetIdentifier() != Parameter::Identifier("ReturnType")) {
+ std::abort();
+ }
+ AddRegisterParameter(layout.output, output_register_allocator, return_param);
+ }
+
+ /* Handle other outputs. */
+ layout.ForEachOutputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) {
+                    AddIndirectParameter<AbiType>(layout.output, output_register_allocator, parameter);
+ });
+
+ return layout;
+ }
+
+ constexpr ParameterLayout GetInputLayout() const {
+ return this->input;
+ }
+
+ constexpr ParameterLayout GetOutputLayout() const {
+ return this->output;
+ }
+ };
+
+
+}
\ No newline at end of file
diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp
new file mode 100644
index 000000000..2e3d95775
--- /dev/null
+++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2018-2019 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include "svc_codegen_impl_common.hpp"
+#include "svc_codegen_impl_parameter.hpp"
+#include "svc_codegen_impl_layout.hpp"
+#include "svc_codegen_impl_meta_code.hpp"
+
+namespace ams::svc::codegen::impl {
+
+ class LayoutConversionBase {
+ public:
+ enum class OperationKind {
+ Move,
+ LoadAndStore,
+ PackAndUnpack,
+ Scatter,
+ Invalid,
+ };
+
+ class OperationMoveImpl;
+ class OperationLoadAndStoreImpl;
+ class OperationPackAndUnpackImpl;
+ class OperationScatterImpl;
+
+ class OperationBase{};
+
+            template<OperationKind _Kind, size_t RS, size_t PS, size_t SO, size_t PIdx, size_t... SIdx>
+            class Operation : public OperationBase {
+ public:
+ static constexpr OperationKind Kind = _Kind;
+ static constexpr size_t RegisterSize = RS;
+ static constexpr size_t PassedSize = PS;
+ static constexpr size_t StackOffset = SO;
+ static constexpr size_t ProcedureIndex = PIdx;
+
+ static constexpr size_t NumSvcIndices = sizeof...(SIdx);
+                    static constexpr std::array<size_t, NumSvcIndices> SvcIndices = { SIdx... };
+                    static constexpr std::index_sequence<SIdx...> SvcIndexSequence = {};
+
+                    template<size_t I>
+                    static constexpr size_t SvcIndex = SvcIndices[I];
+
+                    template<typename F>
+ static void ForEachSvcIndex(F f) {
+ (f(SIdx), ...);
+ }
+
+                    using ImplType = typename std::conditional<Kind == OperationKind::Move, OperationMoveImpl, typename std::conditional<Kind == OperationKind::LoadAndStore, OperationLoadAndStoreImpl, typename std::conditional<Kind == OperationKind::PackAndUnpack, OperationPackAndUnpackImpl, typename std::conditional<Kind == OperationKind::Scatter, OperationScatterImpl, void>::type>::type>::type>::type;
+
+                    template<size_t NewProcedureIndex>
+                    using ModifiedType = Operation<Kind, RS, PS, SO, NewProcedureIndex, SIdx...>;
+ };
+
+            template<size_t RS, size_t PS, size_t SO, size_t PIdx, size_t... SIdx>
+            using OperationMove = Operation<OperationKind::Move, RS, PS, SO, PIdx, SIdx...>;
+
+            template<size_t RS, size_t PS, size_t SO, size_t PIdx, size_t... SIdx>
+            using OperationLoadAndStore = Operation<OperationKind::LoadAndStore, RS, PS, SO, PIdx, SIdx...>;
+
+            template<size_t RS, size_t PS, size_t SO, size_t PIdx, size_t... SIdx>
+            using OperationPackAndUnpack = Operation<OperationKind::PackAndUnpack, RS, PS, SO, PIdx, SIdx...>;
+
+            template<size_t RS, size_t PS, size_t SO, size_t PIdx, size_t... SIdx>
+            using OperationScatter = Operation<OperationKind::Scatter, RS, PS, SO, PIdx, SIdx...>;
+
+ class OperationMoveImpl {
+ public:
+                template<typename Operation, size_t N>
+                static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator<N> allocator) {
+ static_assert(Operation::Kind == OperationKind::Move);
+ allocator.Free(Operation::template SvcIndex<0>);
+ return allocator.TryAllocate(Operation::ProcedureIndex);
+ }
+
+                template<typename Operation, size_t N>
+                static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator<N> &allocator) {
+ static_assert(Operation::Kind == OperationKind::Move);
+ allocator.Free(Operation::template SvcIndex<0>);
+ allocator.Allocate(Operation::ProcedureIndex);
+                    mcg.template MoveRegister<Operation::ProcedureIndex, Operation::template SvcIndex<0>>();
+ }
+ };
+
+ class OperationLoadAndStoreImpl {
+ public:
+                template<typename Operation, size_t N>
+                static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator<N> allocator) {
+ static_assert(Operation::Kind == OperationKind::LoadAndStore);
+ allocator.Free(Operation::template SvcIndex<0>);
+ return true;
+ }
+
+                template<typename Operation, size_t N>
+                static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator<N> &allocator) {
+ static_assert(Operation::Kind == OperationKind::LoadAndStore);
+ allocator.Free(Operation::template SvcIndex<0>);
+ constexpr size_t StackOffset = Operation::ProcedureIndex * Operation::RegisterSize;
+                    mcg.template StoreToStack<Operation::template SvcIndex<0>, StackOffset>();
+ }
+ };
+
+ class OperationPackAndUnpackImpl {
+ public:
+                template<typename Operation, size_t N>
+                static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator<N> allocator) {
+ static_assert(Operation::Kind == OperationKind::PackAndUnpack);
+ allocator.Free(Operation::template SvcIndex<0>);
+ allocator.Free(Operation::template SvcIndex<1>);
+ return allocator.TryAllocate(Operation::ProcedureIndex);
+ }
+
+                template<typename Operation, size_t N>
+                static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator<N> &allocator) {
+ static_assert(Operation::Kind == OperationKind::PackAndUnpack);
+ allocator.Free(Operation::template SvcIndex<0>);
+ allocator.Free(Operation::template SvcIndex<1>);
+ allocator.Allocate(Operation::ProcedureIndex);
+                    mcg.template Pack<Operation::ProcedureIndex, Operation::template SvcIndex<0>, Operation::template SvcIndex<1>>();
+ }
+
+                template<typename Operation, size_t N>