diff --git a/thermosphere/src/cpu/hvisor_cpu_instructions.hpp b/thermosphere/src/cpu/hvisor_cpu_instructions.hpp
new file mode 100644
index 000000000..6a454f113
--- /dev/null
+++ b/thermosphere/src/cpu/hvisor_cpu_instructions.hpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2019-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "../defines.hpp"
+
+// Statement-expression applying a one-input/one-output instruction "op" to
+// "val"; "sz" picks the u32/u64 result type and "regalloc" the matching
+// register-width prefix (w for 32-bit, x for 64-bit).
+#define _ASM_ARITHMETIC_UNARY_HELPER(sz, regalloc, op) ({\
+    u##sz res;\
+    __asm__ __volatile__ (STRINGIZE(op) " %" STRINGIZE(regalloc) "[res], %" STRINGIZE(regalloc) "[val]" : [res] "=r" (res) : [val] "r" (val));\
+    res;\
+})
+
+#define DECLARE_SINGLE_ASM_INSN2(name, what) static inline void name() { __asm__ __volatile__ (what ::: "memory"); }
+#define DECLARE_SINGLE_ASM_INSN(name) static inline void name() { __asm__ __volatile__ (STRINGIZE(name) ::: "memory"); }
+
+namespace ams::hvisor::cpu {
+
+    // Reverses the bit order of a 32- or 64-bit integer (AArch64 RBIT).
+    template<typename T>
+    ALWAYS_INLINE static T rbit(T val)
+    {
+        static_assert(std::is_integral_v<T> && (sizeof(T) == 8 || sizeof(T) == 4));
+        if constexpr (sizeof(T) == 8) {
+            return _ASM_ARITHMETIC_UNARY_HELPER(64, x, rbit);
+        } else {
+            return _ASM_ARITHMETIC_UNARY_HELPER(32, w, rbit);
+        }
+    }
+
+    DECLARE_SINGLE_ASM_INSN(wfi)
+    DECLARE_SINGLE_ASM_INSN(wfe)
+    DECLARE_SINGLE_ASM_INSN(sevl)
+    DECLARE_SINGLE_ASM_INSN(sev)
+    DECLARE_SINGLE_ASM_INSN2(dmb, "dmb ish")
+    DECLARE_SINGLE_ASM_INSN2(dmbSy, "dmb sy")
+    DECLARE_SINGLE_ASM_INSN2(dsb, "dsb ish")
+    DECLARE_SINGLE_ASM_INSN2(dsbSy, "dsb sy")
+    DECLARE_SINGLE_ASM_INSN(isb)
+
+    DECLARE_SINGLE_ASM_INSN2(TlbInvalidateEl2, "tlbi alle2is")
+    DECLARE_SINGLE_ASM_INSN2(TlbInvalidateEl1, "tlbi vmalle1is")
+    DECLARE_SINGLE_ASM_INSN2(TlbInvalidateEl1Stage12, "tlbi alle1is")
+
+    // Invalidates the EL2 TLB entry for a single address, inner-shareable.
+    ALWAYS_INLINE static void TlbInvalidateEl2Page(uintptr_t addr)
+    {
+        __asm__ __volatile__ ("tlbi vae2is, %0" :: "r"(addr) : "memory");
+    }
+
+}
+
+#undef DECLARE_SINGLE_ASM_INSN
+#undef DECLARE_SINGLE_ASM_INSN2
+#undef _ASM_ARITHMETIC_UNARY_HELPER
diff --git a/thermosphere/src/defines.hpp b/thermosphere/src/defines.hpp
index 05aa8b135..d715c0142 100644
--- a/thermosphere/src/defines.hpp
+++ b/thermosphere/src/defines.hpp
@@ -24,4 +24,5 @@
#include
#include
+#include "preprocessor.h"
#include "debug_log.h"
diff --git a/thermosphere/src/hvisor_synchronization.cpp b/thermosphere/src/hvisor_synchronization.cpp
new file mode 100644
index 000000000..84d302317
--- /dev/null
+++ b/thermosphere/src/hvisor_synchronization.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2019-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hvisor_synchronization.hpp"
+#include "core_ctx.h"
+
+namespace ams::hvisor {
+
+    // Acquires the lock: spins until the word can be changed 0 -> 1 with an
+    // exclusive ldaxr/stxr pair; wfe keeps the core idle while it is held.
+    void Spinlock::lock()
+    {
+        u32 tmp1;
+        const u32 tmp2 = 1;
+        __asm__ __volatile__(
+            "prfm pstl1keep, %[val] \n"
+            "sevl \n"
+            "1: \n"
+            " wfe \n"
+            " 2: \n"
+            " ldaxr %[tmp1], %[val] \n"
+            " cbnz %[tmp1], 1b \n"
+            " stxr %[tmp1], %[tmp2], %[val] \n"
+            " cbnz %[tmp1], 2b \n"
+            : [tmp1] "=&r"(tmp1), [val] "+Q" (m_val)
+            : [tmp2] "r"(tmp2)
+            : "cc", "memory"
+        );
+    }
+
+    // Releases the lock with a store-release of zero (wzr).
+    void Spinlock::unlock() noexcept
+    {
+        __asm__ __volatile__("stlr wzr, %[val]" : [val] "=Q" (m_val) :: "memory");
+    }
+
+    // Clears the calling core's bit in the barrier word, then waits (wfe)
+    // until every bit has been cleared.
+    void Barrier::Join()
+    {
+        const u32 mask = BIT(currentCoreCtx->coreId);
+        u32 newval, tmp;
+        __asm__ __volatile__(
+            "prfm pstl1keep, %[val] \n"
+
+            /* Fetch-and */
+            "1: \n"
+            " ldaxr %[newval], %[val] \n"
+            " bic %[newval], %[newval], %[mask] \n"
+            " stlxr %[tmp], %[newval], %[val] \n"
+            " cbnz %[tmp], 1b \n"
+
+            /* Check if now/already 0, wait if not */
+            "cbz %[newval], 3f \n"
+
+            /* Event will be signaled if the stlxr succeeds for another core... */
+            "2: \n"
+            " wfe \n"
+            " ldaxr %[newval], %[val] \n"
+            " cbnz %[newval], 2b \n"
+            "3: \n"
+            : [newval] "=&r"(newval), [tmp] "=&r" (tmp), [val] "+Q" (m_val)
+            : [mask] "r"(mask)
+            : "cc", "memory"
+        );
+    }
+
+    // Reentrant acquire: the tag (coreId + 1, so never 0) marks the owning
+    // core; re-locking by the same core only bumps the nesting count.
+    void RecursiveSpinlock::lock()
+    {
+        u32 tag = currentCoreCtx->coreId + 1;
+        if (AMS_LIKELY(tag != m_tag)) {
+            m_spinlock.lock();
+            m_tag = tag;
+            m_count = 1;
+        } else {
+            ++m_count;
+        }
+    }
+
+    // Release: the underlying spinlock is dropped only at nesting depth 0.
+    void RecursiveSpinlock::unlock() noexcept
+    {
+        if (AMS_LIKELY(--m_count == 0)) {
+            m_tag = 0;
+            m_spinlock.unlock();
+        }
+    }
+}
diff --git a/thermosphere/src/hvisor_synchronization.hpp b/thermosphere/src/hvisor_synchronization.hpp
new file mode 100644
index 000000000..d87ee207d
--- /dev/null
+++ b/thermosphere/src/hvisor_synchronization.hpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "defines.hpp"
+
+namespace ams::hvisor {
+
+    // Busy-wait lock built on exclusive load/store (see the .cpp).
+    // Non-reentrant; RecursiveSpinlock below allows same-core re-locking.
+    class Spinlock final {
+        NON_COPYABLE(Spinlock);
+        NON_MOVEABLE(Spinlock);
+        private:
+            u32 m_val = 0;
+        public:
+            Spinlock() = default;
+            void lock();
+            void unlock() noexcept;
+    };
+
+    // Rendezvous point: each participant clears its bit in m_val on Join.
+    class Barrier final {
+        NON_COPYABLE(Barrier);
+        NON_MOVEABLE(Barrier);
+        private:
+            u32 m_val = 0;
+        public:
+            Barrier() = default;
+            void Join();
+
+            // Arms the barrier with the bitmask of expected participants.
+            constexpr void Reset(u32 val)
+            {
+                m_val = val;
+            }
+    };
+
+    // Spinlock that the owning core may re-acquire; nesting is counted.
+    class RecursiveSpinlock final {
+        NON_COPYABLE(RecursiveSpinlock);
+        NON_MOVEABLE(RecursiveSpinlock);
+        private:
+            Spinlock m_spinlock{};
+            u32 m_tag = 0;
+            u32 m_count = 0;
+        public:
+            RecursiveSpinlock() = default;
+            void lock();
+            void unlock() noexcept;
+    };
+
+}