kern: use new AtomicRef, use Atomic<bool>

Repository: https://github.com/Atmosphere-NX/Atmosphere.git
Parent commit: aed9d3f535
This commit:   20716cb3de
7 changed files with 334 additions and 160 deletions
@@ -39,13 +39,13 @@ namespace ams::kern {
 static_assert(ams::svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);

 struct SchedulingState {
-util::Atomic<u8> needs_scheduling{false};
+util::Atomic<bool> needs_scheduling{false};
 bool interrupt_task_runnable{false};
 bool should_count_idle{false};
 u64 idle_count{0};
 KThread *highest_priority_thread{nullptr};
 void *idle_thread_stack{nullptr};
-util::Atomic<KThread *> prev_thread{nullptr};
+KThread *prev_thread{nullptr};
 KInterruptTaskManager *interrupt_task_manager{nullptr};

 constexpr SchedulingState() = default;
@@ -100,7 +100,7 @@ namespace ams::kern {
 }

 ALWAYS_INLINE KThread *GetPreviousThread() const {
-return m_state.prev_thread.Load<std::memory_order_relaxed>();
+return m_state.prev_thread;
 }

 ALWAYS_INLINE KThread *GetSchedulerCurrentThread() const {
@@ -76,18 +76,20 @@ namespace ams::kern {
 NON_MOVEABLE(KSlabHeapBase);
 private:
 size_t m_obj_size{};
-util::Atomic<uintptr_t> m_peak{0};
+uintptr_t m_peak{};
 uintptr_t m_start{};
 uintptr_t m_end{};
 private:
 ALWAYS_INLINE void UpdatePeakImpl(uintptr_t obj) {
+const util::AtomicRef<uintptr_t> peak_ref(m_peak);
+
 const uintptr_t alloc_peak = obj + this->GetObjectSize();
-uintptr_t cur_peak = m_peak.Load<std::memory_order_relaxed>();
+uintptr_t cur_peak = m_peak;
 do {
 if (alloc_peak <= cur_peak) {
 break;
 }
-} while (!m_peak.CompareExchangeStrong(cur_peak, alloc_peak));
+} while (!peak_ref.CompareExchangeStrong(cur_peak, alloc_peak));
 }
 public:
 constexpr KSlabHeapBase() = default;
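The hunk above turns m_peak into a plain uintptr_t and wraps it in a util::AtomicRef only for the compare-and-swap loop that advances the recorded peak. The same "advance only if larger" idiom can be sketched in standalone C++20 with std::atomic_ref standing in for util::AtomicRef; the type and function names below are illustrative, not taken from the Atmosphere sources.

    #include <atomic>
    #include <cstdint>

    struct SlabPeakTracker {
        std::uintptr_t m_peak{};  /* plain member; atomicity is supplied at the access site */

        void UpdatePeak(std::uintptr_t alloc_peak) {
            const std::atomic_ref<std::uintptr_t> peak_ref(m_peak);

            std::uintptr_t cur_peak = peak_ref.load(std::memory_order_relaxed);
            do {
                if (alloc_peak <= cur_peak) {
                    break;  /* another thread already recorded an equal or higher peak */
                }
            } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak));
        }
    };

On failure, compare_exchange_strong reloads the current value into cur_peak, so the loop retries against fresh data exactly as the kernel code does.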
@@ -110,8 +112,7 @@ namespace ams::kern {
 const size_t num_obj = (memory_size / obj_size);
 m_start = reinterpret_cast<uintptr_t>(memory);
 m_end = m_start + num_obj * obj_size;
-m_peak.Store<std::memory_order_relaxed>(m_start);
+m_peak = m_start;

 /* Free the objects. */
 u8 *cur = reinterpret_cast<u8 *>(m_end);
@@ -175,7 +176,7 @@ namespace ams::kern {
 }

 ALWAYS_INLINE size_t GetPeakIndex() const {
-return this->GetObjectIndex(reinterpret_cast<const void *>(m_peak.Load<std::memory_order_relaxed>()));
+return this->GetObjectIndex(reinterpret_cast<const void *>(m_peak));
 }

 ALWAYS_INLINE uintptr_t GetSlabHeapAddress() const {
@@ -225,7 +225,7 @@ namespace ams::kern {
 s32 m_original_physical_ideal_core_id{};
 s32 m_num_core_migration_disables{};
 ThreadState m_thread_state{};
-util::Atomic<u8> m_termination_requested{false};
+util::Atomic<bool> m_termination_requested{false};
 bool m_wait_cancelled{};
 bool m_cancellable{};
 bool m_signaled{};
@@ -246,9 +246,9 @@ namespace ams::kern {
 if (cur_process != nullptr) {
 /* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */
 if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == m_core_id)) {
-m_state.prev_thread.Store<std::memory_order_relaxed>(cur_thread);
+m_state.prev_thread = cur_thread;
 } else {
-m_state.prev_thread.Store<std::memory_order_relaxed>(nullptr);
+m_state.prev_thread = nullptr;
 }
 }

@@ -270,9 +270,12 @@ namespace ams::kern {
 void KScheduler::ClearPreviousThread(KThread *thread) {
 MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
 for (size_t i = 0; i < cpu::NumCores; ++i) {
+/* Get an atomic reference to the core scheduler's previous thread. */
+const util::AtomicRef<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_state.prev_thread);
+
 /* Atomically clear the previous thread if it's our target. */
 KThread *compare = thread;
-Kernel::GetScheduler(static_cast<s32>(i)).m_state.prev_thread.CompareExchangeStrong(compare, nullptr);
+prev_thread.CompareExchangeStrong(compare, nullptr);
 }
 }

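ClearPreviousThread only clears a core's prev_thread slot if that slot still points at the target thread, which is exactly a conditional clear via compare-and-swap. A minimal standalone sketch of that pattern, using std::atomic_ref over a plain pointer (Thread and ClearIfPrevious are illustrative stand-ins, not Atmosphere identifiers):

    #include <atomic>

    struct Thread { int id; };

    /* Clears the slot only if it still refers to target; any concurrent update wins otherwise. */
    inline void ClearIfPrevious(Thread *&slot, Thread *target) {
        const std::atomic_ref<Thread *> ref(slot);

        Thread *compare = target;
        ref.compare_exchange_strong(compare, nullptr);
    }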
@@ -1184,7 +1184,7 @@ namespace ams::kern {
 /* Determine if this is the first termination request. */
 const bool first_request = [&] ALWAYS_INLINE_LAMBDA () -> bool {
 /* Perform an atomic compare-and-swap from false to true. */
-u8 expected = false;
+bool expected = false;
 return m_termination_requested.CompareExchangeStrong(expected, true);
 }();

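With m_termination_requested now a util::Atomic<bool>, the lambda above is the usual "exactly one caller wins" idiom: a single compare-and-swap from false to true succeeds only for the first requester. The same behaviour, sketched with std::atomic<bool> (RequestTermination is an illustrative name, not from the commit):

    #include <atomic>

    std::atomic<bool> g_termination_requested{false};

    /* Returns true for exactly the caller that flips the flag from false to true. */
    bool RequestTermination() {
        bool expected = false;
        return g_termination_requested.compare_exchange_strong(expected, true);
    }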
@@ -100,6 +100,121 @@ namespace ams::util {
 #undef AMS_UTIL_IMPL_DEFINE_ATOMIC_STORE_EXCLUSIVE_FUNCTION

+template<UsableAtomicType T>
+constexpr ALWAYS_INLINE T ConvertToTypeForAtomic(AtomicStorage<T> s) {
+if constexpr (std::integral<T>) {
+return static_cast<T>(s);
+} else if constexpr(std::is_pointer<T>::value) {
+return reinterpret_cast<T>(s);
+} else {
+return std::bit_cast<T>(s);
+}
+}
+
+template<UsableAtomicType T>
+constexpr ALWAYS_INLINE AtomicStorage<T> ConvertToStorageForAtomic(T arg) {
+if constexpr (std::integral<T>) {
+return static_cast<AtomicStorage<T>>(arg);
+} else if constexpr(std::is_pointer<T>::value) {
+if (std::is_constant_evaluated() && arg == nullptr) {
+return 0;
+}
+
+return reinterpret_cast<AtomicStorage<T>>(arg);
+} else {
+return std::bit_cast<AtomicStorage<T>>(arg);
+}
+}
+
+template<std::memory_order Order, std::unsigned_integral StorageType>
+ALWAYS_INLINE StorageType AtomicLoadImpl(volatile StorageType * const p) {
+if constexpr (Order != std::memory_order_relaxed) {
+return ::ams::util::impl::LoadAcquireForAtomic(p);
+} else {
+return *p;
+}
+}
+
+template<std::memory_order Order, std::unsigned_integral StorageType>
+ALWAYS_INLINE void AtomicStoreImpl(volatile StorageType * const p, const StorageType s) {
+if constexpr (Order != std::memory_order_relaxed) {
+::ams::util::impl::StoreReleaseForAtomic(p, s);
+} else {
+*p = s;
+}
+}
+
+template<std::memory_order Order, std::unsigned_integral StorageType>
+ALWAYS_INLINE StorageType LoadExclusiveForAtomicByMemoryOrder(volatile StorageType * const p) {
+if constexpr (Order == std::memory_order_relaxed) {
+return ::ams::util::impl::LoadExclusiveForAtomic(p);
+} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
+return ::ams::util::impl::LoadAcquireExclusiveForAtomic(p);
+} else if constexpr (Order == std::memory_order_release) {
+return ::ams::util::impl::LoadExclusiveForAtomic(p);
+} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
+return ::ams::util::impl::LoadAcquireExclusiveForAtomic(p);
+} else {
+static_assert(Order != Order, "Invalid memory order");
+}
+}
+
+template<std::memory_order Order, std::unsigned_integral StorageType>
+ALWAYS_INLINE bool StoreExclusiveForAtomicByMemoryOrder(volatile StorageType * const p, const StorageType s) {
+if constexpr (Order == std::memory_order_relaxed) {
+return ::ams::util::impl::StoreExclusiveForAtomic(p, s);
+} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
+return ::ams::util::impl::StoreExclusiveForAtomic(p, s);
+} else if constexpr (Order == std::memory_order_release) {
+return ::ams::util::impl::StoreReleaseExclusiveForAtomic(p, s);
+} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
+return ::ams::util::impl::StoreReleaseExclusiveForAtomic(p, s);
+} else {
+static_assert(Order != Order, "Invalid memory order");
+}
+}
+
+template<std::memory_order Order, std::unsigned_integral StorageType>
+ALWAYS_INLINE StorageType AtomicExchangeImpl(volatile StorageType * const p, const StorageType s) {
+StorageType current;
+do {
+current = ::ams::util::impl::LoadExclusiveForAtomicByMemoryOrder<Order>(p);
+} while(AMS_UNLIKELY(!impl::StoreExclusiveForAtomicByMemoryOrder<Order>(p, s)));
+
+return current;
+}
+
+template<std::memory_order Order, UsableAtomicType T>
+ALWAYS_INLINE bool AtomicCompareExchangeWeakImpl(volatile AtomicStorage<T> * const p, T &expected, T desired) {
+const AtomicStorage<T> e = ::ams::util::impl::ConvertToStorageForAtomic(expected);
+const AtomicStorage<T> d = ::ams::util::impl::ConvertToStorageForAtomic(desired);
+
+const AtomicStorage<T> current = ::ams::util::impl::LoadExclusiveForAtomicByMemoryOrder<Order>(p);
+if (AMS_UNLIKELY(current != e)) {
+impl::ClearExclusiveForAtomic();
+expected = ::ams::util::impl::ConvertToTypeForAtomic<T>(current);
+return false;
+}
+
+return AMS_LIKELY(impl::StoreExclusiveForAtomicByMemoryOrder<Order>(p, d));
+}
+
+template<std::memory_order Order, UsableAtomicType T>
+ALWAYS_INLINE bool AtomicCompareExchangeStrongImpl(volatile AtomicStorage<T> * const p, T &expected, T desired) {
+const AtomicStorage<T> e = ::ams::util::impl::ConvertToStorageForAtomic(expected);
+const AtomicStorage<T> d = ::ams::util::impl::ConvertToStorageForAtomic(desired);
+
+do {
+if (const AtomicStorage<T> current = ::ams::util::impl::LoadExclusiveForAtomicByMemoryOrder<Order>(p); AMS_UNLIKELY(current != e)) {
+impl::ClearExclusiveForAtomic();
+expected = ::ams::util::impl::ConvertToTypeForAtomic<T>(current);
+return false;
+}
+} while (AMS_UNLIKELY(!impl::StoreExclusiveForAtomicByMemoryOrder<Order>(p, d)));
+
+return true;
+}

 }

 template<impl::UsableAtomicType T>
|
||||||
using DifferenceType = typename std::conditional<IsIntegral, T, typename std::conditional<IsPointer, std::ptrdiff_t, void>::type>::type;
|
using DifferenceType = typename std::conditional<IsIntegral, T, typename std::conditional<IsPointer, std::ptrdiff_t, void>::type>::type;
|
||||||
|
|
||||||
static constexpr ALWAYS_INLINE T ConvertToType(StorageType s) {
|
static constexpr ALWAYS_INLINE T ConvertToType(StorageType s) {
|
||||||
if constexpr (std::integral<T>) {
|
return impl::ConvertToTypeForAtomic<T>(s);
|
||||||
return static_cast<T>(s);
|
|
||||||
} else if constexpr(std::is_pointer<T>::value) {
|
|
||||||
return reinterpret_cast<T>(s);
|
|
||||||
} else {
|
|
||||||
return std::bit_cast<T>(s);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static constexpr ALWAYS_INLINE StorageType ConvertToStorage(T arg) {
|
static constexpr ALWAYS_INLINE StorageType ConvertToStorage(T arg) {
|
||||||
if constexpr (std::integral<T>) {
|
return impl::ConvertToStorageForAtomic<T>(arg);
|
||||||
return static_cast<StorageType>(arg);
|
|
||||||
} else if constexpr(std::is_pointer<T>::value) {
|
|
||||||
if (std::is_constant_evaluated() && arg == nullptr) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
return reinterpret_cast<StorageType>(arg);
|
|
||||||
} else {
|
|
||||||
return std::bit_cast<StorageType>(arg);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
private:
|
private:
|
||||||
StorageType m_v;
|
StorageType m_v;
|
||||||
|
@@ -157,148 +256,31 @@ namespace ams::util {
 return desired;
 }

+ALWAYS_INLINE operator T() const { return this->Load(); }
+
 template<std::memory_order Order = std::memory_order_seq_cst>
 ALWAYS_INLINE T Load() const {
-if constexpr (Order != std::memory_order_relaxed) {
-return ConvertToType(impl::LoadAcquireForAtomic(this->GetStoragePointer()));
-} else {
-return ConvertToType(*this->GetStoragePointer());
-}
+return ConvertToType(impl::AtomicLoadImpl<Order>(this->GetStoragePointer()));
 }

 template<std::memory_order Order = std::memory_order_seq_cst>
 ALWAYS_INLINE void Store(T arg) {
-if constexpr (Order != std::memory_order_relaxed) {
-impl::StoreReleaseForAtomic(this->GetStoragePointer(), ConvertToStorage(arg));
-} else {
-*this->GetStoragePointer() = ConvertToStorage(arg);
-}
+return impl::AtomicStoreImpl<Order>(this->GetStoragePointer(), ConvertToStorage(arg));
 }

 template<std::memory_order Order = std::memory_order_seq_cst>
 ALWAYS_INLINE T Exchange(T arg) {
-volatile StorageType * const p = this->GetStoragePointer();
-const StorageType s = ConvertToStorage(arg);
-
-StorageType current;
-
-if constexpr (Order == std::memory_order_relaxed) {
-do {
-current = impl::LoadExclusiveForAtomic(p);
-} while (AMS_UNLIKELY(!impl::StoreExclusiveForAtomic(p, s)));
-} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
-do {
-current = impl::LoadAcquireExclusiveForAtomic(p);
-} while (AMS_UNLIKELY(!impl::StoreExclusiveForAtomic(p, s)));
-} else if constexpr (Order == std::memory_order_release) {
-do {
-current = impl::LoadExclusiveForAtomic(p);
-} while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic(p, s)));
-} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
-do {
-current = impl::LoadAcquireExclusiveForAtomic(p);
-} while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic(p, s)));
-} else {
-static_assert(Order != Order, "Invalid memory order");
-}
-
-return current;
+return ConvertToType(impl::AtomicExchangeImpl<Order>(this->GetStoragePointer(), ConvertToStorage(arg)));
 }

 template<std::memory_order Order = std::memory_order_seq_cst>
 ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) {
-volatile StorageType * const p = this->GetStoragePointer();
-const StorageType e = ConvertToStorage(expected);
-const StorageType d = ConvertToStorage(desired);
-
-if constexpr (Order == std::memory_order_relaxed) {
-const StorageType current = impl::LoadExclusiveForAtomic(p);
-if (AMS_UNLIKELY(current != e)) {
-impl::ClearExclusiveForAtomic();
-expected = ConvertToType(current);
-return false;
-}
-
-return AMS_LIKELY(impl::StoreExclusiveForAtomic(p, d));
-} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
-const StorageType current = impl::LoadAcquireExclusiveForAtomic(p);
-if (AMS_UNLIKELY(current != e)) {
-impl::ClearExclusiveForAtomic();
-expected = ConvertToType(current);
-return false;
-}
-
-return AMS_LIKELY(impl::StoreExclusiveForAtomic(p, d));
-} else if constexpr (Order == std::memory_order_release) {
-const StorageType current = impl::LoadExclusiveForAtomic(p);
-if (AMS_UNLIKELY(current != e)) {
-impl::ClearExclusiveForAtomic();
-expected = ConvertToType(current);
-return false;
-}
-
-return AMS_LIKELY(impl::StoreReleaseExclusiveForAtomic(p, d));
-} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
-const StorageType current = impl::LoadAcquireExclusiveForAtomic(p);
-if (AMS_UNLIKELY(current != e)) {
-impl::ClearExclusiveForAtomic();
-expected = ConvertToType(current);
-return false;
-}
-
-return AMS_LIKELY(impl::StoreReleaseExclusiveForAtomic(p, d));
-} else {
-static_assert(Order != Order, "Invalid memory order");
-}
+return impl::AtomicCompareExchangeWeakImpl<Order, T>(this->GetStoragePointer(), expected, desired);
 }

 template<std::memory_order Order = std::memory_order_seq_cst>
 ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) {
-volatile StorageType * const p = this->GetStoragePointer();
-const StorageType e = ConvertToStorage(expected);
-const StorageType d = ConvertToStorage(desired);
-
-if constexpr (Order == std::memory_order_relaxed) {
-StorageType current;
-do {
-if (current = impl::LoadExclusiveForAtomic(p); current != e) {
-impl::ClearExclusiveForAtomic();
-expected = ConvertToType(current);
-return false;
-}
-} while (!impl::StoreExclusiveForAtomic(p, d));
-} else if constexpr (Order == std::memory_order_consume || Order == std::memory_order_acquire) {
-StorageType current;
-do {
-if (current = impl::LoadAcquireExclusiveForAtomic(p); current != e) {
-impl::ClearExclusiveForAtomic();
-expected = ConvertToType(current);
-return false;
-}
-} while (!impl::StoreExclusiveForAtomic(p, d));
-} else if constexpr (Order == std::memory_order_release) {
-StorageType current;
-do {
-if (current = impl::LoadExclusiveForAtomic(p); current != e) {
-impl::ClearExclusiveForAtomic();
-expected = ConvertToType(current);
-return false;
-}
-} while (!impl::StoreReleaseExclusiveForAtomic(p, d));
-} else if constexpr (Order == std::memory_order_acq_rel || Order == std::memory_order_seq_cst) {
-StorageType current;
-do {
-if (current = impl::LoadAcquireExclusiveForAtomic(p); current != e) {
-impl::ClearExclusiveForAtomic();
-expected = ConvertToType(current);
-return false;
-}
-} while (!impl::StoreReleaseExclusiveForAtomic(p, d));
-} else {
-static_assert(Order != Order, "Invalid memory order");
-}
-
-return true;
+return impl::AtomicCompareExchangeStrongImpl<Order, T>(this->GetStoragePointer(), expected, desired);
 }

 #define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATOR_, _POINTER_ALLOWED_) \
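After this change every Atomic operation is a thin wrapper over the shared impl helpers, and the memory order stays a template parameter defaulting to std::memory_order_seq_cst, so ordinary call sites remain sequentially consistent while hot paths can opt into weaker orderings. A minimal self-contained illustration of that API shape with std::atomic underneath (Flag is an illustrative type, not from the diff):

    #include <atomic>

    class Flag {
        private:
            std::atomic<bool> m_v{false};
        public:
            template<std::memory_order Order = std::memory_order_seq_cst>
            bool Load() const { return m_v.load(Order); }

            template<std::memory_order Order = std::memory_order_seq_cst>
            void Store(bool v) { m_v.store(v, Order); }
    };

    /* Usage: f.Load() is seq_cst by default; f.Load<std::memory_order_relaxed>() opts out. */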
@@ -341,5 +323,108 @@ namespace ams::util {
 ALWAYS_INLINE T operator--(int) { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1); }
 };

+template<impl::UsableAtomicType T>
+class AtomicRef {
+NON_MOVEABLE(AtomicRef);
+public:
+static constexpr size_t RequiredAlignment = std::max<size_t>(sizeof(T), alignof(T));
+private:
+using StorageType = impl::AtomicStorage<T>;
+static_assert(sizeof(StorageType) == sizeof(T));
+static_assert(alignof(StorageType) >= alignof(T));
+
+static constexpr bool IsIntegral = std::integral<T>;
+static constexpr bool IsPointer = std::is_pointer<T>::value;
+
+static constexpr bool HasArithmeticFunctions = IsIntegral || IsPointer;
+
+using DifferenceType = typename std::conditional<IsIntegral, T, typename std::conditional<IsPointer, std::ptrdiff_t, void>::type>::type;
+
+static constexpr ALWAYS_INLINE T ConvertToType(StorageType s) {
+return impl::ConvertToTypeForAtomic<T>(s);
+}
+
+static constexpr ALWAYS_INLINE StorageType ConvertToStorage(T arg) {
+return impl::ConvertToStorageForAtomic<T>(arg);
+}
+private:
+volatile StorageType * const m_p;
+private:
+ALWAYS_INLINE volatile StorageType *GetStoragePointer() const { return m_p; }
+public:
+explicit ALWAYS_INLINE AtomicRef(T &t) : m_p(reinterpret_cast<volatile StorageType *>(std::addressof(t))) { /* ... */ }
+ALWAYS_INLINE AtomicRef(const AtomicRef &) noexcept = default;
+
+AtomicRef() = delete;
+AtomicRef &operator=(const AtomicRef &) = delete;
+
+ALWAYS_INLINE T operator=(T desired) const { return const_cast<AtomicRef *>(this)->Store(desired); }
+
+ALWAYS_INLINE operator T() const { return this->Load(); }
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE T Load() const {
+return ConvertToType(impl::AtomicLoadImpl<Order>(this->GetStoragePointer()));
+}
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE void Store(T arg) const {
+return impl::AtomicStoreImpl<Order>(this->GetStoragePointer(), ConvertToStorage(arg));
+}
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE T Exchange(T arg) const {
+return ConvertToType(impl::AtomicExchangeImpl<Order>(this->GetStoragePointer(), ConvertToStorage(arg)));
+}
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) const {
+return impl::AtomicCompareExchangeWeakImpl<Order, T>(this->GetStoragePointer(), expected, desired);
+}
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) const {
+return impl::AtomicCompareExchangeStrongImpl<Order, T>(this->GetStoragePointer(), expected, desired);
+}
+
+#define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATOR_, _POINTER_ALLOWED_) \
+template<bool Enable = (IsIntegral || (_POINTER_ALLOWED_ && IsPointer)), typename = typename std::enable_if<Enable, void>::type> \
+ALWAYS_INLINE T Fetch ## _OPERATION_(DifferenceType arg) const { \
+static_assert(Enable == (IsIntegral || (_POINTER_ALLOWED_ && IsPointer))); \
+volatile StorageType * const p = this->GetStoragePointer(); \
+\
+StorageType current; \
+do { \
+current = impl::LoadAcquireExclusiveForAtomic<StorageType>(p); \
+} while (AMS_UNLIKELY(!impl::StoreReleaseExclusiveForAtomic<StorageType>(p, ConvertToStorage(ConvertToType(current) _OPERATOR_ arg)))); \
+return ConvertToType(current); \
+} \
+\
+template<bool Enable = (IsIntegral || (_POINTER_ALLOWED_ && IsPointer)), typename = typename std::enable_if<Enable, void>::type> \
+ALWAYS_INLINE T operator _OPERATOR_##=(DifferenceType arg) const { \
+static_assert(Enable == (IsIntegral || (_POINTER_ALLOWED_ && IsPointer))); \
+return this->Fetch ## _OPERATION_(arg) _OPERATOR_ arg; \
+}
+
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Add, +, true)
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Sub, -, true)
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(And, &, false)
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Or, |, false)
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Xor, ^, false)
+
+#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION
+
+template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
+ALWAYS_INLINE T operator++() const { static_assert(Enable == HasArithmeticFunctions); return this->FetchAdd(1) + 1; }
+
+template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
+ALWAYS_INLINE T operator++(int) const { static_assert(Enable == HasArithmeticFunctions); return this->FetchAdd(1); }
+
+template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
+ALWAYS_INLINE T operator--() const { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1) - 1; }
+
+template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
+ALWAYS_INLINE T operator--(int) const { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1); }
+};

 }
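This AtomicRef backend requires the referenced object to be suitably aligned (here RequiredAlignment is the larger of sizeof(T) and alignof(T)) and then operates through a pointer to the raw storage. The standard library expresses the same contract through std::atomic_ref::required_alignment; a small self-contained sketch of honouring it (Counters and Bump are illustrative names):

    #include <atomic>
    #include <cstdint>

    struct Counters {
        /* Over-align the field so an atomic_ref constructed over it is always valid. */
        alignas(std::atomic_ref<std::uint64_t>::required_alignment) std::uint64_t hits{};
    };

    inline std::uint64_t Bump(Counters &c) {
        return std::atomic_ref<std::uint64_t>(c.hits).fetch_add(1) + 1;
    }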
@@ -74,6 +74,8 @@ namespace ams::util {
 return (m_v = desired);
 }

+ALWAYS_INLINE operator T() const { return this->Load(); }
+
 template<std::memory_order Order = std::memory_order_seq_cst>
 ALWAYS_INLINE T Load() const {
 return m_v.load(Order);
@@ -84,22 +86,21 @@ namespace ams::util {
 return m_v.store(Order);
 }

-template<std::memory_order Order>
+template<std::memory_order Order = std::memory_order_seq_cst>
 ALWAYS_INLINE T Exchange(T arg) {
 return m_v.exchange(arg, Order);
 }

-template<std::memory_order Order>
+template<std::memory_order Order = std::memory_order_seq_cst>
 ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) {
 return m_v.compare_exchange_weak(expected, desired, Order);
 }

-template<std::memory_order Order>
+template<std::memory_order Order = std::memory_order_seq_cst>
 ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) {
 return m_v.compare_exchange_strong(expected, desired, Order);
 }

-
 #define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATION_LOWER_, _OPERATOR_, _POINTER_ALLOWED_) \
 template<bool Enable = (IsIntegral || (_POINTER_ALLOWED_ && IsPointer)), typename = typename std::enable_if<Enable, void>::type> \
 ALWAYS_INLINE T Fetch ## _OPERATION_(DifferenceType arg) { \
@@ -134,5 +135,89 @@ namespace ams::util {
 ALWAYS_INLINE T operator--(int) { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1); }
 };

+template<impl::UsableAtomicType T>
+class AtomicRef {
+NON_MOVEABLE(AtomicRef);
+public:
+static constexpr size_t RequiredAlignment = std::atomic_ref<T>::required_alignment;
+private:
+static constexpr bool IsIntegral = std::integral<T>;
+static constexpr bool IsPointer = std::is_pointer<T>::value;
+
+static constexpr bool HasArithmeticFunctions = IsIntegral || IsPointer;
+
+using DifferenceType = typename std::conditional<IsIntegral, T, typename std::conditional<IsPointer, std::ptrdiff_t, void>::type>::type;
+private:
+static_assert(std::atomic_ref<T>::is_always_lock_free);
+private:
+std::atomic_ref<T> m_ref;
+public:
+explicit ALWAYS_INLINE AtomicRef(T &t) : m_ref(t) { /* ... */ }
+ALWAYS_INLINE AtomicRef(const AtomicRef &) noexcept = default;
+
+AtomicRef() = delete;
+AtomicRef &operator=(const AtomicRef &) = delete;
+
+ALWAYS_INLINE T operator=(T desired) const { return (m_ref = desired); }
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE T Load() const {
+return m_ref.load(Order);
+}
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE void Store(T arg) const {
+return m_ref.store(arg, Order);
+}
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE T Exchange(T arg) const {
+return m_ref.exchange(arg, Order);
+}
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE bool CompareExchangeWeak(T &expected, T desired) const {
+return m_ref.compare_exchange_weak(expected, desired, Order);
+}
+
+template<std::memory_order Order = std::memory_order_seq_cst>
+ALWAYS_INLINE bool CompareExchangeStrong(T &expected, T desired) const {
+return m_ref.compare_exchange_strong(expected, desired, Order);
+}
+
+#define AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(_OPERATION_, _OPERATION_LOWER_, _OPERATOR_, _POINTER_ALLOWED_) \
+template<bool Enable = (IsIntegral || (_POINTER_ALLOWED_ && IsPointer)), typename = typename std::enable_if<Enable, void>::type> \
+ALWAYS_INLINE T Fetch ## _OPERATION_(DifferenceType arg) const { \
+static_assert(Enable == (IsIntegral || (_POINTER_ALLOWED_ && IsPointer))); \
+return m_ref.fetch_##_OPERATION_LOWER_(arg); \
+} \
+\
+template<bool Enable = (IsIntegral || (_POINTER_ALLOWED_ && IsPointer)), typename = typename std::enable_if<Enable, void>::type> \
+ALWAYS_INLINE T operator _OPERATOR_##=(DifferenceType arg) const { \
+static_assert(Enable == (IsIntegral || (_POINTER_ALLOWED_ && IsPointer))); \
+return this->Fetch##_OPERATION_(arg) _OPERATOR_ arg; \
+}
+
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Add, add, +, true)
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Sub, sub, -, true)
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(And, and, &, false)
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Or, or, |, false)
+AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION(Xor, xor, ^, false)
+
+#undef AMS_UTIL_IMPL_DEFINE_ATOMIC_FETCH_OPERATE_FUNCTION
+
+template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
+ALWAYS_INLINE T operator++() const { static_assert(Enable == HasArithmeticFunctions); return this->FetchAdd(1) + 1; }
+
+template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
+ALWAYS_INLINE T operator++(int) const { static_assert(Enable == HasArithmeticFunctions); return this->FetchAdd(1); }
+
+template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
+ALWAYS_INLINE T operator--() const { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1) - 1; }
+
+template<bool Enable = HasArithmeticFunctions, typename = typename std::enable_if<Enable, void>::type>
+ALWAYS_INLINE T operator--(int) const { static_assert(Enable == HasArithmeticFunctions); return this->FetchSub(1); }
+};
+

 }
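The non-arm64 backend above is a thin forwarder over std::atomic_ref, so its observable behaviour matches the standard type directly. A small self-contained check of the increment and decrement semantics generated by the macro block (illustrative only; not part of the commit):

    #include <atomic>
    #include <cassert>

    int main() {
        unsigned value = 5;
        const std::atomic_ref<unsigned> ref(value);

        assert(ref.fetch_add(1) + 1 == 6);  /* operator++() is defined as FetchAdd(1) + 1 */
        assert(ref.fetch_sub(1) == 6);      /* operator--(int) is defined as FetchSub(1)  */
        assert(value == 5);
        return 0;
    }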