Mirror of https://github.com/Atmosphere-NX/Atmosphere.git, synced 2024-12-18 08:22:04 +00:00

kern: greatly improve codegen for atomics, scheduler

commit 4aa18b06e8 (parent f051f707ed)
19 changed files with 67 additions and 46 deletions
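
One pattern recurs throughout the diff below: implicit std::atomic conversions and overloaded operators (operator T(), ++, +=, --) are replaced with the explicit member functions .load(), .fetch_add(), and .fetch_sub(); small accessors are marked ALWAYS_INLINE; and std::atomic<bool> flags become std::atomic<u8>. The commit title says the goal is better codegen; the snippet below is only a minimal standalone sketch of the mechanical substitution, not kernel code, and the Counter type and its member names are hypothetical.

    #include <atomic>
    #include <cstdint>

    /* Hypothetical illustration of the substitution used throughout the diff. */
    class Counter {
        private:
            std::atomic<uint64_t> m_count{0};
            std::atomic<uint8_t>  m_dirty{0};   /* u8 instead of bool, as in the diff. */
        public:
            /* Before: return m_count;  (implicit load via operator T()) */
            /* After:  the load is spelled out explicitly.               */
            uint64_t GetCount() const { return m_count.load(); }

            /* Before: ++m_count; / --m_count;  After: explicit read-modify-writes. */
            void Increment() { m_count.fetch_add(1); }
            void Decrement() { m_count.fetch_sub(1); }

            void MarkDirty() { m_dirty.store(1); }
            bool IsDirty() const { return m_dirty.load() != 0; }
    };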

@@ -85,7 +85,7 @@ namespace ams::kern {
         virtual KProcess *GetOwner() const { return nullptr; }

         u32 GetReferenceCount() const {
-            return m_ref_count;
+            return m_ref_count.load();
         }

         ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const {

@@ -49,9 +49,9 @@ namespace ams::kern {

         constexpr KVirtualAddress GetAddress() const { return m_address; }
         constexpr size_t GetSize() const { return m_size; }
-        constexpr size_t GetUsed() const { return m_used; }
-        constexpr size_t GetPeak() const { return m_peak; }
-        constexpr size_t GetCount() const { return m_count; }
+        constexpr size_t GetUsed() const { return m_used.load(); }
+        constexpr size_t GetPeak() const { return m_peak.load(); }
+        constexpr size_t GetCount() const { return m_count.load(); }

         constexpr bool IsInRange(KVirtualAddress addr) const {
             return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;

@@ -65,7 +65,7 @@ namespace ams::kern {

             /* Free blocks to memory. */
             u8 *cur = GetPointer<u8>(m_address + m_size);
-            for (size_t i = 0; i < m_count; i++) {
+            for (size_t i = 0; i < sz / sizeof(T); i++) {
                 cur -= sizeof(T);
                 this->GetImpl()->Free(cur);
             }

@@ -84,13 +84,13 @@ namespace ams::kern {
             this->Initialize(page_allocator);

             /* Allocate until we have the correct number of objects. */
-            while (m_count < num_objects) {
+            while (m_count.load() < num_objects) {
                 auto *allocated = reinterpret_cast<T *>(m_page_allocator->Allocate());
                 MESOSPHERE_ABORT_UNLESS(allocated != nullptr);
                 for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
                     this->GetImpl()->Free(allocated + i);
                 }
-                m_count += sizeof(PageBuffer) / sizeof(T);
+                m_count.fetch_add(sizeof(PageBuffer) / sizeof(T));
             }
         }

@@ -106,7 +106,7 @@ namespace ams::kern {
                 for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
                     this->GetImpl()->Free(allocated + i);
                 }
-                m_count += sizeof(PageBuffer) / sizeof(T);
+                m_count.fetch_add(sizeof(PageBuffer) / sizeof(T));
             }
         }
     }

@@ -116,8 +116,8 @@ namespace ams::kern {
             new (allocated) T();

             /* Update our tracking. */
-            size_t used = ++m_used;
-            size_t peak = m_peak;
+            size_t used = m_used.fetch_add(1) + 1;
+            size_t peak = m_peak.load();
             while (peak < used) {
                 if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
                     break;

@@ -130,7 +130,7 @@ namespace ams::kern {

         void Free(T *t) {
             this->GetImpl()->Free(t);
-            --m_used;
+            m_used.fetch_sub(1);
         }
     };
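
The Allocate() hunk above tracks a high-water mark with a compare_exchange_weak loop. A minimal standalone sketch of that idiom follows; the g_used/g_peak names are illustrative, not the kernel's.

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> g_used{0};
    std::atomic<size_t> g_peak{0};

    /* Record one allocation and raise the peak if this allocation exceeded it. */
    void OnAllocate() {
        const size_t used = g_used.fetch_add(1) + 1;   /* value after the increment */
        size_t peak = g_peak.load();
        while (peak < used) {
            /* On failure, compare_exchange_weak reloads the current peak into 'peak',
               so the loop retries, or exits once another thread published a larger value. */
            if (g_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
                break;
            }
        }
    }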
|
@@ -303,6 +303,7 @@ namespace ams::kern {
                 const auto linear_id = handle_pack.Get<HandleLinearId>();
                 const auto reserved = handle_pack.Get<HandleReserved>();
                 MESOSPHERE_ASSERT(reserved == 0);
+                MESOSPHERE_UNUSED(reserved);

                 /* Validate our indexing information. */
                 if (raw_value == 0) {
@@ -50,7 +50,7 @@ namespace ams::kern {
                 }
             }

-            void Unlock() {
+            ALWAYS_INLINE void Unlock() {
                 MESOSPHERE_ASSERT_THIS();

                 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer());
@@ -65,8 +65,8 @@ namespace ams::kern {
             void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
             void UnlockSlowPath(uintptr_t cur_thread);

-            bool IsLocked() const { return m_tag != 0; }
-            bool IsLockedByCurrentThread() const { return (m_tag | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
+            ALWAYS_INLINE bool IsLocked() const { return m_tag.load() != 0; }
+            ALWAYS_INLINE bool IsLockedByCurrentThread() const { return (m_tag.load() | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
     };

     using KScopedLightLock = KScopedLock<KLightLock>;
@@ -203,54 +203,54 @@ namespace ams::kern {
             virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) = 0;
             virtual void FinalizeUpdate(PageLinkedList *page_list) = 0;

-            KPageTableImpl &GetImpl() { return m_impl; }
-            const KPageTableImpl &GetImpl() const { return m_impl; }
+            ALWAYS_INLINE KPageTableImpl &GetImpl() { return m_impl; }
+            ALWAYS_INLINE const KPageTableImpl &GetImpl() const { return m_impl; }

-            bool IsLockedByCurrentThread() const { return m_general_lock.IsLockedByCurrentThread(); }
+            ALWAYS_INLINE bool IsLockedByCurrentThread() const { return m_general_lock.IsLockedByCurrentThread(); }

-            bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
+            ALWAYS_INLINE bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
                 MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

                 return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr);
             }

-            bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+            ALWAYS_INLINE bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
                 MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

                 return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr, size);
             }

-            bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
+            ALWAYS_INLINE bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
                 MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

                 return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
             }

-            bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
+            ALWAYS_INLINE bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
                 MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

                 return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr, size);
             }

-            bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
+            ALWAYS_INLINE bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
                 MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());

                 return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
             }

-            bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
+            ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr) {
                 MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

                 return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr);
             }

-            bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
+            ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress virt_addr, size_t size) {
                 MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

                 return KMemoryLayout::IsHeapVirtualAddress(m_cached_virtual_heap_region, virt_addr, size);
             }

-            bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
+            ALWAYS_INLINE bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
                 return (m_address_space_start <= addr) && (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
             }
         private:
@@ -135,6 +135,7 @@ namespace ams::kern {
             }

             void UnpinThread(s32 core_id, KThread *thread) {
+                MESOSPHERE_UNUSED(thread);
                 MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
                 MESOSPHERE_ASSERT(thread != nullptr);
                 MESOSPHERE_ASSERT(m_pinned_threads[core_id] == thread);
@@ -38,7 +38,7 @@ namespace ams::kern {
     static_assert(ams::svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);

     struct SchedulingState {
-        std::atomic<bool> needs_scheduling;
+        std::atomic<u8> needs_scheduling;
         bool interrupt_task_thread_runnable;
         bool should_count_idle;
         u64 idle_count;

@@ -181,7 +181,7 @@ namespace ams::kern {
             KScopedInterruptDisable intr_disable;
             ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); };

-            if (m_state.needs_scheduling) {
+            if (m_state.needs_scheduling.load()) {
                 Schedule();
             }
         }
@@ -36,8 +36,8 @@ namespace ams::kern {
             }

             constexpr void Open() {
-                const size_t ref_count = ++m_reference_count;
-                MESOSPHERE_ASSERT(ref_count > 0);
+                ++m_reference_count;
+                MESOSPHERE_ASSERT(m_reference_count > 0);
             }

             constexpr bool Close() {
@@ -207,7 +207,7 @@ namespace ams::kern {
             s32 m_original_physical_ideal_core_id{};
             s32 m_num_core_migration_disables{};
             ThreadState m_thread_state{};
-            std::atomic<bool> m_termination_requested{};
+            std::atomic<u8> m_termination_requested{};
             bool m_wait_cancelled{};
             bool m_cancellable{};
             bool m_signaled{};

@@ -486,7 +486,7 @@ namespace ams::kern {
                 MESOSPHERE_UNUSED(core_id);
             }

-            s64 GetCpuTime() const { return m_cpu_time; }
+            s64 GetCpuTime() const { return m_cpu_time.load(); }

             s64 GetCpuTime(s32 core_id) const {
                 MESOSPHERE_ABORT_UNLESS(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));

@@ -530,7 +530,7 @@ namespace ams::kern {
             ALWAYS_INLINE void *GetKernelStackTop() const { return m_kernel_stack_top; }

             ALWAYS_INLINE bool IsTerminationRequested() const {
-                return m_termination_requested || this->GetRawState() == ThreadState_Terminated;
+                return m_termination_requested.load() || this->GetRawState() == ThreadState_Terminated;
             }

             size_t GetKernelStackUsage() const;
@@ -40,8 +40,10 @@ namespace ams::kern {
             MESOSPHERE_PANIC(__VA_ARGS__); \
         } \
     })
-#else
+#elif defined(MESOSPHERE_PRESERVE_ASSERTION_EXPRESSIONS)
+#define MESOSPHERE_ASSERT_IMPL(expr, ...) do { static_cast<void>(expr); } while (0)
+#else
 #define MESOSPHERE_ASSERT_IMPL(expr, ...) static_cast<void>(0)
 #endif

 #define MESOSPHERE_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(expr, "Assertion failed: %s\n", #expr)

@@ -56,8 +58,10 @@ namespace ams::kern {

 #ifdef MESOSPHERE_BUILD_FOR_AUDITING
 #define MESOSPHERE_AUDIT(expr) MESOSPHERE_ASSERT(expr)
-#else
+#elif defined(MESOSPHERE_PRESERVE_AUDIT_EXPRESSIONS)
+#define MESOSPHERE_AUDIT(expr) do { static_cast<void>(expr); } while (0)
+#else
 #define MESOSPHERE_AUDIT(expr) static_cast<void>(0)
 #endif

 #define MESOSPHERE_TODO(arg) ({ constexpr const char *__mesosphere_todo = arg; static_cast<void>(__mesosphere_todo); MESOSPHERE_PANIC("TODO (%s): %s\n", __PRETTY_FUNCTION__, __mesosphere_todo); })
@@ -109,7 +109,7 @@ namespace ams::kern::arch::arm64::cpu {
             /* Wait for a request to come in. */
             {
                 KScopedLightLock lk(m_cv_lock);
-                while ((m_target_cores & (1ul << core_id)) == 0) {
+                while ((m_target_cores.load() & (1ul << core_id)) == 0) {
                     m_cv.Wait(std::addressof(m_cv_lock));
                 }
             }

@@ -120,7 +120,7 @@ namespace ams::kern::arch::arm64::cpu {
             /* Broadcast, if there's nothing pending. */
             {
                 KScopedLightLock lk(m_cv_lock);
-                if (m_target_cores == 0) {
+                if (m_target_cores.load() == 0) {
                     m_cv.Broadcast();
                 }
             }

@@ -163,7 +163,7 @@ namespace ams::kern::arch::arm64::cpu {
             if ((op == Operation::InstructionMemoryBarrier) || (Kernel::GetState() == Kernel::State::Initializing)) {
                 /* Check that there's no on-going operation. */
                 MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
-                MESOSPHERE_ABORT_UNLESS(m_target_cores == 0);
+                MESOSPHERE_ABORT_UNLESS(m_target_cores.load() == 0);

                 /* Set operation. */
                 m_operation = op;

@@ -171,12 +171,13 @@ namespace ams::kern::arch::arm64::cpu {
                 /* For certain operations, we want to send an interrupt. */
                 m_target_cores = other_cores_mask;

-                const u64 target_mask = m_target_cores;
+                const u64 target_mask = m_target_cores.load();
+
                 DataSynchronizationBarrier();
                 Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);

                 this->ProcessOperation();
-                while (m_target_cores != 0) {
+                while (m_target_cores.load() != 0) {
                     cpu::Yield();
                 }

@@ -188,7 +189,7 @@ namespace ams::kern::arch::arm64::cpu {

                 /* Check that there's no on-going operation. */
                 MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
-                MESOSPHERE_ABORT_UNLESS(m_target_cores == 0);
+                MESOSPHERE_ABORT_UNLESS(m_target_cores.load() == 0);

                 /* Set operation. */
                 m_operation = op;

@@ -198,7 +199,7 @@ namespace ams::kern::arch::arm64::cpu {

                 /* Use the condvar. */
                 m_cv.Broadcast();
-                while (m_target_cores != 0) {
+                while (m_target_cores.load() != 0) {
                     m_cv.Wait(std::addressof(m_cv_lock));
                 }
@@ -208,6 +208,8 @@ namespace ams::kern::arch::arm64 {
     }

     Result KInterruptManager::BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) {
+        MESOSPHERE_UNUSED(core_id);
+
         R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());

         KScopedInterruptDisable di;

@@ -222,6 +224,8 @@ namespace ams::kern::arch::arm64 {
     }

     Result KInterruptManager::UnbindHandler(s32 irq, s32 core_id) {
+        MESOSPHERE_UNUSED(core_id);
+
         R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());

         KScopedInterruptDisable di;

@@ -244,6 +248,8 @@ namespace ams::kern::arch::arm64 {
     }

     Result KInterruptManager::ClearInterrupt(s32 irq, s32 core_id) {
+        MESOSPHERE_UNUSED(core_id);
+
         R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());

         KScopedInterruptDisable di;
@@ -1163,6 +1163,7 @@ namespace ams::kern::board::nintendo::nx {
     }

     void KDevicePageTable::UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force) {
+        MESOSPHERE_UNUSED(force);
         MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0);
         MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0);
@@ -84,6 +84,7 @@ namespace ams::kern::board::nintendo::nx {
         do {
             bool res = smc::ReadWriteRegister(std::addressof(value), PmcPhysicalAddress + APBDEV_PMC_PWRGATE_STATUS, 0, 0);
             MESOSPHERE_ASSERT(res);
+            MESOSPHERE_UNUSED(res);
         } while ((value & PWRGATE_STATUS_CE123_MASK) != 0);
     }
@@ -121,6 +121,7 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(reserved == 0);
         MESOSPHERE_ASSERT(linear_id != 0);
         MESOSPHERE_ASSERT(index < m_table_size);
+        MESOSPHERE_UNUSED(linear_id, reserved);

         /* Free the entry. */
         /* NOTE: This code does not check the linear id. */

@@ -143,6 +144,7 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(reserved == 0);
         MESOSPHERE_ASSERT(linear_id != 0);
         MESOSPHERE_ASSERT(index < m_table_size);
+        MESOSPHERE_UNUSED(reserved);

         /* Set the entry. */
         Entry *entry = std::addressof(m_table[index]);
@@ -16,9 +16,6 @@
 #include <mesosphere.hpp>
 #include <mesosphere/kern_select_page_table.hpp>

-#undef ALWAYS_INLINE_LAMBDA
-#define ALWAYS_INLINE_LAMBDA
-
 namespace ams::kern {

     Result KPageTableBase::InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end) {

@@ -3288,6 +3285,7 @@ namespace ams::kern {
         TraversalEntry next_entry;
         bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), aligned_src_start);
         MESOSPHERE_ASSERT(traverse_valid);
+        MESOSPHERE_UNUSED(traverse_valid);

         /* Prepare tracking variables. */
         KPhysicalAddress cur_block_addr = next_entry.phys_addr;
@@ -17,6 +17,9 @@

 namespace ams::kern {

+#pragma GCC push_options
+#pragma GCC optimize ("-O3")
+
     bool KScheduler::s_scheduler_update_needed;
     KScheduler::LockType KScheduler::s_scheduler_lock;
     KSchedulerPriorityQueue KScheduler::s_priority_queue;

@@ -607,4 +610,6 @@ namespace ams::kern {
         }
     }

+#pragma GCC pop_options
+
 }
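
The two scheduler hunks above bracket the implementation with #pragma GCC push_options / optimize ("-O3") / pop_options, so the functions in that region are compiled at -O3 even if the rest of the build uses a lower optimization level. Below is a minimal, GCC-specific sketch of the same construct; the SumSquares function is illustrative only.

    /* GCC-specific: raise the optimization level for everything between push and pop. */
    #pragma GCC push_options
    #pragma GCC optimize ("-O3")

    int SumSquares(const int *values, int count) {
        int sum = 0;
        for (int i = 0; i < count; i++) {
            sum += values[i] * values[i];
        }
        return sum;
    }

    #pragma GCC pop_options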
|
@@ -63,6 +63,7 @@ namespace ams::kern {
         m_tls_address = Null<KProcessAddress>;

         const uintptr_t kern_stack_top_address = reinterpret_cast<uintptr_t>(kern_stack_top);
+        MESOSPHERE_UNUSED(kern_stack_top_address);

         /* Next, assert things based on the type. */
         switch (type) {

@@ -1161,7 +1162,7 @@ namespace ams::kern {
         /* Determine if this is the first termination request. */
         const bool first_request = [&] ALWAYS_INLINE_LAMBDA () -> bool {
             /* Perform an atomic compare-and-swap from false to true. */
-            bool expected = false;
+            u8 expected = false;
             return m_termination_requested.compare_exchange_strong(expected, true);
         }();
@@ -27,7 +27,7 @@ namespace ams::svc {
         private:
             s64 tick;
         private:
-            static constexpr s64 NanoSecondsPerSecond = INT64_C(1'000'000'000);
+            static constexpr s64 NanoSecondsPerSecond = TimeSpan::FromSeconds(1).GetNanoSeconds();

             static constexpr void DivNs(s64 &out, const s64 value) {
                 out = value / NanoSecondsPerSecond;