
kern: implement SvcSignalToAddress, SvcWaitForAddress

Michael Scire 2020-07-16 19:06:48 -07:00 committed by SciresM
parent a0cc22302c
commit 8d507aa5a1
7 changed files with 292 additions and 16 deletions
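
For context: these two SVCs give userspace a futex-like primitive. A thread parks itself on a 32-bit value in its own address space (the kernel re-checks that value under the scheduler lock before sleeping, so there is no lost-wakeup window), and another thread wakes waiters keyed on that address. Below is a minimal usage sketch against the semantics implemented in this commit; the wrapper names, the Result stand-in, and the enumerators are illustrative assumptions, not part of the commit.

#include <atomic>
#include <cstdint>

using Result = uint32_t; /* stand-in for the HOS result code type */

/* Hypothetical thin wrappers over the new SVCs; exact userspace signatures may differ. */
enum ArbitrationType : uint32_t { ArbitrationType_WaitIfLessThan, ArbitrationType_DecrementAndWaitIfLessThan, ArbitrationType_WaitIfEqual };
enum SignalType : uint32_t { SignalType_Signal, SignalType_SignalAndIncrementIfEqual, SignalType_SignalAndModifyByWaitingCountIfEqual };
Result WaitForAddress(void *address, ArbitrationType type, int32_t value, int64_t timeout_ns);
Result SignalToAddress(void *address, SignalType type, int32_t value, int32_t count);

std::atomic<int32_t> g_flag{0};

void Waiter() {
    /* Sleep in the kernel while the flag is below 1 (KAddressArbiter::WaitIfLessThan below). */
    while (g_flag.load(std::memory_order_acquire) < 1) {
        WaitForAddress(&g_flag, ArbitrationType_WaitIfLessThan, 1, -1 /* no timeout */);
    }
}

void Signaler() {
    g_flag.store(1, std::memory_order_release);
    /* count <= 0 wakes every thread currently waiting on the address (KAddressArbiter::Signal below). */
    SignalToAddress(&g_flag, SignalType_Signal, 1, -1);
}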


@@ -149,6 +149,26 @@ namespace ams::kern::arch::arm64::cpu {
return true;
}
ALWAYS_INLINE bool CanAccessAtomic(KProcessAddress addr, bool privileged = false) {
const uintptr_t va = GetInteger(addr);
u64 phys_addr;
if (privileged) {
__asm__ __volatile__("at s1e1w, %[va]" :: [va]"r"(va) : "memory");
} else {
__asm__ __volatile__("at s1e0w, %[va]" :: [va]"r"(va) : "memory");
}
InstructionMemoryBarrier();
u64 par = GetParEl1();
if (par & 0x1) {
return false;
}
return (par >> (BITSIZEOF(par) - BITSIZEOF(u8))) == 0xFF;
}
/* Synchronization helpers. */
NOINLINE void SynchronizeAllCores();
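
The CanAccessAtomic addition above decides whether the exclusive-access helpers introduced later in this commit may touch a user address: it translates the VA with AT S1E0W (or S1E1W when privileged) and then inspects PAR_EL1. As a reading aid, the two literals it tests decode as follows; the constant names are hypothetical, the field layout is from the ARMv8-A PAR_EL1 definition.

#include <cstdint>

constexpr uint64_t PAR_F = UINT64_C(1) << 0;       /* PAR_EL1.F: set when the translation aborted. */
constexpr int      PAR_ATTR_SHIFT = 56;            /* PAR_EL1.ATTR occupies bits [63:56] on success. */
constexpr uint64_t ATTR_NORMAL_WRITE_BACK = 0xFF;  /* MAIR encoding: Normal, Inner+Outer Write-Back cacheable. */

/* The same predicate, spelled out: the translation must succeed and the page must be
   normal write-back cacheable memory, which the LDAXR/STLXR exclusives below rely on. */
constexpr bool IsAtomicAccessible(uint64_t par) {
    return ((par & PAR_F) == 0) && ((par >> PAR_ATTR_SHIFT) == ATTR_NORMAL_WRITE_BACK);
}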


@@ -39,6 +39,9 @@ namespace ams::kern::arch::arm64 {
static bool ClearMemoryAligned64Bit(void *dst, size_t size);
static bool ClearMemorySize32Bit(void *dst);
static bool UpdateIfEqualAtomic(s32 *out, s32 *address, s32 compare_value, s32 new_value);
static bool DecrementIfLessThanAtomic(s32 *out, s32 *address, s32 compare);
static bool StoreDataCache(uintptr_t start, uintptr_t end);
static bool FlushDataCache(uintptr_t start, uintptr_t end);
static bool InvalidateDataCache(uintptr_t start, uintptr_t end);


@@ -20,6 +20,8 @@
namespace ams::kern {
extern KThread g_cv_arbiter_compare_thread;
class KConditionVariable {
public:
using ThreadTree = typename KThread::ConditionVariableThreadTreeType;


@@ -147,7 +147,7 @@ namespace ams::kern {
KLightLock *waiting_lock{};
uintptr_t condvar_key{};
uintptr_t entrypoint{};
- KProcessAddress arbiter_key{};
+ KProcessAddress address_key{};
KProcess *parent{};
void *kernel_stack_top{};
u32 *light_ipc_data{};
@@ -175,7 +175,7 @@
KThread *lock_owner{};
ConditionVariableThreadTree *condvar_tree{};
uintptr_t debug_params[3]{};
- u32 arbiter_value{};
+ u32 address_key_value{};
u32 suspend_request_flags{};
u32 suspend_allowed_flags{};
Result wait_result;
@@ -304,6 +304,7 @@
NOINLINE KThreadContext *GetContextForSchedulerLoop();
constexpr uintptr_t GetConditionVariableKey() const { return this->condvar_key; }
constexpr uintptr_t GetAddressArbiterKey() const { return this->condvar_key; }
constexpr void SetupForConditionVariableCompare(uintptr_t cv_key, int priority) {
this->condvar_key = cv_key;
@@ -314,6 +315,11 @@
this->condvar_tree = nullptr;
}
constexpr void SetupForAddressArbiterCompare(uintptr_t address, int priority) {
this->condvar_key = address;
this->priority = priority;
}
constexpr void SetAddressArbiter(ConditionVariableThreadTree *tree, uintptr_t address) {
this->condvar_tree = tree;
this->condvar_key = address;
@@ -349,10 +355,10 @@
void RemoveWaiter(KThread *thread);
KThread *RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key);
- constexpr KProcessAddress GetAddressKey() const { return this->arbiter_key; }
- constexpr u32 GetAddressKeyValue() const { return this->arbiter_value; }
- constexpr void SetAddressKey(KProcessAddress key) { this->arbiter_key = key; }
- constexpr void SetAddressKey(KProcessAddress key, u32 val) { this->arbiter_key = key; this->arbiter_value = val; }
+ constexpr KProcessAddress GetAddressKey() const { return this->address_key; }
+ constexpr u32 GetAddressKeyValue() const { return this->address_key_value; }
+ constexpr void SetAddressKey(KProcessAddress key) { this->address_key = key; }
+ constexpr void SetAddressKey(KProcessAddress key, u32 val) { this->address_key = key; this->address_key_value = val; }
constexpr void SetLockOwner(KThread *owner) { this->lock_owner = owner; }
constexpr KThread *GetLockOwner() const { return this->lock_owner; }


@@ -428,6 +428,67 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv:
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::UpdateIfEqualAtomic(s32 *out, s32 *address, s32 compare_value, s32 new_value) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii
.type _ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii:
/* Load the value from the address. */
ldaxr w4, [x1]
/* Compare it to the desired one. */
cmp w4, w2
/* If equal, we want to try to write the new value. */
b.eq 1f
/* Otherwise, clear our exclusive hold and finish. */
clrex
b 2f
1: /* Try to store. */
stlxr w5, w3, [x1]
/* If we failed to store, try again. */
cbnz w5, _ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii
2: /* We're done. */
str w4, [x0]
mov x0, #1
ret
/* ams::kern::arch::arm64::UserspaceAccess::DecrementIfLessThanAtomic(s32 *out, s32 *address, s32 compare) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i
.type _ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i, %function
.balign 0x10
_ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i:
/* Load the value from the address. */
ldaxr w3, [x1]
/* Compare it to the desired one. */
cmp w3, w2
/* If less than, we want to try to decrement. */
b.lt 1f
/* Otherwise, clear our exclusive hold and finish. */
clrex
b 2f
1: /* Decrement and try to store. */
sub w4, w3, #1
stlxr w5, w4, [x1]
/* If we failed to store, try again. */
cbnz w5, _ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i
2: /* We're done. */
str w3, [x0]
mov x0, #1
ret
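
The two routines above are hand-written because they target a raw user pointer from the UserspaceAccess section, where (per the existing UserspaceAccess convention, not shown in this diff) a data abort can be intercepted and turned into a failure return. Ignoring that fault path, a plain C++ model of what each routine computes might look like this; it is an illustrative sketch, not the kernel implementation.

#include <atomic>
#include <cstdint>

/* Model of UpdateIfEqualAtomic: if *address == compare_value, store new_value.
   *out always receives the value observed at the address. */
bool UpdateIfEqualModel(int32_t *out, std::atomic<int32_t> *address, int32_t compare_value, int32_t new_value) {
    int32_t expected = compare_value;
    /* compare_exchange_strong performs the same LDAXR/STLXR retry loop the assembly spells out. */
    address->compare_exchange_strong(expected, new_value, std::memory_order_acq_rel, std::memory_order_acquire);
    *out = expected;  /* On failure, 'expected' holds the observed value, matching the str w4, [x0] above. */
    return true;      /* The real helper returns false only when the user access faults. */
}

/* Model of DecrementIfLessThanAtomic: if *address < compare, decrement it by one. */
bool DecrementIfLessThanModel(int32_t *out, std::atomic<int32_t> *address, int32_t compare) {
    int32_t observed = address->load(std::memory_order_acquire);
    while (observed < compare) {
        if (address->compare_exchange_weak(observed, observed - 1, std::memory_order_acq_rel, std::memory_order_acquire)) {
            break;  /* Stored observed - 1; *out gets the pre-decrement value, as in the assembly. */
        }
        /* On failure, 'observed' was reloaded; re-test the predicate and retry. */
    }
    *out = observed;
    return true;
}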
/* ams::kern::arch::arm64::UserspaceAccess::StoreDataCache(uintptr_t start, uintptr_t end) */
.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm, "ax", %progbits
.global _ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm


@@ -19,28 +19,212 @@ namespace ams::kern {
namespace {
constinit KThread g_arbiter_compare_thread;
ALWAYS_INLINE bool ReadFromUser(s32 *out, KProcessAddress address) {
return UserspaceAccess::CopyMemoryFromUserSize32Bit(out, GetVoidPointer(address));
}
ALWAYS_INLINE bool DecrementIfLessThan(s32 *out, KProcessAddress address, s32 value) {
KScopedInterruptDisable di;
if (!cpu::CanAccessAtomic(address)) {
return false;
}
return UserspaceAccess::DecrementIfLessThanAtomic(out, GetPointer<s32>(address), value);
}
ALWAYS_INLINE bool UpdateIfEqual(s32 *out, KProcessAddress address, s32 value, s32 new_value) {
KScopedInterruptDisable di;
if (!cpu::CanAccessAtomic(address)) {
return false;
}
return UserspaceAccess::UpdateIfEqualAtomic(out, GetPointer<s32>(address), value, new_value);
}
}
Result KAddressArbiter::Signal(uintptr_t addr, s32 count) {
- MESOSPHERE_UNIMPLEMENTED();
+ /* Perform signaling. */
s32 num_waiters = 0;
{
KScopedSchedulerLock sl;
g_cv_arbiter_compare_thread.SetupForAddressArbiterCompare(addr, -1);
auto it = this->tree.nfind(g_cv_arbiter_compare_thread);
while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
KThread *target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, ResultSuccess());
AMS_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();
it = this->tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
}
return ResultSuccess();
}
Result KAddressArbiter::SignalAndIncrementIfEqual(uintptr_t addr, s32 value, s32 count) {
- MESOSPHERE_UNIMPLEMENTED();
+ /* Perform signaling. */
s32 num_waiters = 0;
{
KScopedSchedulerLock sl;
g_cv_arbiter_compare_thread.SetupForAddressArbiterCompare(addr, -1);
auto it = this->tree.nfind(g_cv_arbiter_compare_thread);
/* Check the userspace value. */
s32 user_value;
R_UNLESS(UpdateIfEqual(std::addressof(user_value), addr, value, value + 1), svc::ResultInvalidCurrentMemory());
R_UNLESS(user_value == value, svc::ResultInvalidState());
while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
KThread *target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, ResultSuccess());
AMS_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();
it = this->tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
}
return ResultSuccess();
}
Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uintptr_t addr, s32 value, s32 count) {
- MESOSPHERE_UNIMPLEMENTED();
+ /* Perform signaling. */
s32 num_waiters = 0;
{
KScopedSchedulerLock sl;
g_cv_arbiter_compare_thread.SetupForAddressArbiterCompare(addr, -1);
auto it = this->tree.nfind(g_cv_arbiter_compare_thread);
/* Determine the updated value. */
s32 new_value;
if (count <= 0) {
if ((it != this->tree.end()) && (it->GetAddressArbiterKey() == addr)) {
new_value = value - 1;
} else {
new_value = value + 1;
}
} else {
auto tmp_it = it;
int tmp_num_waiters = 0;
while ((tmp_it != this->tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) && (tmp_num_waiters < count + 1)) {
++tmp_num_waiters;
++tmp_it;
}
if (tmp_num_waiters == 0) {
new_value = value + 1;
} else if (tmp_num_waiters <= count) {
new_value = value - 1;
} else {
new_value = value;
}
}
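/* Illustrative walk-through of the cases above: with count == 1 and two waiters queued
   on addr, the bounded walk stops at tmp_num_waiters == 2 > count, so new_value stays
   equal to value and the userspace word is left untouched; with count == 2 and two
   waiters, tmp_num_waiters == 2 <= count, so the value is decremented; with no waiters
   at all, it is incremented instead. */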
/* Check the userspace value. */
s32 user_value;
bool succeeded;
if (value != new_value) {
succeeded = UpdateIfEqual(std::addressof(user_value), addr, value, new_value);
} else {
succeeded = ReadFromUser(std::addressof(user_value), addr);
}
R_UNLESS(succeeded, svc::ResultInvalidCurrentMemory());
R_UNLESS(user_value == value, svc::ResultInvalidState());
while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
KThread *target_thread = std::addressof(*it);
target_thread->SetSyncedObject(nullptr, ResultSuccess());
AMS_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->Wakeup();
it = this->tree.erase(it);
target_thread->ClearAddressArbiter();
++num_waiters;
}
}
return ResultSuccess();
}
Result KAddressArbiter::WaitIfLessThan(uintptr_t addr, s32 value, bool decrement, s64 timeout) {
- MESOSPHERE_UNIMPLEMENTED();
+ /* Prepare to wait. */
KThread *cur_thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);
/* Check that the thread isn't terminating. */
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return svc::ResultTerminationRequested();
}
/* Set the synced object. */
cur_thread->SetSyncedObject(nullptr, ams::svc::ResultTimedOut());
/* Read the value from userspace. */
s32 user_value;
bool succeeded;
if (decrement) {
succeeded = DecrementIfLessThan(std::addressof(user_value), addr, value);
} else {
succeeded = ReadFromUser(std::addressof(user_value), addr);
}
if (!succeeded) {
slp.CancelSleep();
return svc::ResultInvalidCurrentMemory();
}
/* Check that the value is less than the specified one. */
if (user_value >= value) {
slp.CancelSleep();
return svc::ResultInvalidState();
}
/* Check that the timeout is non-zero. */
if (timeout == 0) {
slp.CancelSleep();
return svc::ResultTimedOut();
}
/* Set the arbiter. */
cur_thread->SetAddressArbiter(std::addressof(this->tree), addr);
this->tree.insert(*cur_thread);
cur_thread->SetState(KThread::ThreadState_Waiting);
}
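/* (Explanatory note: the thread is only queued and marked Waiting above; the actual block
   happens when slp goes out of scope here, releasing the scheduler lock with the sleep
   still armed. Execution resumes below once the thread is woken by a signal, by the
   timeout, or by wait cancellation.) */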
/* Cancel the timer wait. */
if (timer != nullptr) {
timer->CancelTask(cur_thread);
}
/* Remove from the address arbiter. */
{
KScopedSchedulerLock sl;
if (cur_thread->IsWaitingForAddressArbiter()) {
this->tree.erase(this->tree.iterator_to(*cur_thread));
cur_thread->ClearAddressArbiter();
}
}
/* Get the result. */
KSynchronizationObject *dummy;
return cur_thread->GetWaitResult(std::addressof(dummy));
}
Result KAddressArbiter::WaitIfEqual(uintptr_t addr, s32 value, s64 timeout) { Result KAddressArbiter::WaitIfEqual(uintptr_t addr, s32 value, s64 timeout) {


@@ -17,9 +17,9 @@
namespace ams::kern {
- namespace {
- constinit KThread g_cv_compare_thread;
+ constinit KThread g_cv_arbiter_compare_thread;
+ namespace {
ALWAYS_INLINE bool ReadFromUser(u32 *out, KProcessAddress address) {
return UserspaceAccess::CopyMemoryFromUserSize32Bit(out, GetVoidPointer(address));
@@ -131,9 +131,9 @@ namespace ams::kern {
int num_waiters = 0;
{
KScopedSchedulerLock sl;
- g_cv_compare_thread.SetupForConditionVariableCompare(cv_key, -1);
- auto it = this->tree.nfind(g_cv_compare_thread);
+ g_cv_arbiter_compare_thread.SetupForConditionVariableCompare(cv_key, -1);
+ auto it = this->tree.nfind(g_cv_arbiter_compare_thread);
while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetConditionVariableKey() == cv_key)) {
KThread *target_thread = std::addressof(*it);