
kern: SvcGetLastThreadInfo, SvcGetDebugFutureThreadInfo

Michael Scire 2020-07-30 16:31:58 -07:00 committed by SciresM
parent 0993ae0685
commit 51084c0837
13 changed files with 325 additions and 18 deletions


@@ -26,6 +26,22 @@ namespace ams::kern::arch::arm64 {
u32 write;
u64 tpidr;
u64 reserved;
constexpr void GetSvcThreadContext(ams::svc::LastThreadContext *out) const {
if ((this->psr & 0x10) == 0) {
/* aarch64 thread. */
out->fp = this->x[29];
out->sp = this->sp;
out->lr = this->x[30];
out->pc = this->pc;
} else {
/* aarch32 thread. */
out->fp = this->x[11];
out->sp = this->x[13];
out->lr = this->x[14];
out->pc = this->pc;
}
}
};
static_assert(sizeof(KExceptionContext) == 0x120);
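The psr check above keys off SPSR_EL1 bit 4 (M[4]), which is set when the exception was taken from AArch32; the register mapping then follows each ABI's convention (FP = X29, LR = X30 for AArch64; FP = R11, SP = R13, LR = R14 for AArch32). A minimal standalone sketch of that mode test, not part of this commit and using a hypothetical helper name:

#include <cstdint>

/* SPSR_EL1 bit 4 (M[4]) is 1 when the exception was taken from AArch32. */
constexpr bool IsAArch32Context(std::uint64_t psr) {
    return (psr & 0x10) != 0;
}

static_assert(!IsAArch32Context(0x05)); /* EL1h, AArch64 */
static_assert( IsAArch32Context(0x10)); /* User mode, AArch32 */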


@@ -42,6 +42,8 @@ namespace ams::kern {
Result ReadMemory(KProcessAddress buffer, KProcessAddress address, size_t size);
Result WriteMemory(KProcessAddress buffer, KProcessAddress address, size_t size);
Result GetRunningThreadInfo(ams::svc::LastThreadContext *out_context, u64 *out_thread_id);
Result GetDebugEventInfo(ams::svc::lp64::DebugEventInfo *out);
Result GetDebugEventInfo(ams::svc::ilp32::DebugEventInfo *out);


@@ -246,6 +246,8 @@ namespace ams::kern {
constexpr KHandleTable &GetHandleTable() { return this->handle_table; }
constexpr const KHandleTable &GetHandleTable() const { return this->handle_table; }
KWaitObject *GetWaitObjectPointer() { return std::addressof(this->wait_object); }
size_t GetUsedUserPhysicalMemorySize() const;
size_t GetTotalUserPhysicalMemorySize() const;
size_t GetUsedNonSystemUserPhysicalMemorySize() const;
@@ -284,6 +286,9 @@ namespace ams::kern {
}
}
constexpr KThread *GetRunningThread(s32 core) const { return this->running_threads[core]; }
constexpr u64 GetRunningThreadIdleCount(s32 core) const { return this->running_thread_idle_counts[core]; }
void RegisterThread(KThread *thread);
void UnregisterThread(KThread *thread);


@@ -83,10 +83,18 @@ namespace ams::kern {
}
}
ALWAYS_INLINE u64 GetIdleCount() const {
return this->state.idle_count;
}
ALWAYS_INLINE KThread *GetIdleThread() const {
return this->idle_thread;
}
ALWAYS_INLINE KThread *GetPreviousThread() const {
return this->prev_thread;
}
ALWAYS_INLINE s64 GetLastContextSwitchTime() const {
return this->last_context_switch_time;
}


@@ -543,4 +543,12 @@ namespace ams::kern {
}
};
ALWAYS_INLINE KExceptionContext *GetExceptionContext(KThread *thread) {
return reinterpret_cast<KExceptionContext *>(reinterpret_cast<uintptr_t>(thread->GetKernelStackTop()) - sizeof(KThread::StackParameters) - sizeof(KExceptionContext));
}
ALWAYS_INLINE const KExceptionContext *GetExceptionContext(const KThread *thread) {
return reinterpret_cast<const KExceptionContext *>(reinterpret_cast<uintptr_t>(thread->GetKernelStackTop()) - sizeof(KThread::StackParameters) - sizeof(KExceptionContext));
}
}
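The pointer arithmetic above encodes the kernel stack layout: KThread::StackParameters sits at the very top of the kernel stack, and the saved exception frame lives immediately below it. A standalone sketch of the same computation, not part of this commit; only the 0x120 frame size comes from the static_assert above, the parameters size is a placeholder:

#include <cstdint>

/* Stand-in types: ExceptionFrame mirrors the 0x120 size asserted above,
   Params' size is a placeholder rather than the real KThread::StackParameters. */
struct ExceptionFrame { std::uint8_t raw[0x120]; };
struct Params         { std::uint8_t raw[0x30];  };

/* Walk down from the stack top, past the per-thread parameters, to the frame. */
constexpr std::uintptr_t ExceptionFrameAddress(std::uintptr_t kernel_stack_top) {
    return kernel_stack_top - sizeof(Params) - sizeof(ExceptionFrame);
}

static_assert(ExceptionFrameAddress(0x1000) == 0x1000 - 0x30 - 0x120);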


@@ -25,13 +25,42 @@ namespace ams::kern {
using Entry = KThread::QueueEntry;
private:
Entry root;
- bool uses_timer;
+ bool timer_used;
public:
- constexpr KWaitObject() : root(), uses_timer() { /* ... */ }
+ constexpr KWaitObject() : root(), timer_used() { /* ... */ }
virtual void OnTimer() override;
Result Synchronize(s64 timeout);
private:
constexpr ALWAYS_INLINE void Enqueue(KThread *add) {
/* Get the entry associated with the added thread. */
Entry &add_entry = add->GetSleepingQueueEntry();
/* TODO: Member functions */
/* Get the entry associated with the end of the queue. */
KThread *tail = this->root.GetPrev();
Entry &tail_entry = (tail != nullptr) ? tail->GetSleepingQueueEntry() : this->root;
/* Link the entries. */
add_entry.SetPrev(tail);
add_entry.SetNext(nullptr);
tail_entry.SetNext(add);
this->root.SetPrev(add);
}
constexpr ALWAYS_INLINE void Remove(KThread *remove) {
/* Get the entry associated with the thread. */
Entry &remove_entry = remove->GetSleepingQueueEntry();
/* Get the entries associated with next and prev. */
KThread *prev = remove_entry.GetPrev();
KThread *next = remove_entry.GetNext();
Entry &prev_entry = (prev != nullptr) ? prev->GetSleepingQueueEntry() : this->root;
Entry &next_entry = (next != nullptr) ? next->GetSleepingQueueEntry() : this->root;
/* Unlink. */
prev_entry.SetNext(next);
next_entry.SetPrev(prev);
}
};
}
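Enqueue() and Remove() above implement an intrusive doubly linked queue in which a null thread pointer stands for the root sentinel entry. A standalone model of that pattern, not part of this commit; Node and Queue are hypothetical stand-ins for KThread and KWaitObject:

#include <cassert>

/* Node stands in for KThread, Node::Entry for KThread::QueueEntry. */
struct Node {
    struct Entry {
        Node *prev = nullptr;
        Node *next = nullptr;
    } entry;
};

struct Queue {
    Node::Entry root; /* sentinel: a null link in a node means "the root" */

    void Enqueue(Node *add) {
        Node *tail = this->root.prev;
        Node::Entry &tail_entry = (tail != nullptr) ? tail->entry : this->root;
        add->entry.prev = tail;
        add->entry.next = nullptr;
        tail_entry.next = add;
        this->root.prev = add;
    }

    void Remove(Node *remove) {
        Node *prev = remove->entry.prev;
        Node *next = remove->entry.next;
        Node::Entry &prev_entry = (prev != nullptr) ? prev->entry : this->root;
        Node::Entry &next_entry = (next != nullptr) ? next->entry : this->root;
        prev_entry.next = next;
        next_entry.prev = prev;
    }
};

int main() {
    Queue q;
    Node a, b;
    q.Enqueue(&a);
    q.Enqueue(&b);
    q.Remove(&a);
    assert(q.root.next == &b && b.entry.prev == nullptr); /* b is now head and tail */
    return 0;
}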


@@ -58,8 +58,8 @@ namespace ams::kern::svc {
/* 126 */ using ::ams::svc::ResultReservedUsed;
/* 127 */ using ::ams::svc::ResultNotSupported;
/* 128 */ using ::ams::svc::ResultDebug;
- /* 129 */ using ::ams::svc::ResultThreadNotOwned;
+ /* 129 */ using ::ams::svc::ResultNoThread;
/* 130 */ using ::ams::svc::ResultUnknownThread;
/* 131 */ using ::ams::svc::ResultPortClosed;
/* 132 */ using ::ams::svc::ResultLimitReached;
/* 133 */ using ::ams::svc::ResultInvalidMemoryPool;


@@ -40,15 +40,6 @@ namespace ams::kern::arch::arm64 {
namespace {
- /* TODO: Should these be elsewhere? (In a header)? */
- ALWAYS_INLINE KExceptionContext *GetExceptionContext(KThread *thread) {
- return reinterpret_cast<KExceptionContext *>(reinterpret_cast<uintptr_t>(thread->GetKernelStackTop()) - sizeof(KThread::StackParameters) - sizeof(KExceptionContext));
- }
- ALWAYS_INLINE const KExceptionContext *GetExceptionContext(const KThread *thread) {
- return reinterpret_cast<const KExceptionContext *>(reinterpret_cast<uintptr_t>(thread->GetKernelStackTop()) - sizeof(KThread::StackParameters) - sizeof(KExceptionContext));
- }
ALWAYS_INLINE bool IsFpuEnabled() {
return cpu::ArchitecturalFeatureAccessControlRegisterAccessor().IsFpEnabled();
}


@@ -234,6 +234,36 @@ namespace ams::kern {
return ResultSuccess();
}
Result KDebugBase::GetRunningThreadInfo(ams::svc::LastThreadContext *out_context, u64 *out_thread_id) {
/* Get the attached process. */
KScopedAutoObject process = this->GetProcess();
R_UNLESS(process.IsNotNull(), svc::ResultProcessTerminated());
/* Get the thread info. */
{
KScopedSchedulerLock sl;
/* Get the running thread. */
const s32 core_id = GetCurrentCoreId();
KThread *thread = process->GetRunningThread(core_id);
/* Check that the thread's idle count is correct. */
R_UNLESS(process->GetRunningThreadIdleCount(core_id) == Kernel::GetScheduler(core_id).GetIdleCount(), svc::ResultNoThread());
/* Check that the thread is running on the current core. */
R_UNLESS(thread != nullptr, svc::ResultUnknownThread());
R_UNLESS(thread->GetActiveCore() == core_id, svc::ResultUnknownThread());
/* Get the thread's exception context. */
GetExceptionContext(thread)->GetSvcThreadContext(out_context);
/* Get the thread's id. */
*out_thread_id = thread->GetId();
}
return ResultSuccess();
}
Result KDebugBase::Attach(KProcess *target) {
/* Check that the process isn't null. */
MESOSPHERE_ASSERT(target != nullptr);


@@ -18,7 +18,86 @@
namespace ams::kern {
void KWaitObject::OnTimer() {
- MESOSPHERE_UNIMPLEMENTED();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Wake up all the waiting threads. */
Entry *entry = std::addressof(this->root);
while (true) {
/* Get the next thread. */
KThread *thread = entry->GetNext();
if (thread == nullptr) {
break;
}
/* Wake it up. */
thread->Wakeup();
/* Advance. */
entry = std::addressof(thread->GetSleepingQueueEntry());
}
}
Result KWaitObject::Synchronize(s64 timeout) {
/* Perform the wait. */
KHardwareTimer *timer = nullptr;
KThread *cur_thread = GetCurrentThreadPointer();
{
KScopedSchedulerLock sl;
/* Check that the thread isn't terminating. */
R_UNLESS(!cur_thread->IsTerminationRequested(), svc::ResultTerminationRequested());
/* Verify that nothing else is already waiting on the object. */
if (timeout > 0) {
R_UNLESS(!this->timer_used, svc::ResultBusy());
}
/* Check that we're not already in use. */
if (timeout >= 0) {
/* Verify the timer isn't already in use. */
R_UNLESS(!this->timer_used, svc::ResultBusy());
}
/* If we need to, register our timeout. */
if (timeout > 0) {
/* Mark that we're using the timer. */
this->timer_used = true;
/* Use the timer. */
timer = std::addressof(Kernel::GetHardwareTimer());
timer->RegisterAbsoluteTask(this, timeout);
}
if (timeout == 0) {
/* If we're timed out immediately, just wake up the thread. */
this->OnTimer();
} else {
/* Otherwise, sleep until the timeout occurs. */
this->Enqueue(cur_thread);
cur_thread->SetState(KThread::ThreadState_Waiting);
cur_thread->SetSyncedObject(nullptr, svc::ResultTimedOut());
}
}
/* Cleanup as necessary. */
{
KScopedSchedulerLock sl;
/* Remove from the timer. */
if (timeout > 0) {
MESOSPHERE_ASSERT(this->timer_used);
MESOSPHERE_ASSERT(timer != nullptr);
timer->CancelTask(this);
this->timer_used = false;
}
/* Remove the thread from our queue. */
if (timeout != 0) {
this->Remove(cur_thread);
}
}
return ResultSuccess();
}
}
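Synchronize() above interprets its argument three ways: a negative value enqueues the caller with no hardware deadline, zero invokes OnTimer() inline (waking any existing waiters) without putting the caller to sleep, and a positive value is an absolute tick deadline registered via RegisterAbsoluteTask(). A small sketch of that convention, not part of this commit; the enum and helper names are hypothetical:

#include <cstdint>

enum class TimeoutKind {
    Indefinite,   /* timeout <  0: enqueue and sleep, no hardware timer involved     */
    Immediate,    /* timeout == 0: OnTimer() runs inline, the caller never sleeps    */
    AbsoluteTick, /* timeout >  0: absolute tick deadline via RegisterAbsoluteTask() */
};

constexpr TimeoutKind ClassifyTimeout(std::int64_t timeout) {
    if (timeout < 0) {
        return TimeoutKind::Indefinite;
    } else if (timeout == 0) {
        return TimeoutKind::Immediate;
    } else {
        return TimeoutKind::AbsoluteTick;
    }
}

static_assert(ClassifyTimeout(-1) == TimeoutKind::Indefinite);
static_assert(ClassifyTimeout( 0) == TimeoutKind::Immediate);
static_assert(ClassifyTimeout( 1) == TimeoutKind::AbsoluteTick);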


@@ -0,0 +1,135 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
Result GetDebugFutureThreadInfo(ams::svc::LastThreadContext *out_context, uint64_t *out_thread_id, ams::svc::Handle debug_handle, int64_t ns) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNoThread());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Synchronize the current process to the desired time. */
{
/* Get the wait object. */
KWaitObject *wait_object = GetCurrentProcess().GetWaitObjectPointer();
/* Convert the timeout from nanoseconds to ticks. */
s64 timeout;
if (ns > 0) {
u64 ticks = KHardwareTimer::GetTick();
ticks += ams::svc::Tick(TimeSpan::FromNanoSeconds(ns));
ticks += 2;
timeout = ticks;
} else {
timeout = ns;
}
/* Synchronize to the desired time. */
R_TRY(wait_object->Synchronize(timeout));
}
/* Get the running thread info. */
R_TRY(debug->GetRunningThreadInfo(out_context, out_thread_id));
return ResultSuccess();
}
Result GetLastThreadInfo(ams::svc::LastThreadContext *out_context, uintptr_t *out_tls_address, uint32_t *out_flags) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNoThread());
/* Get the thread info. */
{
KScopedInterruptDisable di;
/* Get the previous thread. */
KThread *prev_thread = Kernel::GetScheduler().GetPreviousThread();
R_UNLESS(prev_thread != nullptr, svc::ResultNoThread());
/* Verify the last thread was owned by the current process. */
R_UNLESS(prev_thread->GetOwnerProcess() == GetCurrentProcessPointer(), svc::ResultUnknownThread());
/* Clear the output flags. */
*out_flags = 0;
/* Get the thread's exception context. */
GetExceptionContext(prev_thread)->GetSvcThreadContext(out_context);
/* Get the tls address. */
*out_tls_address = GetInteger(prev_thread->GetThreadLocalRegionAddress());
/* Set the syscall flag if appropriate. */
if (prev_thread->IsCallingSvc()) {
*out_flags |= ams::svc::LastThreadInfoFlag_ThreadInSystemCall;
}
}
return ResultSuccess();
}
}
/* ============================= 64 ABI ============================= */
Result GetDebugFutureThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, uint64_t *out_thread_id, ams::svc::Handle debug_handle, int64_t ns) {
return GetDebugFutureThreadInfo(out_context, out_thread_id, debug_handle, ns);
}
Result GetLastThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) {
static_assert(sizeof(*out_tls_address) == sizeof(uintptr_t));
return GetLastThreadInfo(out_context, reinterpret_cast<uintptr_t *>(out_tls_address), out_flags);
}
/* ============================= 64From32 ABI ============================= */
Result GetDebugFutureThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, uint64_t *out_thread_id, ams::svc::Handle debug_handle, int64_t ns) {
ams::svc::LastThreadContext context = {};
R_TRY(GetDebugFutureThreadInfo(std::addressof(context), out_thread_id, debug_handle, ns));
*out_context = {
.fp = static_cast<u32>(context.fp),
.sp = static_cast<u32>(context.sp),
.lr = static_cast<u32>(context.lr),
.pc = static_cast<u32>(context.pc),
};
return ResultSuccess();
}
Result GetLastThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) {
static_assert(sizeof(*out_tls_address) == sizeof(uintptr_t));
ams::svc::LastThreadContext context = {};
R_TRY(GetLastThreadInfo(std::addressof(context), reinterpret_cast<uintptr_t *>(out_tls_address), out_flags));
*out_context = {
.fp = static_cast<u32>(context.fp),
.sp = static_cast<u32>(context.sp),
.lr = static_cast<u32>(context.lr),
.pc = static_cast<u32>(context.pc),
};
return ResultSuccess();
}
}
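For context, a hedged sketch of how a 64-bit userland caller might drive one of the new SVCs; it assumes user-mode wrappers mirroring the kernel signatures above (ams::svc::GetLastThreadInfo and the LastThreadContext type) and is not part of this commit:

#include <stratosphere.hpp> /* assumption: pulls in the ams::svc user-mode declarations */

/* Ask the kernel which thread in this process ran most recently on this core,
   and report whether it was inside a system call at the time. */
bool GetLastThreadWasInSvc(ams::svc::LastThreadContext *out_ctx) {
    ams::svc::Address tls_address = 0;
    u32 flags = 0;

    /* Assumed wrapper matching GetLastThreadInfo64() above; per the implementation,
       this fails with ResultNoThread unless the system is in debug mode. */
    if (R_FAILED(ams::svc::GetLastThreadInfo(out_ctx, std::addressof(tls_address), std::addressof(flags)))) {
        return false;
    }

    return (flags & ams::svc::LastThreadInfoFlag_ThreadInSystemCall) != 0;
}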


@@ -61,8 +61,8 @@ namespace ams::svc {
R_DEFINE_ERROR_RESULT(ReservedUsed, 126);
R_DEFINE_ERROR_RESULT(NotSupported, 127);
R_DEFINE_ERROR_RESULT(Debug, 128);
- R_DEFINE_ERROR_RESULT(ThreadNotOwned, 129);
+ R_DEFINE_ERROR_RESULT(NoThread, 129);
R_DEFINE_ERROR_RESULT(UnknownThread, 130);
R_DEFINE_ERROR_RESULT(PortClosed, 131);
R_DEFINE_ERROR_RESULT(LimitReached, 132);
R_DEFINE_ERROR_RESULT(InvalidMemoryPool, 133);


@@ -188,7 +188,7 @@ namespace ams::svc {
};
enum LastThreadInfoFlag : u32 {
- /* TODO */
+ LastThreadInfoFlag_ThreadInSystemCall = (1u << 0),
};
enum LimitableResource : u32 {
@@ -415,6 +415,10 @@ namespace ams::svc {
DebugException_MemorySystemError = 9,
};
enum DebugEventFlag : u32 {
DebugEventFlag_Stopped = (1u << 0),
};
enum ExceptionType : u32 {
ExceptionType_Init = 0x000,
ExceptionType_InstructionAbort = 0x100,