Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
#include <mesosphere/kern_k_handle_table.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_k_thread_local_page.hpp>
#include <mesosphere/kern_k_shared_memory_info.hpp>
#include <mesosphere/kern_k_io_region.hpp>
#include <mesosphere/kern_k_worker_task.hpp>
#include <mesosphere/kern_select_page_table.hpp>
#include <mesosphere/kern_k_condition_variable.hpp>
#include <mesosphere/kern_k_address_arbiter.hpp>
#include <mesosphere/kern_k_capabilities.hpp>
#include <mesosphere/kern_k_wait_object.hpp>
#include <mesosphere/kern_k_dynamic_resource_manager.hpp>
#include <mesosphere/kern_k_page_table_manager.hpp>
#include <mesosphere/kern_k_system_resource.hpp>
namespace ams::kern {
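
    /* KProcess represents a single process: its page table and address space, threads,
       capabilities, handle table, thread-local pages, and resource accounting. */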
    class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask> {
        MESOSPHERE_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
        public:
            enum State {
                State_Created         = ams::svc::ProcessState_Created,
                State_CreatedAttached = ams::svc::ProcessState_CreatedAttached,
                State_Running         = ams::svc::ProcessState_Running,
                State_Crashed         = ams::svc::ProcessState_Crashed,
                State_RunningAttached = ams::svc::ProcessState_RunningAttached,
                State_Terminating     = ams::svc::ProcessState_Terminating,
                State_Terminated      = ams::svc::ProcessState_Terminated,
                State_DebugBreak      = ams::svc::ProcessState_DebugBreak,
            };

            using ThreadList = util::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType;

            static constexpr size_t AslrAlignment = KernelAslrAlignment;
        private:
            using SharedMemoryInfoList = util::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
            using IoRegionList         = util::IntrusiveListMemberTraits<&KIoRegion::m_process_list_node>::ListType;

            using TLPTree     = util::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
            using TLPIterator = TLPTree::iterator;
        private:
            KProcessPageTable m_page_table;
            util::Atomic<size_t> m_used_kernel_memory_size;
            TLPTree m_fully_used_tlp_tree;
            TLPTree m_partially_used_tlp_tree;
            s32 m_ideal_core_id;
            void *m_attached_object;
            KResourceLimit *m_resource_limit;
            KSystemResource *m_system_resource;
            size_t m_memory_release_hint;
            State m_state;
            KLightLock m_state_lock;
            KLightLock m_list_lock;
            KConditionVariable m_cond_var;
            KAddressArbiter m_address_arbiter;
            u64 m_entropy[4];
            bool m_is_signaled;
            bool m_is_initialized;
            bool m_is_application;
            char m_name[13];
            util::Atomic<u16> m_num_running_threads;
            u32 m_flags;
            KMemoryManager::Pool m_memory_pool;
            s64 m_schedule_count;
            KCapabilities m_capabilities;
            ams::svc::ProgramId m_program_id;
            u64 m_process_id;
            #if defined(MESOSPHERE_ENABLE_PROCESS_CREATION_TIME)
            s64 m_creation_time;
            #endif
            KProcessAddress m_code_address;
            size_t m_code_size;
            size_t m_main_thread_stack_size;
            size_t m_max_process_memory;
            u32 m_version;
            KHandleTable m_handle_table;
            KProcessAddress m_plr_address;
            void *m_plr_heap_address;
            KThread *m_exception_thread;
            ThreadList m_thread_list;
            SharedMemoryInfoList m_shared_memory_list;
            IoRegionList m_io_region_list;
            bool m_is_suspended;
            bool m_is_immortal;
            bool m_is_jit_debug;
            bool m_is_handle_table_initialized;
            ams::svc::DebugEvent m_jit_debug_event_type;
            ams::svc::DebugException m_jit_debug_exception_type;
            uintptr_t m_jit_debug_params[4];
            u64 m_jit_debug_thread_id;
            KWaitObject m_wait_object;
            KThread *m_running_threads[cpu::NumCores];
            u64 m_running_thread_idle_counts[cpu::NumCores];
            u64 m_running_thread_switch_counts[cpu::NumCores];
            KThread *m_pinned_threads[cpu::NumCores];
            util::Atomic<s64> m_cpu_time;
            util::Atomic<s64> m_num_process_switches;
            util::Atomic<s64> m_num_thread_switches;
            util::Atomic<s64> m_num_fpu_switches;
            util::Atomic<s64> m_num_supervisor_calls;
            util::Atomic<s64> m_num_ipc_messages;
            util::Atomic<s64> m_num_ipc_replies;
            util::Atomic<s64> m_num_ipc_receives;
        private:
            Result Initialize(const ams::svc::CreateProcessParameter &params);

            Result StartTermination();
            void FinishTermination();
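
            /* Each core may have at most one pinned thread; these helpers record and clear it. */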
            ALWAYS_INLINE void PinThread(s32 core_id, KThread *thread) {
                MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
                MESOSPHERE_ASSERT(thread != nullptr);
                MESOSPHERE_ASSERT(m_pinned_threads[core_id] == nullptr);

                m_pinned_threads[core_id] = thread;
            }

            ALWAYS_INLINE void UnpinThread(s32 core_id, KThread *thread) {
                MESOSPHERE_UNUSED(thread);

                MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
                MESOSPHERE_ASSERT(thread != nullptr);
                MESOSPHERE_ASSERT(m_pinned_threads[core_id] == thread);

                m_pinned_threads[core_id] = nullptr;
            }
        public:
            explicit KProcess() : m_is_initialized(false) { /* ... */ }
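
            /* Initialization overloads: one takes an already-constructed page group and a raw capability list, the other reads the capability list from user memory. */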
            Result Initialize(const ams::svc::CreateProcessParameter &params, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool, bool immortal);
            Result Initialize(const ams::svc::CreateProcessParameter &params, svc::KUserPointer<const u32 *> caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool);
            void Exit();

            constexpr const char *GetName() const { return m_name; }

            constexpr ams::svc::ProgramId GetProgramId() const { return m_program_id; }

            constexpr u64 GetProcessId() const { return m_process_id; }

            constexpr State GetState() const { return m_state; }

            constexpr u64 GetCoreMask() const { return m_capabilities.GetCoreMask(); }
            constexpr u64 GetPhysicalCoreMask() const { return m_capabilities.GetPhysicalCoreMask(); }
            constexpr u64 GetPriorityMask() const { return m_capabilities.GetPriorityMask(); }

            constexpr s32 GetIdealCoreId() const { return m_ideal_core_id; }
            constexpr void SetIdealCoreId(s32 core_id) { m_ideal_core_id = core_id; }

            constexpr bool CheckThreadPriority(s32 prio) const { return ((1ul << prio) & this->GetPriorityMask()) != 0; }

            constexpr u32 GetCreateProcessFlags() const { return m_flags; }

            constexpr bool Is64Bit() const { return m_flags & ams::svc::CreateProcessFlag_Is64Bit; }

            constexpr KProcessAddress GetEntryPoint() const { return m_code_address; }

            constexpr size_t GetMainStackSize() const { return m_main_thread_stack_size; }

            constexpr KMemoryManager::Pool GetMemoryPool() const { return m_memory_pool; }

            constexpr u64 GetRandomEntropy(size_t i) const { return m_entropy[i]; }

            constexpr bool IsApplication() const { return m_is_application; }

            constexpr bool IsSuspended() const { return m_is_suspended; }
            constexpr void SetSuspended(bool suspended) { m_is_suspended = suspended; }

            Result Terminate();

            constexpr bool IsTerminated() const {
                return m_state == State_Terminated;
            }

            constexpr bool IsAttachedToDebugger() const {
                return m_attached_object != nullptr;
            }

            constexpr bool IsPermittedSvc(svc::SvcId svc_id) const {
                return m_capabilities.IsPermittedSvc(svc_id);
            }

            constexpr bool IsPermittedInterrupt(int32_t interrupt_id) const {
                return m_capabilities.IsPermittedInterrupt(interrupt_id);
            }

            constexpr bool IsPermittedDebug() const {
                return m_capabilities.IsPermittedDebug();
            }

            constexpr bool CanForceDebug() const {
                return m_capabilities.CanForceDebug();
            }

            u32 GetAllocateOption() const { return m_page_table.GetAllocateOption(); }

            ThreadList &GetThreadList() { return m_thread_list; }
            const ThreadList &GetThreadList() const { return m_thread_list; }

            constexpr void *GetDebugObject() const { return m_attached_object; }
            KProcess::State SetDebugObject(void *debug_object);
            void ClearDebugObject(KProcess::State state);
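
            /* JIT debug state: EnterJitDebug records the event, exception, and parameters for later retrieval via GetJitDebugInfo. */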
            bool EnterJitDebug(ams::svc::DebugEvent event, ams::svc::DebugException exception, uintptr_t param1 = 0, uintptr_t param2 = 0, uintptr_t param3 = 0, uintptr_t param4 = 0);

            KEventInfo *GetJitDebugInfo();
            void ClearJitDebugInfo();

            bool EnterUserException();
            bool LeaveUserException();
            bool ReleaseUserException(KThread *thread);

            KThread *GetPinnedThread(s32 core_id) const {
                MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
                return m_pinned_threads[core_id];
            }

            const svc::SvcAccessFlagSet &GetSvcPermissions() const { return m_capabilities.GetSvcPermissions(); }

            constexpr KResourceLimit *GetResourceLimit() const { return m_resource_limit; }
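
            /* Reserve/release charge or refund the given resource against this process's resource limit. */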
            bool ReserveResource(ams::svc::LimitableResource which, s64 value);
            bool ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout);
            void ReleaseResource(ams::svc::LimitableResource which, s64 value);
            void ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint);

            constexpr KLightLock &GetStateLock() { return m_state_lock; }
            constexpr KLightLock &GetListLock() { return m_list_lock; }

            constexpr KProcessPageTable &GetPageTable() { return m_page_table; }
            constexpr const KProcessPageTable &GetPageTable() const { return m_page_table; }

            constexpr KHandleTable &GetHandleTable() { return m_handle_table; }
            constexpr const KHandleTable &GetHandleTable() const { return m_handle_table; }

            KWaitObject *GetWaitObjectPointer() { return std::addressof(m_wait_object); }

            size_t GetUsedUserPhysicalMemorySize() const;
            size_t GetTotalUserPhysicalMemorySize() const;
            size_t GetUsedNonSystemUserPhysicalMemorySize() const;
            size_t GetTotalNonSystemUserPhysicalMemorySize() const;

            Result AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size);
            void RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size);

            void AddIoRegion(KIoRegion *io_region);
            void RemoveIoRegion(KIoRegion *io_region);
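
            /* Thread local regions are carved out of the process's thread local pages (tracked in the TLP trees above). */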
            Result CreateThreadLocalRegion(KProcessAddress *out);
            Result DeleteThreadLocalRegion(KProcessAddress addr);
            void *GetThreadLocalRegionPointer(KProcessAddress addr);

            constexpr KProcessAddress GetProcessLocalRegionAddress() const { return m_plr_address; }
            constexpr void *GetProcessLocalRegionHeapAddress() const { return m_plr_heap_address; }

            KThread *GetExceptionThread() const { return m_exception_thread; }

            void AddCpuTime(s64 diff) { m_cpu_time += diff; }
            s64 GetCpuTime() { return m_cpu_time.Load(); }

            constexpr s64 GetScheduledCount() const { return m_schedule_count; }
            void IncrementScheduledCount() { ++m_schedule_count; }

            void IncrementRunningThreadCount();
            void DecrementRunningThreadCount();

            size_t GetTotalSystemResourceSize() const {
                return m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->GetSize() : 0;
            }

            size_t GetUsedSystemResourceSize() const {
                return m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->GetUsedSize() : 0;
            }
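
            /* Per-core scheduler bookkeeping: the thread currently running on each core plus the idle/switch counts recorded when it was set. */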
            void SetRunningThread(s32 core, KThread *thread, u64 idle_count, u64 switch_count) {
                m_running_threads[core]              = thread;
                m_running_thread_idle_counts[core]   = idle_count;
                m_running_thread_switch_counts[core] = switch_count;
            }

            void ClearRunningThread(KThread *thread) {
                for (size_t i = 0; i < util::size(m_running_threads); ++i) {
                    if (m_running_threads[i] == thread) {
                        m_running_threads[i] = nullptr;
                    }
                }
            }

            const KSystemResource &GetSystemResource() const { return *m_system_resource; }

            const KMemoryBlockSlabManager &GetMemoryBlockSlabManager() const { return m_system_resource->GetMemoryBlockSlabManager(); }
            const KBlockInfoManager &GetBlockInfoManager() const { return m_system_resource->GetBlockInfoManager(); }
            const KPageTableManager &GetPageTableManager() const { return m_system_resource->GetPageTableManager(); }

            constexpr KThread *GetRunningThread(s32 core) const { return m_running_threads[core]; }
            constexpr u64 GetRunningThreadIdleCount(s32 core) const { return m_running_thread_idle_counts[core]; }
            constexpr u64 GetRunningThreadSwitchCount(s32 core) const { return m_running_thread_switch_counts[core]; }

            void RegisterThread(KThread *thread);
            void UnregisterThread(KThread *thread);

            Result Run(s32 priority, size_t stack_size);

            Result Reset();
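
            /* SetDebugBreak/SetAttached transition the process between State_RunningAttached and State_DebugBreak. */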
            void SetDebugBreak() {
                if (m_state == State_RunningAttached) {
                    this->ChangeState(State_DebugBreak);
                }
            }

            void SetAttached() {
                if (m_state == State_DebugBreak) {
                    this->ChangeState(State_RunningAttached);
                }
            }

            Result SetActivity(ams::svc::ProcessActivity activity);

            void PinCurrentThread();
            void UnpinCurrentThread();
            void UnpinThread(KThread *thread);
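
            /* Each process owns a KConditionVariable and a KAddressArbiter; the wrappers below forward to them. */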
            void SignalConditionVariable(uintptr_t cv_key, int32_t count) {
                return m_cond_var.Signal(cv_key, count);
            }

            Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
                R_RETURN(m_cond_var.Wait(address, cv_key, tag, ns));
            }

            Result SignalAddressArbiter(uintptr_t address, ams::svc::SignalType signal_type, s32 value, s32 count) {
                R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
            }

            Result WaitAddressArbiter(uintptr_t address, ams::svc::ArbitrationType arb_type, s32 value, s64 timeout) {
                R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
            }

            Result GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count);

            static KProcess *GetProcessFromId(u64 process_id);
            static Result GetProcessList(s32 *out_num_processes, ams::kern::svc::KUserPointer<u64 *> out_process_ids, s32 max_out_count);
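
            /* Called on context switch: activates the incoming process's page table, or the kernel page table if there is no next process. */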
            static void Switch(KProcess *cur_process, KProcess *next_process) {
                MESOSPHERE_UNUSED(cur_process);

                /* Update the current page table. */
                if (next_process) {
                    next_process->GetPageTable().Activate(next_process->GetProcessId());
                } else {
                    Kernel::GetKernelPageTable().Activate();
                }
            }
        public:
            /* Overridden parent functions. */
            bool IsInitialized() const { return m_is_initialized; }

            static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

            void Finalize();

            ALWAYS_INLINE u64 GetIdImpl() const { return this->GetProcessId(); }
            ALWAYS_INLINE u64 GetId() const { return this->GetIdImpl(); }

            virtual bool IsSignaled() const override {
                MESOSPHERE_ASSERT_THIS();
                MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

                return m_is_signaled;
            }

            void DoWorkerTaskImpl();
        private:
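
            /* A state change marks the process signaled and notifies waiters (KProcess is a synchronization object). */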
            void ChangeState(State new_state) {
                if (m_state != new_state) {
                    m_state       = new_state;
                    m_is_signaled = true;
                    this->NotifyAvailable();
                }
            }

            ALWAYS_INLINE Result InitializeHandleTable(s32 size) {
                /* Try to initialize the handle table. */
                R_TRY(m_handle_table.Initialize(size));

                /* We succeeded, so note that we did. */
                m_is_handle_table_initialized = true;
                R_SUCCEED();
            }

            ALWAYS_INLINE void FinalizeHandleTable() {
                /* Finalize the table. */
                m_handle_table.Finalize();

                /* Note that the table is finalized. */
                m_is_handle_table_initialized = false;
            }
    };
}
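
/* Illustrative sketch of how these accessors are typically used from kernel code
   (assumes the GetCurrentProcess() helper provided elsewhere in mesosphere):

       KProcess &process = GetCurrentProcess();
       if (!process.IsPermittedSvc(svc_id)) {
           // Reject the SVC for this process.
       }
       KProcessPageTable &page_table = process.GetPageTable();
*/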