
Merge pull request #8457 from liamwhite/kprocess-suspend

kernel: implement KProcess suspension
Authored by Fernando S on 2022-06-16 02:41:12 +02:00, committed by GitHub
Commit f86b770ff7 (no known key found for this signature in database; GPG key ID: 4AEE18F83AFDEB23)
12 changed files with 204 additions and 217 deletions
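
For context, since the commit message is the only description shown here: this change renames System::StallCPU/UnstallCPU to StallProcesses/UnstallProcesses, replaces the per-core "suspend" threads with "shutdown" threads, and adds a KProcess::SetActivity API so that pausing emulation suspends guest processes rather than parking host CPU threads. A minimal sketch of the renamed stall API follows; the helper itself is hypothetical, but the pattern matches the nvhost_ctrl hunk at the end of this diff.

#include "core/core.h"

// Hypothetical helper, assuming the yuzu tree after this PR is applied.
template <typename Work>
void WithProcessesStalled(Core::System& system, Work&& work) {
    auto lock = system.StallProcesses(); // was System::StallCPU(); suspends all guest processes
    work();                              // guest code cannot make progress while stalled
    system.UnstallProcesses();           // was System::UnstallCPU(); resumes the guest
}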

View file

@@ -138,7 +138,6 @@ struct System::Impl {
kernel.Suspend(false);
core_timing.SyncPause(false);
cpu_manager.Pause(false);
is_paused = false;
return status;
@@ -150,25 +149,22 @@ struct System::Impl {
core_timing.SyncPause(true);
kernel.Suspend(true);
cpu_manager.Pause(true);
is_paused = true;
return status;
}
std::unique_lock<std::mutex> StallCPU() {
std::unique_lock<std::mutex> StallProcesses() {
std::unique_lock<std::mutex> lk(suspend_guard);
kernel.Suspend(true);
core_timing.SyncPause(true);
cpu_manager.Pause(true);
return lk;
}
void UnstallCPU() {
void UnstallProcesses() {
if (!is_paused) {
core_timing.SyncPause(false);
kernel.Suspend(false);
cpu_manager.Pause(false);
}
}
@@ -334,6 +330,8 @@ struct System::Impl {
gpu_core->NotifyShutdown();
}
kernel.ShutdownCores();
cpu_manager.Shutdown();
debugger.reset();
services.reset();
service_manager.reset();
@@ -499,12 +497,12 @@ void System::DetachDebugger() {
}
}
std::unique_lock<std::mutex> System::StallCPU() {
return impl->StallCPU();
std::unique_lock<std::mutex> System::StallProcesses() {
return impl->StallProcesses();
}
void System::UnstallCPU() {
impl->UnstallCPU();
void System::UnstallProcesses() {
impl->UnstallProcesses();
}
void System::InitializeDebugger() {

View file

@@ -163,8 +163,8 @@ public:
/// Forcibly detach the debugger if it is running.
void DetachDebugger();
std::unique_lock<std::mutex> StallCPU();
void UnstallCPU();
std::unique_lock<std::mutex> StallProcesses();
void UnstallProcesses();
/**
* Initialize the debugger.

View file

@@ -16,31 +16,28 @@
namespace Core {
CpuManager::CpuManager(System& system_)
: pause_barrier{std::make_unique<Common::Barrier>(1)}, system{system_} {}
CpuManager::CpuManager(System& system_) : system{system_} {}
CpuManager::~CpuManager() = default;
void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
std::size_t core) {
cpu_manager.RunThread(stop_token, core);
cpu_manager.RunThread(core);
}
void CpuManager::Initialize() {
running_mode = true;
if (is_multicore) {
for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
}
pause_barrier = std::make_unique<Common::Barrier>(Core::Hardware::NUM_CPU_CORES + 1);
} else {
core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
pause_barrier = std::make_unique<Common::Barrier>(2);
num_cores = is_multicore ? Core::Hardware::NUM_CPU_CORES : 1;
for (std::size_t core = 0; core < num_cores; core++) {
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
}
}
void CpuManager::Shutdown() {
running_mode = false;
Pause(false);
for (std::size_t core = 0; core < num_cores; core++) {
if (core_data[core].host_thread.joinable()) {
core_data[core].host_thread.join();
}
}
}
std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
@@ -51,8 +48,8 @@ std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
return IdleThreadFunction;
}
std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
return SuspendThreadFunction;
std::function<void(void*)> CpuManager::GetShutdownThreadStartFunc() {
return ShutdownThreadFunction;
}
void CpuManager::GuestThreadFunction(void* cpu_manager_) {
@@ -82,17 +79,12 @@ void CpuManager::IdleThreadFunction(void* cpu_manager_) {
}
}
void CpuManager::SuspendThreadFunction(void* cpu_manager_) {
CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
if (cpu_manager->is_multicore) {
cpu_manager->MultiCoreRunSuspendThread();
} else {
cpu_manager->SingleCoreRunSuspendThread();
}
void CpuManager::ShutdownThreadFunction(void* cpu_manager) {
static_cast<CpuManager*>(cpu_manager)->ShutdownThread();
}
void* CpuManager::GetStartFuncParamater() {
return static_cast<void*>(this);
void* CpuManager::GetStartFuncParameter() {
return this;
}
///////////////////////////////////////////////////////////////////////////////
@@ -132,21 +124,6 @@ void CpuManager::MultiCoreRunIdleThread() {
}
}
void CpuManager::MultiCoreRunSuspendThread() {
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
while (true) {
auto core = kernel.CurrentPhysicalCoreIndex();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
ASSERT(core == kernel.CurrentPhysicalCoreIndex());
scheduler.RescheduleCurrentCore();
}
}
///////////////////////////////////////////////////////////////////////////////
/// SingleCore ///
///////////////////////////////////////////////////////////////////////////////
@@ -190,21 +167,6 @@ void CpuManager::SingleCoreRunIdleThread() {
}
}
void CpuManager::SingleCoreRunSuspendThread() {
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
while (true) {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
ASSERT(core == kernel.GetCurrentHostThreadID());
scheduler.RescheduleCurrentCore();
}
}
void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
{
auto& kernel = system.Kernel();
@@ -237,24 +199,16 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
}
}
void CpuManager::Pause(bool paused) {
std::scoped_lock lk{pause_lock};
void CpuManager::ShutdownThread() {
auto& kernel = system.Kernel();
auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0;
auto* current_thread = kernel.GetCurrentEmuThread();
if (pause_state == paused) {
return;
}
// Set the new state
pause_state.store(paused);
// Wake up any waiting threads
pause_state.notify_all();
// Wait for all threads to successfully change state before returning
pause_barrier->Sync();
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
UNREACHABLE();
}
void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
void CpuManager::RunThread(std::size_t core) {
/// Initialization
system.RegisterCoreThread(core);
std::string name;
@@ -268,8 +222,6 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
auto& data = core_data[core];
data.host_context = Common::Fiber::ThreadToFiber();
const bool sc_sync = !is_async_gpu && !is_multicore;
bool sc_sync_first_use = sc_sync;
// Cleanup
SCOPE_EXIT({
@@ -277,32 +229,13 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
MicroProfileOnThreadExit();
});
/// Running
while (running_mode) {
if (pause_state.load(std::memory_order_relaxed)) {
// Wait for caller to acknowledge pausing
pause_barrier->Sync();
// Wait until unpaused
pause_state.wait(true, std::memory_order_relaxed);
// Wait for caller to acknowledge unpausing
pause_barrier->Sync();
}
if (sc_sync_first_use) {
system.GPU().ObtainContext();
sc_sync_first_use = false;
}
// Emulation was stopped
if (stop_token.stop_requested()) {
return;
}
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
// Running
if (!is_async_gpu && !is_multicore) {
system.GPU().ObtainContext();
}
auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
}
} // namespace Core
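
The shutdown path that replaces the old pause barrier can be summarized as follows; the wrapper function is hypothetical and simply mirrors the two calls already shown in the core.cpp hunk near the top of this diff.

#include "core/cpu_manager.h"
#include "core/hle/kernel/kernel.h"

// Hypothetical teardown helper, assuming the yuzu tree after this PR.
void ShutdownEmulatedCores(Kernel::KernelCore& kernel, Core::CpuManager& cpu_manager) {
    kernel.ShutdownCores();  // make each core's shutdown thread runnable, then interrupt all cores
    cpu_manager.Shutdown();  // once ShutdownThread() yields back to the host context,
                             // RunThread() returns and the per-core std::jthreads are joined
}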

View file

@@ -46,12 +46,10 @@ public:
void Initialize();
void Shutdown();
void Pause(bool paused);
static std::function<void(void*)> GetGuestThreadStartFunc();
static std::function<void(void*)> GetIdleThreadStartFunc();
static std::function<void(void*)> GetSuspendThreadStartFunc();
void* GetStartFuncParamater();
static std::function<void(void*)> GetShutdownThreadStartFunc();
void* GetStartFuncParameter();
void PreemptSingleCore(bool from_running_enviroment = true);
@@ -63,38 +61,33 @@ private:
static void GuestThreadFunction(void* cpu_manager);
static void GuestRewindFunction(void* cpu_manager);
static void IdleThreadFunction(void* cpu_manager);
static void SuspendThreadFunction(void* cpu_manager);
static void ShutdownThreadFunction(void* cpu_manager);
void MultiCoreRunGuestThread();
void MultiCoreRunGuestLoop();
void MultiCoreRunIdleThread();
void MultiCoreRunSuspendThread();
void SingleCoreRunGuestThread();
void SingleCoreRunGuestLoop();
void SingleCoreRunIdleThread();
void SingleCoreRunSuspendThread();
static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
void RunThread(std::stop_token stop_token, std::size_t core);
void ShutdownThread();
void RunThread(std::size_t core);
struct CoreData {
std::shared_ptr<Common::Fiber> host_context;
std::jthread host_thread;
};
std::atomic<bool> running_mode{};
std::atomic<bool> pause_state{};
std::unique_ptr<Common::Barrier> pause_barrier{};
std::mutex pause_lock{};
std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};
bool is_async_gpu{};
bool is_multicore{};
std::atomic<std::size_t> current_core{};
std::size_t idle_count{};
std::size_t num_cores{};
static constexpr std::size_t max_cycle_runs = 5;
System& system;

View file

@@ -67,18 +67,20 @@ public:
}
bool SignalDebugger(SignalInfo signal_info) {
std::scoped_lock lk{connection_lock};
{
std::scoped_lock lk{connection_lock};
if (stopped) {
// Do not notify the debugger about another event.
// It should be ignored.
return false;
if (stopped) {
// Do not notify the debugger about another event.
// It should be ignored.
return false;
}
// Set up the state.
stopped = true;
info = signal_info;
}
// Set up the state.
stopped = true;
info = signal_info;
// Write a single byte into the pipe to wake up the debug interface.
boost::asio::write(signal_pipe, boost::asio::buffer(&stopped, sizeof(stopped)));
return true;
@@ -141,9 +143,6 @@ private:
AsyncReceiveInto(signal_pipe, pipe_data, [&](auto d) { PipeData(d); });
AsyncReceiveInto(client_socket, client_data, [&](auto d) { ClientData(d); });
// Stop the emulated CPU.
AllCoreStop();
// Set the active thread.
UpdateActiveThread();
@@ -159,7 +158,7 @@ private:
switch (info.type) {
case SignalType::Stopped:
// Stop emulation.
AllCoreStop();
PauseEmulation();
// Notify the client.
active_thread = info.thread;
@@ -171,7 +170,6 @@ private:
frontend->ShuttingDown();
// Wait for emulation to shut down gracefully now.
suspend.reset();
signal_pipe.close();
client_socket.shutdown(boost::asio::socket_base::shutdown_both);
LOG_INFO(Debug_GDBStub, "Shut down server");
@@ -189,32 +187,29 @@ private:
std::scoped_lock lk{connection_lock};
stopped = true;
}
AllCoreStop();
PauseEmulation();
UpdateActiveThread();
frontend->Stopped(active_thread);
break;
}
case DebuggerAction::Continue:
active_thread->SetStepState(Kernel::StepState::NotStepping);
ResumeInactiveThreads();
AllCoreResume();
MarkResumed([&] { ResumeEmulation(); });
break;
case DebuggerAction::StepThreadUnlocked:
active_thread->SetStepState(Kernel::StepState::StepPending);
ResumeInactiveThreads();
AllCoreResume();
MarkResumed([&] {
active_thread->SetStepState(Kernel::StepState::StepPending);
active_thread->Resume(Kernel::SuspendType::Debug);
ResumeEmulation(active_thread);
});
break;
case DebuggerAction::StepThreadLocked:
active_thread->SetStepState(Kernel::StepState::StepPending);
SuspendInactiveThreads();
AllCoreResume();
case DebuggerAction::StepThreadLocked: {
MarkResumed([&] {
active_thread->SetStepState(Kernel::StepState::StepPending);
active_thread->Resume(Kernel::SuspendType::Debug);
});
break;
}
case DebuggerAction::ShutdownEmulation: {
// Suspend all threads and release any locks held
active_thread->RequestSuspend(Kernel::SuspendType::Debug);
SuspendInactiveThreads();
AllCoreResume();
// Spawn another thread that will exit after shutdown,
// to avoid a deadlock
Core::System* system_ref{&system};
@@ -226,33 +221,33 @@ private:
}
}
void AllCoreStop() {
if (!suspend) {
suspend = system.StallCPU();
void PauseEmulation() {
// Put all threads to sleep on next scheduler round.
for (auto* thread : ThreadList()) {
thread->RequestSuspend(Kernel::SuspendType::Debug);
}
// Signal an interrupt so that scheduler will fire.
system.Kernel().InterruptAllPhysicalCores();
}
void ResumeEmulation(Kernel::KThread* except = nullptr) {
// Wake up all threads.
for (auto* thread : ThreadList()) {
if (thread == except) {
continue;
}
thread->SetStepState(Kernel::StepState::NotStepping);
thread->Resume(Kernel::SuspendType::Debug);
}
}
void AllCoreResume() {
template <typename Callback>
void MarkResumed(Callback&& cb) {
std::scoped_lock lk{connection_lock};
stopped = false;
system.UnstallCPU();
suspend.reset();
}
void SuspendInactiveThreads() {
for (auto* thread : ThreadList()) {
if (thread != active_thread) {
thread->RequestSuspend(Kernel::SuspendType::Debug);
}
}
}
void ResumeInactiveThreads() {
for (auto* thread : ThreadList()) {
if (thread != active_thread) {
thread->Resume(Kernel::SuspendType::Debug);
thread->SetStepState(Kernel::StepState::NotStepping);
}
}
cb();
}
void UpdateActiveThread() {
@@ -260,8 +255,6 @@ private:
if (std::find(threads.begin(), threads.end(), active_thread) == threads.end()) {
active_thread = threads[0];
}
active_thread->Resume(Kernel::SuspendType::Debug);
active_thread->SetStepState(Kernel::StepState::NotStepping);
}
const std::vector<Kernel::KThread*>& ThreadList() {
@@ -277,7 +270,6 @@ private:
boost::asio::io_context io_context;
boost::process::async_pipe signal_pipe;
boost::asio::ip::tcp::socket client_socket;
std::optional<std::unique_lock<std::mutex>> suspend;
SignalInfo info;
Kernel::KThread* active_thread;

View file

@@ -275,11 +275,15 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a
shmem->Close();
}
void KProcess::RegisterThread(const KThread* thread) {
void KProcess::RegisterThread(KThread* thread) {
KScopedLightLock lk{list_lock};
thread_list.push_back(thread);
}
void KProcess::UnregisterThread(const KThread* thread) {
void KProcess::UnregisterThread(KThread* thread) {
KScopedLightLock lk{list_lock};
thread_list.remove(thread);
}
@@ -297,6 +301,50 @@ ResultCode KProcess::Reset() {
return ResultSuccess;
}
ResultCode KProcess::SetActivity(ProcessActivity activity) {
// Lock ourselves and the scheduler.
KScopedLightLock lk{state_lock};
KScopedLightLock list_lk{list_lock};
KScopedSchedulerLock sl{kernel};
// Validate our state.
R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
// Either pause or resume.
if (activity == ProcessActivity::Paused) {
// Verify that we're not suspended.
if (is_suspended) {
return ResultInvalidState;
}
// Suspend all threads.
for (auto* thread : GetThreadList()) {
thread->RequestSuspend(SuspendType::Process);
}
// Set ourselves as suspended.
SetSuspended(true);
} else {
ASSERT(activity == ProcessActivity::Runnable);
// Verify that we're suspended.
if (!is_suspended) {
return ResultInvalidState;
}
// Resume all threads.
for (auto* thread : GetThreadList()) {
thread->Resume(SuspendType::Process);
}
// Set ourselves as resumed.
SetSuspended(false);
}
return ResultSuccess;
}
ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
std::size_t code_size) {
program_id = metadata.GetTitleID();
@@ -556,9 +604,10 @@ bool KProcess::IsSignaled() const {
}
KProcess::KProcess(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_},
page_table{std::make_unique<KPageTable>(kernel_.System())}, handle_table{kernel_},
address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, state_lock{kernel_} {}
: KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
kernel_.System())},
handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
state_lock{kernel_}, list_lock{kernel_} {}
KProcess::~KProcess() = default;
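
A minimal usage sketch of the new activity API; the surrounding function is hypothetical, and it assumes a valid KProcess pointer with no scheduler locks already held.

#include "core/hle/kernel/k_process.h"

// Hypothetical illustration of pausing and resuming a single process.
void PauseAndResume(Kernel::KProcess* process) {
    // Pausing requests a SuspendType::Process suspension on every thread the process owns.
    if (process->SetActivity(Kernel::ProcessActivity::Paused).IsSuccess()) {
        // ... the process' threads are held at their next scheduling point here ...
        process->SetActivity(Kernel::ProcessActivity::Runnable); // releases the suspension
    }
}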

View file

@@ -63,6 +63,11 @@ enum class ProcessStatus {
DebugBreak,
};
enum class ProcessActivity : u32 {
Runnable,
Paused,
};
class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask> {
KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
@@ -282,17 +287,17 @@ public:
u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
/// Gets the list of all threads created with this process as their owner.
const std::list<const KThread*>& GetThreadList() const {
std::list<KThread*>& GetThreadList() {
return thread_list;
}
/// Registers a thread as being created under this process,
/// adding it to this process' thread list.
void RegisterThread(const KThread* thread);
void RegisterThread(KThread* thread);
/// Unregisters a thread from this process, removing it
/// from this process' thread list.
void UnregisterThread(const KThread* thread);
void UnregisterThread(KThread* thread);
/// Clears the signaled state of the process if and only if it's signaled.
///
@@ -347,6 +352,8 @@ public:
void DoWorkerTaskImpl();
ResultCode SetActivity(ProcessActivity activity);
void PinCurrentThread(s32 core_id);
void UnpinCurrentThread(s32 core_id);
void UnpinThread(KThread* thread);
@@ -442,7 +449,7 @@ private:
std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
/// List of threads that are running with this process as their owner.
std::list<const KThread*> thread_list;
std::list<KThread*> thread_list;
/// List of shared memory that are running with this process as their owner.
std::list<KSharedMemoryInfo*> shared_memory_list;
@@ -475,6 +482,7 @@ private:
KThread* exception_thread{};
KLightLock state_lock;
KLightLock list_lock;
using TLPTree =
Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;

View file

@@ -267,15 +267,15 @@ ResultCode KThread::InitializeDummyThread(KThread* thread) {
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
Core::CpuManager::GetIdleThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
system.GetCpuManager().GetStartFuncParameter());
}
ResultCode KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
KThreadFunction func, uintptr_t arg,
s32 virt_core) {
return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
Core::CpuManager::GetSuspendThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
Core::CpuManager::GetShutdownThreadStartFunc(),
system.GetCpuManager().GetStartFuncParameter());
}
ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
@@ -284,7 +284,7 @@ ResultCode KThread::InitializeUserThread(Core::System& system, KThread* thread,
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
system.GetCpuManager().GetStartFuncParamater());
system.GetCpuManager().GetStartFuncParameter());
}
void KThread::PostDestroy(uintptr_t arg) {

View file

@@ -76,7 +76,7 @@ struct KernelCore::Impl {
InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system);
InitializeSchedulers();
InitializeSuspendThreads();
InitializeShutdownThreads();
InitializePreemption(kernel);
RegisterHostThread();
@@ -143,9 +143,9 @@ struct KernelCore::Impl {
CleanupObject(system_resource_limit);
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
if (suspend_threads[core_id]) {
suspend_threads[core_id]->Close();
suspend_threads[core_id] = nullptr;
if (shutdown_threads[core_id]) {
shutdown_threads[core_id]->Close();
shutdown_threads[core_id] = nullptr;
}
schedulers[core_id]->Finalize();
@@ -247,14 +247,14 @@ struct KernelCore::Impl {
system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
}
void InitializeSuspendThreads() {
void InitializeShutdownThreads() {
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
suspend_threads[core_id] = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {},
shutdown_threads[core_id] = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {},
core_id)
.IsSuccess());
suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
suspend_threads[core_id]->DisableDispatch();
shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
shutdown_threads[core_id]->DisableDispatch();
}
}
@@ -769,7 +769,7 @@ struct KernelCore::Impl {
std::weak_ptr<ServiceThread> default_service_thread;
Common::ThreadWorker service_threads_manager;
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads;
std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads;
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
@@ -920,6 +920,12 @@ const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
return *impl->global_object_list_container;
}
void KernelCore::InterruptAllPhysicalCores() {
for (auto& physical_core : impl->cores) {
physical_core.Interrupt();
}
}
void KernelCore::InvalidateAllInstructionCaches() {
for (auto& physical_core : impl->cores) {
physical_core.ArmInterface().ClearInstructionCache();
@@ -1067,19 +1073,22 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
return *impl->hidbus_shared_mem;
}
void KernelCore::Suspend(bool in_suspention) {
const bool should_suspend = exception_exited || in_suspention;
{
KScopedSchedulerLock lock(*this);
const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
impl->suspend_threads[core_id]->SetState(state);
impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
ThreadWaitReasonForDebugging::Suspended);
}
void KernelCore::Suspend(bool suspended) {
const bool should_suspend{exception_exited || suspended};
const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable;
for (auto* process : GetProcessList()) {
process->SetActivity(activity);
}
}
void KernelCore::ShutdownCores() {
for (auto* thread : impl->shutdown_threads) {
void(thread->Run());
}
InterruptAllPhysicalCores();
}
bool KernelCore::IsMulticore() const {
return impl->is_multicore;
}
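
In short, Suspend() now toggles process activity instead of flipping dedicated suspend threads; a trivial, hypothetical wrapper to illustrate the semantics:

#include "core/hle/kernel/kernel.h"

// Illustration only, assuming the yuzu tree after this PR.
void SetGuestPaused(Kernel::KernelCore& kernel, bool paused) {
    // paused == true:  every process in GetProcessList() gets SetActivity(ProcessActivity::Paused)
    // paused == false: processes return to Runnable, unless exception_exited keeps them suspended
    kernel.Suspend(paused);
}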

View file

@@ -184,6 +184,8 @@ public:
const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
void InterruptAllPhysicalCores();
void InvalidateAllInstructionCaches();
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
@@ -269,12 +271,15 @@ public:
/// Gets the shared memory object for HIDBus services.
const Kernel::KSharedMemory& GetHidBusSharedMem() const;
/// Suspend/unsuspend the OS.
void Suspend(bool in_suspention);
/// Suspend/unsuspend all processes.
void Suspend(bool suspend);
/// Exceptional exit the OS.
/// Exceptional exit all processes.
void ExceptionalExit();
/// Notify emulated CPU cores to shut down.
void ShutdownCores();
bool IsMulticore() const;
bool IsShuttingDown() const;

View file

@@ -2537,7 +2537,7 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
return ResultOutOfRange;
}
const auto* const current_process = system.Kernel().CurrentProcess();
auto* const current_process = system.Kernel().CurrentProcess();
const auto total_copy_size = out_thread_ids_size * sizeof(u64);
if (out_thread_ids_size > 0 &&

View file

@@ -150,9 +150,9 @@ NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector
event.event->GetWritableEvent().Clear();
if (events_interface.failed[event_id]) {
{
auto lk = system.StallCPU();
auto lk = system.StallProcesses();
gpu.WaitFence(params.syncpt_id, target_value);
system.UnstallCPU();
system.UnstallProcesses();
}
std::memcpy(output.data(), &params, sizeof(params));
events_interface.failed[event_id] = false;