mirror of https://github.com/Atmosphere-NX/Atmosphere.git
synced 2024-12-18 00:12:03 +00:00

commit 1b9acc4a6a (parent: cbecda2a27)
kern: SvcUnmapPhysicalMemory, cleanup thread pinning

10 changed files with 248 additions and 24 deletions
@@ -30,6 +30,7 @@ namespace ams::kern {
             KInterruptTaskManager *interrupt_task_manager;
             s32 core_id;
             void *exception_stack_top;
+            ams::svc::ThreadLocalRegion *tlr;
         };
         static_assert(std::is_standard_layout<KCurrentContext>::value && std::is_trivially_destructible<KCurrentContext>::value);
         static_assert(sizeof(KCurrentContext) <= cpu::DataCacheLineSize);

@@ -80,6 +81,10 @@ namespace ams::kern {
         return impl::GetCurrentContext().core_id;
     }

+    ALWAYS_INLINE ams::svc::ThreadLocalRegion *GetCurrentThreadLocalRegion() {
+        return impl::GetCurrentContext().tlr;
+    }
+
     ALWAYS_INLINE void SetCurrentThread(KThread *new_thread) {
         impl::GetCurrentContext().current_thread = new_thread;
     }

@@ -88,4 +93,8 @@ namespace ams::kern {
         impl::GetCurrentContext().current_process = new_process;
     }

+    ALWAYS_INLINE void SetCurrentThreadLocalRegion(void *address) {
+        impl::GetCurrentContext().tlr = static_cast<ams::svc::ThreadLocalRegion *>(address);
+    }
+
 }
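The hunks above give each core's KCurrentContext a cached pointer to the running thread's kernel-mapped thread-local region, plus Get/Set accessors. A standalone sketch of the pattern, using mock types and a fake core-id read rather than the kernel's real definitions:

#include <cassert>
#include <cstdint>

/* Stand-ins for ams::svc::ThreadLocalRegion and the per-core context; the real
 * structures have more fields and are reached through tpidr_el1. */
struct MockThreadLocalRegion { std::uint16_t disable_count; std::uint16_t interrupt_flag; };
struct MockCurrentContext   { MockThreadLocalRegion *tlr; };

MockCurrentContext g_current_context[4];   /* one slot per core */
int g_core_id = 0;                         /* stand-in for reading the current core id */

MockCurrentContext &GetCurrentContext() { return g_current_context[g_core_id]; }

MockThreadLocalRegion *GetCurrentThreadLocalRegion() { return GetCurrentContext().tlr; }
void SetCurrentThreadLocalRegion(void *address) {
    GetCurrentContext().tlr = static_cast<MockThreadLocalRegion *>(address);
}

int main() {
    MockThreadLocalRegion tlr{1, 0};
    SetCurrentThreadLocalRegion(&tlr);                          /* done at context switch */
    assert(GetCurrentThreadLocalRegion()->disable_count == 1);  /* read on SVC/exception entry */
    return 0;
}

Later hunks in this commit wire the setter into the scheduler's context switch and read the cached pointer from the SVC entry paths.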
@@ -197,7 +197,7 @@ namespace ams::kern {
             bool LeaveUserException();
             bool ReleaseUserException(KThread *thread);

-            KThread *GetPreemptionStatePinnedThread(s32 core_id) const {
+            KThread *GetPinnedThread(s32 core_id) const {
                 MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
                 return this->pinned_threads[core_id];
             }

@@ -269,7 +269,7 @@ namespace ams::kern {
             Result SetActivity(ams::svc::ProcessActivity activity);

-            void SetPreemptionState();
+            void PinCurrentThread();

             Result SignalToAddress(KProcessAddress address) {
                 return this->cond_var.SignalToAddress(address);

@@ -421,8 +421,9 @@ namespace ams::kern {
             constexpr KSynchronizationObject **GetSynchronizationObjectBuffer() { return std::addressof(this->sync_object_buffer.sync_objects[0]); }
             constexpr ams::svc::Handle *GetHandleBuffer() { return std::addressof(this->sync_object_buffer.handles[sizeof(this->sync_object_buffer.sync_objects) / sizeof(ams::svc::Handle) - ams::svc::ArgumentHandleCountMax]); }

-            constexpr u16 GetUserPreemptionState() const { return *GetPointer<u16>(this->tls_address + 0x100); }
-            constexpr void SetKernelPreemptionState(u16 state) const { *GetPointer<u16>(this->tls_address + 0x100 + sizeof(u16)) = state; }
+            u16 GetUserDisableCount() const { return static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->disable_count; }
+            void SetInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->interrupt_flag = 1; }
+            void ClearInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(this->tls_heap_address)->interrupt_flag = 0; }

             constexpr void SetDebugAttached() { this->debug_attached = true; }
             constexpr bool IsAttachedToDebugger() const { return this->debug_attached; }
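The old accessors poked the user-mapped TLS at raw offsets (tls_address + 0x100 and + 0x100 + sizeof(u16)); the new ones go through the kernel (heap) mapping and named ThreadLocalRegion fields. A compile-only mirror of the layout those offsets imply, for illustration only: the real ams::svc::ThreadLocalRegion declares more than this, and the leading 0x100 bytes are assumed to be the IPC message buffer.

#include <cstddef>
#include <cstdint>

struct MirrorThreadLocalRegion {
    std::uint8_t  message_buffer[0x100];   /* assumed: IPC message buffer fills the first 0x100 bytes */
    std::uint16_t disable_count;           /* read by GetUserDisableCount() */
    std::uint16_t interrupt_flag;          /* set/cleared by SetInterruptFlag()/ClearInterruptFlag() */
    /* ...remainder of the thread-local region... */
};

static_assert(offsetof(MirrorThreadLocalRegion, disable_count)  == 0x100);
static_assert(offsetof(MirrorThreadLocalRegion, interrupt_flag) == 0x102);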
@@ -145,15 +145,15 @@ namespace ams::kern::arch::arm64 {
         {
             const bool is_user_mode = (context->psr & 0xF) == 0;
             if (is_user_mode) {
-                /* Handle any changes needed to the user preemption state. */
-                if (GetCurrentThread().GetUserPreemptionState() != 0 && GetCurrentProcess().GetPreemptionStatePinnedThread(GetCurrentCoreId()) == nullptr) {
+                /* If the user disable count is set, we may need to pin the current thread. */
+                if (GetCurrentThread().GetUserDisableCount() != 0 && GetCurrentProcess().GetPinnedThread(GetCurrentCoreId()) == nullptr) {
                     KScopedSchedulerLock lk;

-                    /* Note the preemption state in process. */
-                    GetCurrentProcess().SetPreemptionState();
+                    /* Pin the current thread. */
+                    GetCurrentProcess().PinCurrentThread();

-                    /* Set the kernel preemption state flag. */
-                    GetCurrentThread().SetKernelPreemptionState(1);
+                    /* Set the interrupt flag for the thread. */
+                    GetCurrentThread().SetInterruptFlag();
                 }

                 /* Enable interrupts while we process the usermode exception. */
@@ -31,6 +31,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
     mrs x9, elr_el1
     mrs x10, spsr_el1
     mrs x11, tpidr_el0
+    mrs x18, tpidr_el1

     /* Save callee-saved registers. */
     stp x19, x20, [sp, #(8 * 19)]

@@ -63,8 +64,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
     tst x10, #1
     b.eq 3f

-    /* Check if our preemption state allows us to call SVCs. */
-    mrs x10, tpidrro_el0
+    /* Check if our disable count allows us to call SVCs. */
+    ldr x10, [x18, #0x30]
     ldrh w10, [x10, #0x100]
     cbz w10, 1f

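In the new check, x18 holds tpidr_el1 (the per-core KCurrentContext), so ldr x10, [x18, #0x30] fetches the tlr pointer cached by the first hunk, and ldrh w10, [x10, #0x100] reads the disable count from it. A compile-only mirror of why 0x30 is the expected offset on AArch64; the leading fields are assumed from the KCurrentContext hunk and its accessors earlier in this diff, and the real struct may wrap them in atomics of the same size.

#include <cstddef>
#include <cstdint>

struct MirrorCurrentContext {
    void         *current_thread;          /* 0x00 */
    void         *current_process;         /* 0x08 */
    void         *scheduler;               /* 0x10 (assumed) */
    void         *interrupt_task_manager;  /* 0x18 */
    std::int32_t  core_id;                 /* 0x20, padded to 8 bytes */
    void         *exception_stack_top;     /* 0x28 */
    void         *tlr;                     /* 0x30, what the SVC handlers load */
};

static_assert(offsetof(MirrorCurrentContext, tlr) == 0x30);
static_assert(sizeof(MirrorCurrentContext) <= 64);   /* mirrors the DataCacheLineSize assert */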
@@ -83,7 +84,6 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
     strb w8, [sp, #(0x120 + 0x11)]

     /* Invoke the SVC handler. */
-    mrs x18, tpidr_el1
     msr daifclr, #2
     blr x11
     msr daifset, #2

@@ -211,6 +211,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
     mrs x17, elr_el1
     mrs x20, spsr_el1
     mrs x19, tpidr_el0
+    mrs x18, tpidr_el1
     stp x17, x20, [sp, #(8 * 32)]
     str x19, [sp, #(8 * 34)]

@@ -239,8 +240,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
     tst x17, #1
     b.eq 3f

-    /* Check if our preemption state allows us to call SVCs. */
-    mrs x15, tpidrro_el0
+    /* Check if our disable count allows us to call SVCs. */
+    ldr x15, [x18, #0x30]
     ldrh w15, [x15, #0x100]
     cbz w15, 1f

@@ -259,7 +260,6 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
     strb w16, [sp, #(0x120 + 0x11)]

     /* Invoke the SVC handler. */
-    mrs x18, tpidr_el1
     msr daifclr, #2
     blr x19
     msr daifset, #2
@@ -3352,7 +3352,217 @@ namespace ams::kern {
     }

     Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
-        MESOSPHERE_UNIMPLEMENTED();
+        /* Lock the physical memory lock. */
+        KScopedLightLock phys_lk(this->map_physical_memory_lock);
+
+        /* Lock the table. */
+        KScopedLightLock lk(this->general_lock);
+
+        /* Calculate the last address for convenience. */
+        const KProcessAddress last_address = address + size - 1;
+
+        /* Define iteration variables. */
+        KProcessAddress cur_address;
+        size_t mapped_size;
+
+        /* Check if the memory is mapped. */
+        {
+            /* Iterate over the memory. */
+            cur_address = address;
+            mapped_size = 0;
+
+            auto it = this->memory_block_manager.FindIterator(cur_address);
+            while (true) {
+                /* Check that the iterator is valid. */
+                MESOSPHERE_ASSERT(it != this->memory_block_manager.end());
+
+                /* Get the memory info. */
+                const KMemoryInfo info = it->GetMemoryInfo();
+
+                /* Verify the memory's state. */
+                const bool is_normal = info.GetState() == KMemoryState_Normal && info.GetAttribute() == 0;
+                const bool is_free   = info.GetState() == KMemoryState_Free;
+                R_UNLESS(is_normal || is_free, svc::ResultInvalidCurrentMemory());
+
+                /* Check if we're done. */
+                if (last_address <= info.GetLastAddress()) {
+                    if (is_normal) {
+                        mapped_size += (last_address + 1 - cur_address);
+                    }
+                    break;
+                }
+
+                /* Track the memory if it's mapped. */
+                if (is_normal) {
+                    mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address;
+                }
+
+                /* Advance. */
+                cur_address = info.GetEndAddress();
+                ++it;
+            }
+
+            /* If there's nothing mapped, we've nothing to do. */
+            R_SUCCEED_IF(mapped_size == 0);
+        }
+
+        /* Make a page group for the unmap region. */
+        KPageGroup pg(this->block_info_manager);
+        {
+            auto &impl = this->GetImpl();
+
+            /* Begin traversal. */
+            TraversalContext context;
+            TraversalEntry   cur_entry  = {};
+            bool             cur_valid  = false;
+            TraversalEntry   next_entry;
+            bool             next_valid;
+            size_t           tot_size   = 0;
+
+            next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_address);
+            next_entry.block_size = (next_entry.block_size - (GetInteger(next_entry.phys_addr) & (next_entry.block_size - 1)));
+
+            /* Iterate, building the group. */
+            while (true) {
+                if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+                    cur_entry.block_size += next_entry.block_size;
+                } else {
+                    if (cur_valid) {
+                        MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
+                        R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize));
+                    }
+
+                    /* Update tracking variables. */
+                    tot_size += cur_entry.block_size;
+                    cur_entry = next_entry;
+                    cur_valid = next_valid;
+                }
+
+                if (cur_entry.block_size + tot_size >= size) {
+                    break;
+                }
+
+                next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
+            }
+
+            /* Add the last block. */
+            if (cur_valid) {
+                MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr));
+                R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), (size - tot_size) / PageSize));
+            }
+        }
+        MESOSPHERE_ASSERT(pg.GetNumPages() == mapped_size / PageSize);
+
+        /* Create an update allocator. */
+        KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager);
+        R_TRY(allocator.GetResult());
+
+        /* We're going to perform an update, so create a helper. */
+        KScopedPageTableUpdater updater(this);
+
+        /* Open a reference to the pages, we're unmapping, and close the reference when we're done. */
+        pg.Open();
+        ON_SCOPE_EXIT { pg.Close(); };
+
+        /* Reset the current tracking address, and make sure we clean up on failure. */
+        cur_address = address;
+        auto remap_guard = SCOPE_GUARD {
+            if (cur_address > address) {
+                const KProcessAddress last_map_address = cur_address - 1;
+                cur_address = address;
+
+                /* Iterate over the memory we unmapped. */
+                auto it = this->memory_block_manager.FindIterator(cur_address);
+                auto pg_it = pg.begin();
+                KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+                size_t pg_pages = pg_it->GetNumPages();
+
+                while (true) {
+                    /* Get the memory info for the pages we unmapped, convert to property. */
+                    const KMemoryInfo info = it->GetMemoryInfo();
+                    const KPageProperties prev_properties = { info.GetPermission(), false, false, false };
+
+                    /* If the memory is normal, we unmapped it and need to re-map it. */
+                    if (info.GetState() == KMemoryState_Normal) {
+                        /* Determine the range to map. */
+                        size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_map_address + 1 - cur_address) / PageSize;
+
+                        /* While we have pages to map, map them. */
+                        while (map_pages > 0) {
+                            /* Check if we're at the end of the physical block. */
+                            if (pg_pages == 0) {
+                                /* Ensure there are more pages to map. */
+                                MESOSPHERE_ABORT_UNLESS(pg_it != pg.end());
+
+                                /* Advance our physical block. */
+                                ++pg_it;
+                                pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress());
+                                pg_pages = pg_it->GetNumPages();
+                            }
+
+                            /* Map whatever we can. */
+                            const size_t cur_pages = std::min(pg_pages, map_pages);
+                            MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, pg_phys_addr, true, prev_properties, OperationType_Map, true));
+
+                            /* Advance. */
+                            cur_address += cur_pages * PageSize;
+                            map_pages -= cur_pages;
+
+                            pg_phys_addr += cur_pages * PageSize;
+                            pg_pages -= cur_pages;
+                        }
+                    }
+
+                    /* Check if we're done. */
+                    if (last_map_address <= info.GetLastAddress()) {
+                        break;
+                    }
+
+                    /* Advance. */
+                    ++it;
+                }
+            }
+        };
+
+        /* Iterate over the memory, unmapping as we go. */
+        auto it = this->memory_block_manager.FindIterator(cur_address);
+        while (true) {
+            /* Check that the iterator is valid. */
+            MESOSPHERE_ASSERT(it != this->memory_block_manager.end());
+
+            /* Get the memory info. */
+            const KMemoryInfo info = it->GetMemoryInfo();
+
+            /* If the memory state is normal, we need to unmap it. */
+            if (info.GetState() == KMemoryState_Normal) {
+                /* Determine the range to unmap. */
+                const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false };
+                const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;
+
+                /* Unmap. */
+                R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
+            }
+
+            /* Check if we're done. */
+            if (last_address <= info.GetLastAddress()) {
+                break;
+            }
+
+            /* Advance. */
+            cur_address = info.GetEndAddress();
+            ++it;
+        }
+
+        /* Release the memory resource. */
+        this->mapped_physical_memory_size -= mapped_size;
+        GetCurrentProcess().ReleaseResource(ams::svc::LimitableResource_PhysicalMemoryMax, mapped_size);
+
+        /* Update memory blocks. */
+        this->memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
+
+        /* We succeeded. */
+        remap_guard.Cancel();
+        return ResultSuccess();
     }

     Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
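The traversal loop in the new implementation coalesces physically contiguous page-table entries into single blocks before handing them to the KPageGroup. A standalone sketch of just that coalescing step, with a hypothetical Entry type and plain vectors; the real loop also tracks entry validity and stops once the requested size is covered.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Entry { std::uint64_t phys_addr; std::uint64_t block_size; };

/* Merge physically adjacent entries so each output block covers one contiguous range. */
std::vector<Entry> CoalesceContiguous(const std::vector<Entry> &entries) {
    std::vector<Entry> blocks;
    for (const auto &e : entries) {
        if (!blocks.empty() && blocks.back().phys_addr + blocks.back().block_size == e.phys_addr) {
            blocks.back().block_size += e.block_size;   /* extend the current block */
        } else {
            blocks.push_back(e);                        /* start a new block */
        }
    }
    return blocks;
}

int main() {
    const std::vector<Entry> traversal = {
        { 0x80000000, 0x1000 }, { 0x80001000, 0x1000 },  /* contiguous: merged into one block */
        { 0x90000000, 0x2000 },                          /* gap: starts a new block */
    };
    for (const auto &b : CoalesceContiguous(traversal)) {
        std::printf("block: %#llx + %#llx\n",
                    static_cast<unsigned long long>(b.phys_addr),
                    static_cast<unsigned long long>(b.block_size));
    }
    return 0;
}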
@@ -977,7 +977,7 @@ namespace ams::kern {
        }
    }

-    void KProcess::SetPreemptionState() {
+    void KProcess::PinCurrentThread() {
        MESOSPHERE_UNIMPLEMENTED();
    }

@@ -118,11 +118,11 @@ namespace ams::kern {
        for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
            KThread *top_thread = priority_queue.GetScheduledFront(core_id);
            if (top_thread != nullptr) {
-                /* If the thread has no waiters, we need to check if the process has a thread pinned by PreemptionState. */
+                /* If the thread has no waiters, we need to check if the process has a thread pinned. */
                if (top_thread->GetNumKernelWaiters() == 0) {
                    if (KProcess *parent = top_thread->GetOwnerProcess(); parent != nullptr) {
-                        if (KThread *suggested = parent->GetPreemptionStatePinnedThread(core_id); suggested != nullptr && suggested != top_thread) {
-                            /* We prefer our parent's pinned thread possible. However, we also don't want to schedule un-runnable threads. */
+                        if (KThread *suggested = parent->GetPinnedThread(core_id); suggested != nullptr && suggested != top_thread && suggested->GetNumKernelWaiters() == 0) {
+                            /* We prefer our parent's pinned thread if possible. However, we also don't want to schedule un-runnable threads. */
                            if (suggested->GetRawState() == KThread::ThreadState_Runnable) {
                                top_thread = suggested;
                            } else {
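The revised scheduler condition prefers a process's pinned thread over the queue front only when the front has no kernel waiters and the pinned thread exists, differs from the front, has no kernel waiters itself, and is runnable. A standalone restatement of just that predicate, with mock types; the fallback when the pinned thread is not runnable lies outside this hunk and is not modeled here.

#include <cassert>

enum class MockThreadState { Runnable, Waiting };

struct MockThread {
    MockThreadState state;
    int             num_kernel_waiters;
};

/* True when the scheduler should run the parent's pinned thread instead of the queue front. */
bool PinnedThreadReplacesFront(const MockThread *front, const MockThread *pinned) {
    return front != nullptr && front->num_kernel_waiters == 0 &&
           pinned != nullptr && pinned != front &&
           pinned->num_kernel_waiters == 0 &&
           pinned->state == MockThreadState::Runnable;
}

int main() {
    MockThread front { MockThreadState::Runnable, 0 };
    MockThread pinned{ MockThreadState::Runnable, 0 };
    assert(PinnedThreadReplacesFront(&front, &pinned));    /* pinned thread is preferred */
    pinned.state = MockThreadState::Waiting;
    assert(!PinnedThreadReplacesFront(&front, &pinned));   /* un-runnable: not preferred */
    return 0;
}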
@@ -261,6 +261,7 @@ namespace ams::kern {

            /* Set the new Thread Local region. */
            cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
+            SetCurrentThreadLocalRegion(next_thread->GetThreadLocalRegionHeapAddress());
        }

    void KScheduler::ClearPreviousThread(KThread *thread) {

@@ -327,7 +327,7 @@ namespace ams::kern {
        /* Release user exception, if relevant. */
        if (this->parent != nullptr) {
            this->parent->ReleaseUserException(this);
-            if (this->parent->GetPreemptionStatePinnedThread(GetCurrentCoreId()) == this) {
+            if (this->parent->GetPinnedThread(GetCurrentCoreId()) == this) {
                /* TODO: this->parent->UnpinCurrentThread(); */
                MESOSPHERE_UNIMPLEMENTED();
            }
@@ -23,7 +23,7 @@ namespace ams::kern::svc {

        void Break(ams::svc::BreakReason break_reason, uintptr_t address, size_t size) {
            /* Log for debug that Break was called. */
-            MESOSPHERE_LOG("%s: Break(%08x)\n", GetCurrentProcess().GetName(), static_cast<u32>(break_reason));
+            MESOSPHERE_LOG("%s: Break(%08x, %016lx, %zu)\n", GetCurrentProcess().GetName(), static_cast<u32>(break_reason), address, size);

            /* If the current process is attached to debugger, notify it. */
            if (GetCurrentProcess().IsAttachedToDebugger()) {

@@ -36,7 +36,10 @@ namespace ams::kern::svc {
            }

            /* TODO */
-            MESOSPHERE_UNIMPLEMENTED();
+            if (size == sizeof(u32)) {
+                MESOSPHERE_LOG("DEBUG: %08x\n", *reinterpret_cast<u32 *>(address));
+            }
+            MESOSPHERE_PANIC("Break was called\n");
        }

    }
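With this change, svc::Break logs its full arguments and, when size is exactly four bytes, dumps the u32 at the supplied address before panicking. A hedged userland fragment of what a caller could look like; the prototype shown is the one libnx declares, reason 0 is assumed to be the fatal "panic" break reason, and Atmosphere's own bindings may differ.

#include <cstdint>

/* Assumed userland prototype (libnx-style); not part of this commit. */
extern "C" std::uint32_t svcBreak(std::uint32_t break_reason, std::uint64_t address, std::uint64_t size);

void ReportValueAndPanic(std::uint32_t value) {
    /* size == sizeof(u32), so the kernel's Break handler above logs "DEBUG: <value>" before panicking. */
    svcBreak(0, reinterpret_cast<std::uint64_t>(&value), sizeof(value));
}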