mirror of https://github.com/Atmosphere-NX/Atmosphere.git
synced 2024-11-08 05:01:44 +00:00

kern: support dynamic resource expansion for system heaps/events/sessions.

This commit is contained in:
parent dfd57b09a3
commit f8fd072349

37 changed files with 856 additions and 328 deletions
@@ -33,14 +33,15 @@ namespace ams::secmon::smc {
     using PhysicalMemorySize = util::BitPack32::Field<16, 2>;

     /* Kernel view, from libmesosphere. */
     using DebugFillMemory = util::BitPack32::Field<0, 1, bool>;
     using EnableUserExceptionHandlers = util::BitPack32::Field<DebugFillMemory::Next, 1, bool>;
     using EnableUserPmuAccess = util::BitPack32::Field<EnableUserExceptionHandlers::Next, 1, bool>;
     using IncreaseThreadResourceLimit = util::BitPack32::Field<EnableUserPmuAccess::Next, 1, bool>;
-    using Reserved4 = util::BitPack32::Field<IncreaseThreadResourceLimit::Next, 4, u32>;
-    using UseSecureMonitorPanicCall = util::BitPack32::Field<Reserved4::Next, 1, bool>;
-    using Reserved9 = util::BitPack32::Field<UseSecureMonitorPanicCall::Next, 7, u32>;
-    using MemorySize = util::BitPack32::Field<Reserved9::Next, 2, u32>; /* smc::MemorySize = pkg1::MemorySize */
+    using DisableDynamicResourceLimits = util::BitPack32::Field<IncreaseThreadResourceLimit::Next, 1, bool>;
+    using Reserved5 = util::BitPack32::Field<DisableDynamicResourceLimits::Next, 3, u32>;
+    using UseSecureMonitorPanicCall = util::BitPack32::Field<Reserved5::Next, 1, bool>;
+    using Reserved9 = util::BitPack32::Field<UseSecureMonitorPanicCall::Next, 7, u32>;
+    using MemorySize = util::BitPack32::Field<Reserved9::Next, 2, u32>; /* smc::MemorySize = pkg1::MemorySize */
 };

 constexpr const pkg1::MemorySize DramIdToMemorySize[fuse::DramId_Count] = {
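The repacking carves the new flag out of the old Reserved4 span, so every later field keeps its bit position: UseSecureMonitorPanicCall stays at bit 8, Reserved9 at bits 9-15, MemorySize at bits 16-17. A standalone sketch (not Atmosphere code) of reading the new bit:

    #include <cstdint>

    /* Bit 4 was the low bit of the old 4-bit Reserved4 field; it now carries the
       DisableDynamicResourceLimits flag, with Reserved5 covering bits 5-7. */
    constexpr bool GetDisableDynamicResourceLimits(uint32_t kernel_config) {
        return ((kernel_config >> 4) & 1) != 0;
    }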
@@ -16,6 +16,7 @@
 #pragma once
 #include <vapours.hpp>
 #include <mesosphere/kern_select_cpu.hpp>
+#include <mesosphere/kern_select_interrupt_manager.hpp>

 namespace ams::kern::arch::arm64 {

@@ -24,6 +25,32 @@ namespace ams::kern::arch::arm64 {
         { t.next } -> std::convertible_to<T *>;
     };

+    ALWAYS_INLINE bool IsSlabAtomicValid() {
+        /* Without careful consideration, slab heap atomics are vulnerable to */
+        /* the ABA problem, when doing compare and swap of node pointers. */
+        /* We resolve this by using the ARM exclusive monitor; we bundle the */
+        /* load and store of the relevant values into a single exclusive monitor */
+        /* hold, preventing the ABA problem. */
+        /* However, our assembly must do both a load and a store under a single */
+        /* hold, at different memory addresses. Considering the case where the */
+        /* addresses are distinct but resolve to the same cache set (by chance), */
+        /* we can note that under a 1-way associative (direct-mapped) cache */
+        /* we would have as a guarantee that the second access would evict the */
+        /* cache line from the first access, invalidating our exclusive monitor */
+        /* hold. Thus, we require that the cache is not 1-way associative, for */
+        /* our implementation to be correct. */
+        {
+            /* Disable interrupts. */
+            KScopedInterruptDisable di;
+
+            /* Select L1 cache. */
+            cpu::SetCsselrEl1(0);
+
+            /* Check that the L1 cache is not direct-mapped. */
+            return cpu::CacheSizeIdRegisterAccessor().GetAssociativity() != 0;
+        }
+    }
+
     template<typename T> requires SlabHeapNode<T>
     ALWAYS_INLINE T *AllocateFromSlabAtomic(T **head) {
         u32 tmp;
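The ABA hazard that the new IsSlabAtomicValid() check guards against is the classic lock-free-stack failure. A minimal illustration with std::atomic (not kernel code) of the pop that the exclusive-monitor assembly replaces:

    #include <atomic>

    struct Node { Node *next; };

    /* Thread 1 reads head == A and next == B. If thread 2 then pops A and B and
       pushes A back, thread 1's CAS still sees head == A and succeeds, installing
       B even though B is no longer on the list. Doing the load of node->next and
       the store to head under one ldaxr/stlxr exclusive hold rules this out. */
    Node *AbaPronePop(std::atomic<Node *> &head) {
        Node *node = head.load();
        while (node != nullptr) {
            Node *next = node->next;  /* may be stale by the time the CAS runs */
            if (head.compare_exchange_weak(node, next)) {
                break;
            }
        }
        return node;
    }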
@@ -36,10 +63,7 @@ namespace ams::kern::arch::arm64 {
         " ldr %[next], [%[node]]\n"
         " stlxr %w[tmp], %[next], [%[head]]\n"
         " cbnz %w[tmp], 1b\n"
-        " b 3f\n"
         "2:\n"
-        " clrex\n"
-        "3:\n"
         : [tmp]"=&r"(tmp), [node]"=&r"(node), [next]"=&r"(next), [head]"+&r"(head)
         :
         : "cc", "memory"

@@ -59,7 +83,6 @@ namespace ams::kern::arch::arm64 {
         " str %[next], [%[node]]\n"
         " stlxr %w[tmp], %[node], [%[head]]\n"
         " cbnz %w[tmp], 1b\n"
-        "2:\n"
         : [tmp]"=&r"(tmp), [node]"+&r"(node), [next]"=&r"(next), [head]"+&r"(head)
         :
         : "cc", "memory"
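For contrast, the free path is an ordinary lock-free push, which is not ABA-prone because it never dereferences a possibly-stale node. The same retry loop rendered with std::atomic (a sketch; the kernel keeps the stlxr form):

    #include <atomic>

    struct Node { Node *next; };

    void PushEquivalent(std::atomic<Node *> &head, Node *node) {
        Node *cur = head.load();
        do {
            node->next = cur;                              /* " str %[next], [%[node]] " */
        } while (!head.compare_exchange_weak(cur, node));  /* " stlxr ... cbnz ... 1b "   */
    }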
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_k_dynamic_slab_heap.hpp>
+
+namespace ams::kern {
+
+    template<typename T, bool ClearNode = false>
+    class KDynamicResourceManager {
+        NON_COPYABLE(KDynamicResourceManager);
+        NON_MOVEABLE(KDynamicResourceManager);
+        public:
+            using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;
+        private:
+            KDynamicPageManager *m_page_allocator{};
+            DynamicSlabType *m_slab_heap{};
+        public:
+            constexpr KDynamicResourceManager() = default;
+
+            constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return m_slab_heap->GetAddress(); }
+            constexpr ALWAYS_INLINE size_t GetSize() const { return m_slab_heap->GetSize(); }
+            constexpr ALWAYS_INLINE size_t GetUsed() const { return m_slab_heap->GetUsed(); }
+            constexpr ALWAYS_INLINE size_t GetPeak() const { return m_slab_heap->GetPeak(); }
+            constexpr ALWAYS_INLINE size_t GetCount() const { return m_slab_heap->GetCount(); }
+
+            ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, DynamicSlabType *slab_heap) {
+                m_page_allocator = page_allocator;
+                m_slab_heap      = slab_heap;
+            }
+
+            T *Allocate() const {
+                return m_slab_heap->Allocate(m_page_allocator);
+            }
+
+            void Free(T *t) const {
+                m_slab_heap->Free(t);
+            }
+    };
+
+    class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo>{};
+    class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock>{};
+
+    using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType;
+    using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;
+
+}
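The new manager is a thin handle pairing a page allocator with a shared dynamic slab heap, so several managers can front one heap while choosing their own expansion source. A hypothetical wiring sketch (instance names invented for illustration):

    KDynamicPageManager g_resource_pages;   /* hypothetical page source */
    KBlockInfoSlabHeap  g_block_info_heap;  /* one shared backing heap  */
    KBlockInfoManager   g_app_block_info_manager;
    KBlockInfoManager   g_sys_block_info_manager;

    void WireBlockInfoManagers() {
        /* Both managers allocate from the same heap; expansion pages come from g_resource_pages. */
        g_app_block_info_manager.Initialize(std::addressof(g_resource_pages), std::addressof(g_block_info_heap));
        g_sys_block_info_manager.Initialize(std::addressof(g_resource_pages), std::addressof(g_block_info_heap));
    }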
@@ -23,95 +23,71 @@
 namespace ams::kern {

     template<typename T, bool ClearNode = false>
-    class KDynamicSlabHeap {
+    class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
         NON_COPYABLE(KDynamicSlabHeap);
         NON_MOVEABLE(KDynamicSlabHeap);
         private:
-            using Impl = impl::KSlabHeapImpl;
             using PageBuffer = KDynamicPageManager::PageBuffer;
         private:
-            Impl m_impl;
-            KDynamicPageManager *m_page_allocator;
-            std::atomic<size_t> m_used;
-            std::atomic<size_t> m_peak;
-            std::atomic<size_t> m_count;
-            KVirtualAddress m_address;
-            size_t m_size;
-        private:
-            ALWAYS_INLINE Impl *GetImpl() {
-                return std::addressof(m_impl);
-            }
-            ALWAYS_INLINE const Impl *GetImpl() const {
-                return std::addressof(m_impl);
-            }
+            std::atomic<size_t> m_used{};
+            std::atomic<size_t> m_peak{};
+            std::atomic<size_t> m_count{};
+            KVirtualAddress m_address{};
+            size_t m_size{};
         public:
-            constexpr KDynamicSlabHeap() : m_impl(), m_page_allocator(), m_used(), m_peak(), m_count(), m_address(), m_size() { /* ... */ }
+            constexpr KDynamicSlabHeap() = default;

-            constexpr KVirtualAddress GetAddress() const { return m_address; }
-            constexpr size_t GetSize() const { return m_size; }
-            constexpr size_t GetUsed() const { return m_used.load(); }
-            constexpr size_t GetPeak() const { return m_peak.load(); }
-            constexpr size_t GetCount() const { return m_count.load(); }
+            constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return m_address; }
+            constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
+            constexpr ALWAYS_INLINE size_t GetUsed() const { return m_used.load(); }
+            constexpr ALWAYS_INLINE size_t GetPeak() const { return m_peak.load(); }
+            constexpr ALWAYS_INLINE size_t GetCount() const { return m_count.load(); }

-            constexpr bool IsInRange(KVirtualAddress addr) const {
+            constexpr ALWAYS_INLINE bool IsInRange(KVirtualAddress addr) const {
                 return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
             }

-            void Initialize(KVirtualAddress memory, size_t sz) {
-                /* Set tracking fields. */
-                m_address = memory;
-                m_count = sz / sizeof(T);
-                m_size = m_count * sizeof(T);
-
-                /* Free blocks to memory. */
-                u8 *cur = GetPointer<u8>(m_address + m_size);
-                for (size_t i = 0; i < sz / sizeof(T); i++) {
-                    cur -= sizeof(T);
-                    this->GetImpl()->Free(cur);
-                }
-            }
-
-            void Initialize(KDynamicPageManager *page_allocator) {
-                m_page_allocator = page_allocator;
-                m_address = m_page_allocator->GetAddress();
-                m_size = m_page_allocator->GetSize();
-            }
-
-            void Initialize(KDynamicPageManager *page_allocator, size_t num_objects) {
+            ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, size_t num_objects) {
                 MESOSPHERE_ASSERT(page_allocator != nullptr);

                 /* Initialize members. */
-                this->Initialize(page_allocator);
+                m_address = page_allocator->GetAddress();
+                m_size    = page_allocator->GetSize();
+
+                /* Initialize the base allocator. */
+                KSlabHeapImpl::Initialize();

                 /* Allocate until we have the correct number of objects. */
                 while (m_count.load() < num_objects) {
-                    auto *allocated = reinterpret_cast<T *>(m_page_allocator->Allocate());
+                    auto *allocated = reinterpret_cast<T *>(page_allocator->Allocate());
                     MESOSPHERE_ABORT_UNLESS(allocated != nullptr);

                     for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
-                        this->GetImpl()->Free(allocated + i);
+                        KSlabHeapImpl::Free(allocated + i);
                     }

                     m_count.fetch_add(sizeof(PageBuffer) / sizeof(T));
                 }
             }

-            T *Allocate() {
-                T *allocated = reinterpret_cast<T *>(this->GetImpl()->Allocate());
+            ALWAYS_INLINE T *Allocate(KDynamicPageManager *page_allocator) {
+                T *allocated = static_cast<T *>(KSlabHeapImpl::Allocate());

                 /* If we successfully allocated and we should clear the node, do so. */
                 if constexpr (ClearNode) {
                     if (AMS_LIKELY(allocated != nullptr)) {
-                        reinterpret_cast<Impl::Node *>(allocated)->next = nullptr;
+                        reinterpret_cast<KSlabHeapImpl::Node *>(allocated)->next = nullptr;
                     }
                 }

                 /* If we fail to allocate, try to get a new page from our next allocator. */
                 if (AMS_UNLIKELY(allocated == nullptr)) {
-                    if (m_page_allocator != nullptr) {
-                        allocated = reinterpret_cast<T *>(m_page_allocator->Allocate());
+                    if (page_allocator != nullptr) {
+                        allocated = reinterpret_cast<T *>(page_allocator->Allocate());
                         if (allocated != nullptr) {
                             /* If we succeeded in getting a page, free the rest to our slab. */
                             for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
-                                this->GetImpl()->Free(allocated + i);
+                                KSlabHeapImpl::Free(allocated + i);
                             }
                             m_count.fetch_add(sizeof(PageBuffer) / sizeof(T));
                         }

@@ -135,13 +111,10 @@ namespace ams::kern {
                 return allocated;
             }

-            void Free(T *t) {
-                this->GetImpl()->Free(t);
+            ALWAYS_INLINE void Free(T *t) {
+                KSlabHeapImpl::Free(t);
                 m_used.fetch_sub(1);
             }
     };

-    class KBlockInfoManager : public KDynamicSlabHeap<KBlockInfo>{};
-    class KMemoryBlockSlabManager : public KDynamicSlabHeap<KMemoryBlock>{};
-
 }
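The heap no longer caches a page allocator; each Allocate call now names the allocator that may satisfy an expansion, and passing nullptr (checked in the fallback above) forbids growth for that call. A sketch of the new call shape:

    /* Hypothetical helper: allocate without permitting the heap to grow. */
    template<typename T>
    T *AllocateWithoutExpansion(KDynamicSlabHeap<T> &heap) {
        return heap.Allocate(/* page_allocator = */ nullptr);
    }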
@@ -21,7 +21,7 @@
 namespace ams::kern {

-    class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> {
+    class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList, true> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
         private:
             KReadableEvent m_readable_event;
@@ -25,7 +25,7 @@ namespace ams::kern {
     class KClientPort;
     class KProcess;

-    class KLightSession final : public KAutoObjectWithSlabHeapAndContainer<KLightSession, KAutoObjectWithList> {
+    class KLightSession final : public KAutoObjectWithSlabHeapAndContainer<KLightSession, KAutoObjectWithList, true> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KLightSession, KAutoObject);
         private:
             enum class State : u8 {
@@ -144,6 +144,7 @@ namespace ams::kern {
     static NOINLINE const KMemoryRegion &GetPageTableHeapRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelPtHeap)); }
     static NOINLINE const KMemoryRegion &GetKernelStackRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelStack)); }
     static NOINLINE const KMemoryRegion &GetTempRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelTemp)); }
+    static NOINLINE const KMemoryRegion &GetSlabRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab)); }

     static NOINLINE const KMemoryRegion &GetKernelTraceBufferRegion() { return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelTraceBuffer)); }
@@ -15,58 +15,27 @@
  */
 #pragma once
 #include <mesosphere/kern_common.hpp>
-#include <mesosphere/kern_slab_helpers.hpp>
-#include <mesosphere/kern_k_dynamic_slab_heap.hpp>
+#include <mesosphere/kern_k_page_table_slab_heap.hpp>
+#include <mesosphere/kern_k_dynamic_resource_manager.hpp>

 namespace ams::kern {

-    namespace impl {
-
-        class PageTablePage {
-            private:
-                u8 m_buffer[PageSize];
-            public:
-                ALWAYS_INLINE PageTablePage() { /* Do not initialize anything. */ }
-        };
-        static_assert(sizeof(PageTablePage) == PageSize);
-
-    }
-
-    class KPageTableManager : public KDynamicSlabHeap<impl::PageTablePage, true> {
+    class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> {
         public:
-            using RefCount = u16;
-            static constexpr size_t PageTableSize = sizeof(impl::PageTablePage);
-            static_assert(PageTableSize == PageSize);
+            using RefCount = KPageTableSlabHeap::RefCount;
+            static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize;
         private:
-            using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>;
+            using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>;
         private:
-            RefCount *m_ref_counts;
+            KPageTableSlabHeap *m_pt_heap{};
         public:
-            static constexpr size_t CalculateReferenceCountSize(size_t size) {
-                return (size / PageSize) * sizeof(RefCount);
-            }
-        public:
-            constexpr KPageTableManager() : BaseHeap(), m_ref_counts() { /* ... */ }
-        private:
-            void Initialize(RefCount *rc) {
-                m_ref_counts = rc;
-                for (size_t i = 0; i < this->GetSize() / PageSize; i++) {
-                    m_ref_counts[i] = 0;
-                }
-            }
-
-            constexpr RefCount *GetRefCountPointer(KVirtualAddress addr) const {
-                return std::addressof(m_ref_counts[(addr - this->GetAddress()) / PageSize]);
-            }
-        public:
-            void Initialize(KDynamicPageManager *page_allocator, RefCount *rc) {
-                BaseHeap::Initialize(page_allocator);
-                this->Initialize(rc);
-            }
-
-            void Initialize(KDynamicPageManager *page_allocator, size_t object_count, RefCount *rc) {
-                BaseHeap::Initialize(page_allocator, object_count);
-                this->Initialize(rc);
+            constexpr KPageTableManager() = default;
+
+            ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, KPageTableSlabHeap *pt_heap) {
+                m_pt_heap = pt_heap;
+
+                static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>);
+                BaseHeap::Initialize(page_allocator, pt_heap);
             }

             KVirtualAddress Allocate() {

@@ -74,33 +43,23 @@ namespace ams::kern {
             }

             void Free(KVirtualAddress addr) {
-                /* Free the page. */
-                BaseHeap::Free(GetPointer<impl::PageTablePage>(addr));
+                return BaseHeap::Free(GetPointer<impl::PageTablePage>(addr));
             }

-            RefCount GetRefCount(KVirtualAddress addr) const {
-                MESOSPHERE_ASSERT(this->IsInRange(addr));
-                return *this->GetRefCountPointer(addr);
+            ALWAYS_INLINE RefCount GetRefCount(KVirtualAddress addr) const {
+                return m_pt_heap->GetRefCount(addr);
             }

-            void Open(KVirtualAddress addr, int count) {
-                MESOSPHERE_ASSERT(this->IsInRange(addr));
-
-                *this->GetRefCountPointer(addr) += count;
-
-                MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) > 0);
+            ALWAYS_INLINE void Open(KVirtualAddress addr, int count) {
+                return m_pt_heap->Open(addr, count);
             }

-            bool Close(KVirtualAddress addr, int count) {
-                MESOSPHERE_ASSERT(this->IsInRange(addr));
-                MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) >= count);
-
-                *this->GetRefCountPointer(addr) -= count;
-                return this->GetRefCount(addr) == 0;
+            ALWAYS_INLINE bool Close(KVirtualAddress addr, int count) {
+                return m_pt_heap->Close(addr, count);
             }

-            constexpr bool IsInPageTableHeap(KVirtualAddress addr) const {
-                return this->IsInRange(addr);
+            constexpr ALWAYS_INLINE bool IsInPageTableHeap(KVirtualAddress addr) const {
+                return m_pt_heap->IsInRange(addr);
             }
     };
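KPageTableManager is now a forwarding facade over KPageTableSlabHeap, but the reference-count protocol is unchanged: Open raises the count on a table page, and Close reports when it reaches zero. A call-site sketch (hypothetical helper name):

    void ReleaseTablePage(KPageTableManager &ptm, KVirtualAddress table) {
        if (ptm.Close(table, 1)) {
            /* Last reference: actually return the page to the heap. */
            ptm.Free(table);
        }
    }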
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_slab_helpers.hpp>
+#include <mesosphere/kern_k_dynamic_slab_heap.hpp>
+
+namespace ams::kern {
+
+    namespace impl {
+
+        class PageTablePage {
+            private:
+                u8 m_buffer[PageSize];
+            public:
+                ALWAYS_INLINE PageTablePage() { /* Do not initialize anything. */ }
+        };
+        static_assert(sizeof(PageTablePage) == PageSize);
+
+    }
+
+    class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> {
+        public:
+            using RefCount = u16;
+            static constexpr size_t PageTableSize = sizeof(impl::PageTablePage);
+            static_assert(PageTableSize == PageSize);
+        private:
+            using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>;
+        private:
+            RefCount *m_ref_counts{};
+        public:
+            static constexpr ALWAYS_INLINE size_t CalculateReferenceCountSize(size_t size) {
+                return (size / PageSize) * sizeof(RefCount);
+            }
+        public:
+            constexpr KPageTableSlabHeap() = default;
+        private:
+            ALWAYS_INLINE void Initialize(RefCount *rc) {
+                m_ref_counts = rc;
+                for (size_t i = 0; i < this->GetSize() / PageSize; i++) {
+                    m_ref_counts[i] = 0;
+                }
+            }
+
+            constexpr ALWAYS_INLINE RefCount *GetRefCountPointer(KVirtualAddress addr) const {
+                return m_ref_counts + ((addr - this->GetAddress()) / PageSize);
+            }
+        public:
+            ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, size_t object_count, RefCount *rc) {
+                BaseHeap::Initialize(page_allocator, object_count);
+                this->Initialize(rc);
+            }
+
+            ALWAYS_INLINE RefCount GetRefCount(KVirtualAddress addr) const {
+                MESOSPHERE_ASSERT(this->IsInRange(addr));
+                return *this->GetRefCountPointer(addr);
+            }
+
+            ALWAYS_INLINE void Open(KVirtualAddress addr, int count) {
+                MESOSPHERE_ASSERT(this->IsInRange(addr));
+
+                *this->GetRefCountPointer(addr) += count;
+
+                MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) > 0);
+            }
+
+            ALWAYS_INLINE bool Close(KVirtualAddress addr, int count) {
+                MESOSPHERE_ASSERT(this->IsInRange(addr));
+                MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) >= count);
+
+                *this->GetRefCountPointer(addr) -= count;
+                return this->GetRefCount(addr) == 0;
+            }
+
+            constexpr ALWAYS_INLINE bool IsInPageTableHeap(KVirtualAddress addr) const {
+                return this->IsInRange(addr);
+            }
+    };
+
+}
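CalculateReferenceCountSize is simple proportional bookkeeping: one u16 per page-table page. Assuming the usual 4 KiB PageSize, a 2 MiB heap needs (2 MiB / 4 KiB) * 2 bytes = 1 KiB of RefCount storage:

    static_assert(KPageTableSlabHeap::CalculateReferenceCountSize(2 * 1024 * 1024) == 1024);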
@@ -29,7 +29,7 @@
 #include <mesosphere/kern_k_address_arbiter.hpp>
 #include <mesosphere/kern_k_capabilities.hpp>
 #include <mesosphere/kern_k_wait_object.hpp>
-#include <mesosphere/kern_k_dynamic_slab_heap.hpp>
+#include <mesosphere/kern_k_dynamic_resource_manager.hpp>
 #include <mesosphere/kern_k_page_table_manager.hpp>

 namespace ams::kern {

@@ -121,6 +121,9 @@ namespace ams::kern {
             KMemoryBlockSlabManager m_memory_block_slab_manager{};
             KBlockInfoManager m_block_info_manager{};
             KPageTableManager m_page_table_manager{};
+            KMemoryBlockSlabHeap m_memory_block_heap{};
+            KBlockInfoSlabHeap m_block_info_heap{};
+            KPageTableSlabHeap m_page_table_heap{};
         private:
             Result Initialize(const ams::svc::CreateProcessParameter &params);
@@ -47,6 +47,8 @@ namespace ams::kern {

             Result SetLimitValue(ams::svc::LimitableResource which, s64 value);

+            void Add(ams::svc::LimitableResource which, s64 value);
+
             bool Reserve(ams::svc::LimitableResource which, s64 value);
             bool Reserve(ams::svc::LimitableResource which, s64 value, s64 timeout);
             void Release(ams::svc::LimitableResource which, s64 value);
@@ -25,7 +25,7 @@ namespace ams::kern {
     class KClientPort;
     class KProcess;

-    class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAutoObjectWithList> {
+    class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAutoObjectWithList, true> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KSession, KAutoObject);
         private:
             enum class State : u8 {
@@ -24,7 +24,7 @@
 namespace ams::kern {

-    class KSessionRequest final : public KSlabAllocated<KSessionRequest>, public KAutoObject, public util::IntrusiveListBaseNode<KSessionRequest> {
+    class KSessionRequest final : public KSlabAllocated<KSessionRequest, true>, public KAutoObject, public util::IntrusiveListBaseNode<KSessionRequest> {
         MESOSPHERE_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject);
         public:
             class SessionMappings {

@@ -140,6 +140,14 @@ namespace ams::kern {
                 return req;
             }

+            static KSessionRequest *CreateFromUnusedSlabMemory() {
+                KSessionRequest *req = KSessionRequest::AllocateFromUnusedSlabMemory();
+                if (req != nullptr) {
+                    KAutoObject::Create(req);
+                }
+                return req;
+            }
+
             virtual void Destroy() override {
                 this->Finalize();
                 KSessionRequest::Free(this);
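CreateFromUnusedSlabMemory mirrors the normal creation path but draws storage from the slack between slabs. A plausible call-site pattern (a sketch, not taken from this diff) would fall back to it only when the feature is enabled:

    KSessionRequest *AllocateRequest() {
        KSessionRequest *req = KSessionRequest::Create();  /* normal slab path */
        if (req == nullptr && KTargetSystem::IsDynamicResourceLimitsEnabled()) {
            req = KSessionRequest::CreateFromUnusedSlabMemory();
        }
        return req;
    }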
@@ -35,7 +35,7 @@ namespace ams::kern {
             bool m_is_initialized;
         public:
             explicit KSharedMemory()
-                : m_page_group(std::addressof(Kernel::GetBlockInfoManager())), m_resource_limit(nullptr), m_owner_process_id(std::numeric_limits<u64>::max()),
+                : m_page_group(std::addressof(Kernel::GetSystemBlockInfoManager())), m_resource_limit(nullptr), m_owner_process_id(std::numeric_limits<u64>::max()),
                   m_owner_perm(ams::svc::MemoryPermission_None), m_remote_perm(ams::svc::MemoryPermission_None), m_is_initialized(false)
             {
                 /* ... */
@@ -16,11 +16,13 @@
 #pragma once
 #include <mesosphere/kern_common.hpp>
 #include <mesosphere/kern_k_typed_address.hpp>
+#include <mesosphere/kern_k_memory_layout.hpp>

 #if defined(ATMOSPHERE_ARCH_ARM64)

 #include <mesosphere/arch/arm64/kern_k_slab_heap_impl.hpp>
 namespace ams::kern {
+    using ams::kern::arch::arm64::IsSlabAtomicValid;
     using ams::kern::arch::arm64::AllocateFromSlabAtomic;
     using ams::kern::arch::arm64::FreeToSlabAtomic;
 }
@@ -44,78 +46,73 @@ namespace ams::kern {
             Node *next;
         };
         private:
-            Node * m_head;
-            size_t m_obj_size;
+            Node *m_head{nullptr};
         public:
-            constexpr KSlabHeapImpl() : m_head(nullptr), m_obj_size(0) { MESOSPHERE_ASSERT_THIS(); }
+            constexpr KSlabHeapImpl() = default;

-            void Initialize(size_t size) {
-                MESOSPHERE_INIT_ABORT_UNLESS(m_head == nullptr);
-                m_obj_size = size;
+            void Initialize() {
+                MESOSPHERE_ABORT_UNLESS(m_head == nullptr);
+                MESOSPHERE_ABORT_UNLESS(IsSlabAtomicValid());
             }

-            Node *GetHead() const {
+            ALWAYS_INLINE Node *GetHead() const {
                 return m_head;
             }

-            size_t GetObjectSize() const {
-                return m_obj_size;
-            }
-
-            void *Allocate() {
-                MESOSPHERE_ASSERT_THIS();
-
+            ALWAYS_INLINE void *Allocate() {
                 return AllocateFromSlabAtomic(std::addressof(m_head));
             }

-            void Free(void *obj) {
-                MESOSPHERE_ASSERT_THIS();
-
-                Node *node = reinterpret_cast<Node *>(obj);
-
-                return FreeToSlabAtomic(std::addressof(m_head), node);
+            ALWAYS_INLINE void Free(void *obj) {
+                return FreeToSlabAtomic(std::addressof(m_head), static_cast<Node *>(obj));
             }
     };

 }

-    class KSlabHeapBase {
+    template<bool SupportDynamicExpansion>
+    class KSlabHeapBase : protected impl::KSlabHeapImpl {
         NON_COPYABLE(KSlabHeapBase);
         NON_MOVEABLE(KSlabHeapBase);
         private:
-            using Impl = impl::KSlabHeapImpl;
-        private:
-            Impl m_impl;
-            uintptr_t m_peak;
-            uintptr_t m_start;
-            uintptr_t m_end;
-        private:
-            ALWAYS_INLINE Impl *GetImpl() {
-                return std::addressof(m_impl);
-            }
-            ALWAYS_INLINE const Impl *GetImpl() const {
-                return std::addressof(m_impl);
-            }
+            size_t m_obj_size{};
+            uintptr_t m_peak{};
+            uintptr_t m_start{};
+            uintptr_t m_end{};
+        private:
+            ALWAYS_INLINE void UpdatePeakImpl(uintptr_t obj) {
+                static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
+                std::atomic_ref<uintptr_t> peak_ref(m_peak);
+
+                const uintptr_t alloc_peak = obj + this->GetObjectSize();
+                uintptr_t cur_peak = m_peak;
+                do {
+                    if (alloc_peak <= cur_peak) {
+                        break;
+                    }
+                } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak));
+            }
         public:
-            constexpr KSlabHeapBase() : m_impl(), m_peak(0), m_start(0), m_end(0) { MESOSPHERE_ASSERT_THIS(); }
+            constexpr KSlabHeapBase() = default;

             ALWAYS_INLINE bool Contains(uintptr_t address) const {
                 return m_start <= address && address < m_end;
             }

-            void InitializeImpl(size_t obj_size, void *memory, size_t memory_size) {
-                MESOSPHERE_ASSERT_THIS();
-
+            void Initialize(size_t obj_size, void *memory, size_t memory_size) {
                 /* Ensure we don't initialize a slab using null memory. */
                 MESOSPHERE_ABORT_UNLESS(memory != nullptr);

+                /* Set our object size. */
+                m_obj_size = obj_size;
+
                 /* Initialize the base allocator. */
-                this->GetImpl()->Initialize(obj_size);
+                KSlabHeapImpl::Initialize();

                 /* Set our tracking variables. */
                 const size_t num_obj = (memory_size / obj_size);
                 m_start = reinterpret_cast<uintptr_t>(memory);
                 m_end = m_start + num_obj * obj_size;
                 m_peak = m_start;

                 /* Free the objects. */
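UpdatePeakImpl is the standard lock-free "atomic max": retry the CAS only while the candidate still exceeds the stored peak. The same loop in minimal standalone form (a sketch, not kernel code):

    #include <atomic>
    #include <cstdint>

    void UpdateMax(std::atomic<uintptr_t> &peak, uintptr_t candidate) {
        uintptr_t cur = peak.load();
        /* compare_exchange reloads cur on failure, so each retry re-tests against the new peak. */
        while (candidate > cur && !peak.compare_exchange_weak(cur, candidate)) { /* retry */ }
    }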
@@ -123,75 +120,91 @@ namespace ams::kern {

                 for (size_t i = 0; i < num_obj; i++) {
                     cur -= obj_size;
-                    this->GetImpl()->Free(cur);
+                    KSlabHeapImpl::Free(cur);
                 }
             }

-            size_t GetSlabHeapSize() const {
+            ALWAYS_INLINE size_t GetSlabHeapSize() const {
                 return (m_end - m_start) / this->GetObjectSize();
             }

-            size_t GetObjectSize() const {
-                return this->GetImpl()->GetObjectSize();
+            ALWAYS_INLINE size_t GetObjectSize() const {
+                return m_obj_size;
             }

-            void *AllocateImpl() {
-                MESOSPHERE_ASSERT_THIS();
-
-                void *obj = this->GetImpl()->Allocate();
+            ALWAYS_INLINE void *Allocate() {
+                void *obj = KSlabHeapImpl::Allocate();

                 /* Track the allocated peak. */
                 #if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
                 if (AMS_LIKELY(obj != nullptr)) {
-                    static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free);
-                    std::atomic_ref<uintptr_t> peak_ref(m_peak);
-
-                    const uintptr_t alloc_peak = reinterpret_cast<uintptr_t>(obj) + this->GetObjectSize();
-                    uintptr_t cur_peak = m_peak;
-                    do {
-                        if (alloc_peak <= cur_peak) {
-                            break;
-                        }
-                    } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak));
+                    if constexpr (SupportDynamicExpansion) {
+                        if (this->Contains(reinterpret_cast<uintptr_t>(obj))) {
+                            this->UpdatePeakImpl(reinterpret_cast<uintptr_t>(obj));
+                        } else {
+                            this->UpdatePeakImpl(reinterpret_cast<uintptr_t>(m_end) - this->GetObjectSize());
+                        }
+                    } else {
+                        this->UpdatePeakImpl(reinterpret_cast<uintptr_t>(obj));
+                    }
                 }
                 #endif

                 return obj;
             }

-            void FreeImpl(void *obj) {
-                MESOSPHERE_ASSERT_THIS();
-
+            ALWAYS_INLINE void Free(void *obj) {
                 /* Don't allow freeing an object that wasn't allocated from this heap. */
-                MESOSPHERE_ABORT_UNLESS(this->Contains(reinterpret_cast<uintptr_t>(obj)));
+                const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj));
+                if constexpr (SupportDynamicExpansion) {
+                    const bool is_slab = KMemoryLayout::GetSlabRegion().Contains(reinterpret_cast<uintptr_t>(obj));
+                    MESOSPHERE_ABORT_UNLESS(contained || is_slab);
+                } else {
+                    MESOSPHERE_ABORT_UNLESS(contained);
+                }

-                this->GetImpl()->Free(obj);
+                KSlabHeapImpl::Free(obj);
             }

-            size_t GetObjectIndexImpl(const void *obj) const {
+            ALWAYS_INLINE size_t GetObjectIndex(const void *obj) const {
+                if constexpr (SupportDynamicExpansion) {
+                    if (!this->Contains(reinterpret_cast<uintptr_t>(obj))) {
+                        return std::numeric_limits<size_t>::max();
+                    }
+                }
+
                 return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize();
             }

-            size_t GetPeakIndex() const {
-                return this->GetObjectIndexImpl(reinterpret_cast<const void *>(m_peak));
+            ALWAYS_INLINE size_t GetPeakIndex() const {
+                return this->GetObjectIndex(reinterpret_cast<const void *>(m_peak));
             }

-            uintptr_t GetSlabHeapAddress() const {
+            ALWAYS_INLINE uintptr_t GetSlabHeapAddress() const {
                 return m_start;
             }

-            size_t GetNumRemaining() const {
+            ALWAYS_INLINE size_t GetNumRemaining() const {
                 size_t remaining = 0;

                 /* Only calculate the number of remaining objects under debug configuration. */
                 #if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
                 while (true) {
-                    auto *cur = this->GetImpl()->GetHead();
+                    auto *cur = this->GetHead();
                     remaining = 0;

-                    while (this->Contains(reinterpret_cast<uintptr_t>(cur))) {
-                        ++remaining;
-                        cur = cur->next;
+                    if constexpr (SupportDynamicExpansion) {
+                        const auto &slab_region = KMemoryLayout::GetSlabRegion();
+
+                        while (this->Contains(reinterpret_cast<uintptr_t>(cur)) || slab_region.Contains(reinterpret_cast<uintptr_t>(cur))) {
+                            ++remaining;
+                            cur = cur->next;
+                        }
+                    } else {
+                        while (this->Contains(reinterpret_cast<uintptr_t>(cur))) {
+                            ++remaining;
+                            cur = cur->next;
+                        }
                     }

                     if (cur == nullptr) {
@@ -204,29 +217,31 @@ namespace ams::kern {
             }
     };

-    template<typename T>
-    class KSlabHeap : public KSlabHeapBase {
+    template<typename T, bool SupportDynamicExpansion>
+    class KSlabHeap : public KSlabHeapBase<SupportDynamicExpansion> {
+        private:
+            using BaseHeap = KSlabHeapBase<SupportDynamicExpansion>;
         public:
-            constexpr KSlabHeap() : KSlabHeapBase() { /* ... */ }
+            constexpr KSlabHeap() = default;

             void Initialize(void *memory, size_t memory_size) {
-                this->InitializeImpl(sizeof(T), memory, memory_size);
+                BaseHeap::Initialize(sizeof(T), memory, memory_size);
             }

-            T *Allocate() {
-                T *obj = reinterpret_cast<T *>(this->AllocateImpl());
+            ALWAYS_INLINE T *Allocate() {
+                T *obj = static_cast<T *>(BaseHeap::Allocate());
                 if (AMS_LIKELY(obj != nullptr)) {
                     std::construct_at(obj);
                 }
                 return obj;
             }

-            void Free(T *obj) {
-                this->FreeImpl(obj);
+            ALWAYS_INLINE void Free(T *obj) {
+                BaseHeap::Free(obj);
             }

-            size_t GetObjectIndex(const T *obj) const {
-                return this->GetObjectIndexImpl(obj);
+            ALWAYS_INLINE size_t GetObjectIndex(const T *obj) const {
+                return BaseHeap::GetObjectIndex(obj);
             }
     };
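With SupportDynamicExpansion set, objects may live outside [m_start, m_end), so GetObjectIndex reports such objects with a sentinel rather than a bogus index. A caller-side sketch (hypothetical helper):

    #include <limits>

    /* Distinguish statically-slabbed objects from dynamically-expanded ones. */
    template<typename T>
    bool IsFromStaticSlab(const KSlabHeap<T, true> &heap, const T *obj) {
        return heap.GetObjectIndex(obj) != std::numeric_limits<size_t>::max();
    }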
@@ -23,12 +23,13 @@ namespace ams::kern {
         private:
             friend class KSystemControl;
         private:
-            static inline bool s_is_debug_mode;
-            static inline bool s_enable_debug_logging;
-            static inline bool s_enable_user_exception_handlers;
-            static inline bool s_enable_debug_memory_fill;
-            static inline bool s_enable_user_pmu_access;
-            static inline bool s_enable_kernel_debugging;
+            static inline constinit bool s_is_debug_mode;
+            static inline constinit bool s_enable_debug_logging;
+            static inline constinit bool s_enable_user_exception_handlers;
+            static inline constinit bool s_enable_debug_memory_fill;
+            static inline constinit bool s_enable_user_pmu_access;
+            static inline constinit bool s_enable_kernel_debugging;
+            static inline constinit bool s_enable_dynamic_resource_limits;
         private:
             static ALWAYS_INLINE void SetIsDebugMode(bool en) { s_is_debug_mode = en; }
             static ALWAYS_INLINE void EnableDebugLogging(bool en) { s_enable_debug_logging = en; }

@@ -36,6 +37,7 @@ namespace ams::kern {
             static ALWAYS_INLINE void EnableDebugMemoryFill(bool en) { s_enable_debug_memory_fill = en; }
             static ALWAYS_INLINE void EnableUserPmuAccess(bool en) { s_enable_user_pmu_access = en; }
             static ALWAYS_INLINE void EnableKernelDebugging(bool en) { s_enable_kernel_debugging = en; }
+            static ALWAYS_INLINE void EnableDynamicResourceLimits(bool en) { s_enable_dynamic_resource_limits = en; }
         public:
             static ALWAYS_INLINE bool IsDebugMode() { return s_is_debug_mode; }
             static ALWAYS_INLINE bool IsDebugLoggingEnabled() { return s_enable_debug_logging; }

@@ -43,6 +45,7 @@ namespace ams::kern {
             static ALWAYS_INLINE bool IsDebugMemoryFillEnabled() { return s_enable_debug_memory_fill; }
             static ALWAYS_INLINE bool IsUserPmuAccessEnabled() { return s_enable_user_pmu_access; }
             static ALWAYS_INLINE bool IsKernelDebuggingEnabled() { return s_enable_kernel_debugging; }
+            static ALWAYS_INLINE bool IsDynamicResourceLimitsEnabled() { return s_enable_dynamic_resource_limits; }
     };

 }
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_k_typed_address.hpp>
+
+namespace ams::kern {
+
+    /* Utilities to allocate/free memory from the "unused" gaps between slab heaps. */
+    /* See KTargetSystem::IsDynamicResourceLimitsEnabled() usage for more context. */
+    KVirtualAddress AllocateUnusedSlabMemory(size_t size, size_t alignment);
+    void FreeUnusedSlabMemory(KVirtualAddress address, size_t size);
+
+}
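Only the declarations live in this header; per its comment, the allocator carves from gaps the static slab layout leaves in the kernel slab region. A hypothetical round-trip using the declared signatures (sizes chosen arbitrarily for illustration):

    /* Reserve a 0x200-byte, 0x10-aligned chunk from inter-slab slack, then return it. */
    KVirtualAddress addr = AllocateUnusedSlabMemory(0x200, 0x10);
    if (addr != Null<KVirtualAddress>) {
        /* ... construct and use an object at addr ... */
        FreeUnusedSlabMemory(addr, 0x200);
    }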
@@ -63,14 +63,21 @@ namespace ams::kern {
             static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
             static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000;
             static constexpr size_t BlockInfoSlabHeapSize = 4000;
+            static constexpr size_t ReservedDynamicPageCount = 70;
         private:
             static State s_state;
             static KResourceLimit s_system_resource_limit;
             static KMemoryManager s_memory_manager;
-            static KPageTableManager s_page_table_manager;
+            static KPageTableSlabHeap s_page_table_heap;
+            static KMemoryBlockSlabHeap s_app_memory_block_heap;
+            static KMemoryBlockSlabHeap s_sys_memory_block_heap;
+            static KBlockInfoSlabHeap s_block_info_heap;
+            static KPageTableManager s_app_page_table_manager;
+            static KPageTableManager s_sys_page_table_manager;
             static KMemoryBlockSlabManager s_app_memory_block_manager;
             static KMemoryBlockSlabManager s_sys_memory_block_manager;
-            static KBlockInfoManager s_block_info_manager;
+            static KBlockInfoManager s_app_block_info_manager;
+            static KBlockInfoManager s_sys_block_info_manager;
             static KSupervisorPageTable s_supervisor_page_table;
             static KUnsafeMemory s_unsafe_memory;
             static KWorkerTaskManager s_worker_task_managers[KWorkerTaskManager::WorkerType_Count];

@@ -130,12 +137,20 @@ namespace ams::kern {
                 return s_sys_memory_block_manager;
             }

-            static ALWAYS_INLINE KBlockInfoManager &GetBlockInfoManager() {
-                return s_block_info_manager;
+            static ALWAYS_INLINE KBlockInfoManager &GetApplicationBlockInfoManager() {
+                return s_app_block_info_manager;
             }

-            static ALWAYS_INLINE KPageTableManager &GetPageTableManager() {
-                return s_page_table_manager;
+            static ALWAYS_INLINE KBlockInfoManager &GetSystemBlockInfoManager() {
+                return s_sys_block_info_manager;
+            }
+
+            static ALWAYS_INLINE KPageTableManager &GetApplicationPageTableManager() {
+                return s_app_page_table_manager;
+            }
+
+            static ALWAYS_INLINE KPageTableManager &GetSystemPageTableManager() {
+                return s_sys_page_table_manager;
             }

             static ALWAYS_INLINE KSupervisorPageTable &GetKernelPageTable() {
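The single block-info and page-table managers are split into application and system instances, and callers now pick by domain. A hypothetical selection helper showing the intent of the new accessors:

    KPageTableManager &GetPageTableManagerFor(bool is_application) {
        return is_application ? Kernel::GetApplicationPageTableManager()
                              : Kernel::GetSystemPageTableManager();
    }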
@@ -18,15 +18,16 @@
 #include <mesosphere/kern_k_auto_object.hpp>
 #include <mesosphere/kern_k_slab_heap.hpp>
 #include <mesosphere/kern_k_auto_object_container.hpp>
+#include <mesosphere/kern_k_unused_slab_memory.hpp>

 namespace ams::kern {

-    template<class Derived>
+    template<class Derived, bool SupportDynamicExpansion = false>
     class KSlabAllocated {
         private:
-            static inline KSlabHeap<Derived> s_slab_heap;
+            static constinit inline KSlabHeap<Derived, SupportDynamicExpansion> s_slab_heap;
         public:
-            constexpr KSlabAllocated() { /* ... */ }
+            constexpr KSlabAllocated() = default;

             size_t GetSlabIndex() const {
                 return s_slab_heap.GetIndex(static_cast<const Derived *>(this));

@@ -36,14 +37,25 @@ namespace ams::kern {
                 s_slab_heap.Initialize(memory, memory_size);
             }

-            static ALWAYS_INLINE Derived *Allocate() {
+            static Derived *Allocate() {
                 return s_slab_heap.Allocate();
             }

-            static ALWAYS_INLINE void Free(Derived *obj) {
+            static void Free(Derived *obj) {
                 s_slab_heap.Free(obj);
             }

+            template<bool Enable = SupportDynamicExpansion, typename = typename std::enable_if<Enable>::type>
+            static Derived *AllocateFromUnusedSlabMemory() {
+                static_assert(Enable == SupportDynamicExpansion);
+
+                Derived * const obj = GetPointer<Derived>(AllocateUnusedSlabMemory(sizeof(Derived), alignof(Derived)));
+                if (AMS_LIKELY(obj != nullptr)) {
+                    std::construct_at(obj);
+                }
+                return obj;
+            }
+
             static size_t GetObjectSize() { return s_slab_heap.GetObjectSize(); }
             static size_t GetSlabHeapSize() { return s_slab_heap.GetSlabHeapSize(); }
             static size_t GetPeakIndex() { return s_slab_heap.GetPeakIndex(); }
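The default-argument enable_if plus static_assert gates AllocateFromUnusedSlabMemory to slabs instantiated with SupportDynamicExpansion = true, so misuse becomes a compile error rather than a runtime failure. A sketch with a hypothetical type:

    class KWidget : public KSlabAllocated<KWidget, true> {};  /* hypothetical derived type */

    KWidget *AllocateWidgetFromGaps() {
        return KWidget::AllocateFromUnusedSlabMemory();  /* ill-formed if the bool were false */
    }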
@@ -52,12 +64,12 @@ namespace ams::kern {
             static size_t GetNumRemaining() { return s_slab_heap.GetNumRemaining(); }
     };

-    template<typename Derived, typename Base>
+    template<typename Derived, typename Base, bool SupportDynamicExpansion = false>
     class KAutoObjectWithSlabHeapAndContainer : public Base {
         static_assert(std::is_base_of<KAutoObjectWithList, Base>::value);
         private:
-            static inline KSlabHeap<Derived> s_slab_heap;
-            static inline KAutoObjectWithListContainer s_container;
+            static constinit inline KSlabHeap<Derived, SupportDynamicExpansion> s_slab_heap;
+            static constinit inline KAutoObjectWithListContainer s_container;
         private:
             static ALWAYS_INLINE Derived *Allocate() {
                 return s_slab_heap.Allocate();

@@ -73,7 +85,7 @@ namespace ams::kern {
                 ALWAYS_INLINE ~ListAccessor() { /* ... */ }
             };
         public:
-            constexpr KAutoObjectWithSlabHeapAndContainer() : Base() { /* ... */ }
+            constexpr KAutoObjectWithSlabHeapAndContainer() = default;

             virtual void Destroy() override {
                 const bool is_initialized = this->IsInitialized();

@@ -109,6 +121,18 @@ namespace ams::kern {
                 return obj;
             }

+            template<bool Enable = SupportDynamicExpansion, typename = typename std::enable_if<Enable>::type>
+            static Derived *CreateFromUnusedSlabMemory() {
+                static_assert(Enable == SupportDynamicExpansion);
+
+                Derived * const obj = GetPointer<Derived>(AllocateUnusedSlabMemory(sizeof(Derived), alignof(Derived)));
+                if (AMS_LIKELY(obj != nullptr)) {
+                    std::construct_at(obj);
+                    KAutoObject::Create(obj);
+                }
+                return obj;
+            }
+
             static void Register(Derived *obj) {
                 return s_container.Register(obj);
             }
@@ -166,7 +166,7 @@ namespace ams::kern::arch::arm64 {
     Result KPageTable::InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end) {
         /* Initialize basic fields. */
         m_asid = 0;
-        m_manager = std::addressof(Kernel::GetPageTableManager());
+        m_manager = std::addressof(Kernel::GetSystemPageTableManager());

         /* Allocate a page for ttbr. */
         const u64 asid_tag = (static_cast<u64>(m_asid) << 48ul);
@@ -650,11 +650,11 @@ namespace ams::kern::board::nintendo::nx {
         g_memory_controller_address = KMemoryLayout::GetDevicePhysicalAddress(KMemoryRegionType_MemoryController);

         /* Allocate a page to use as a reserved/no device table. */
-        const KVirtualAddress table_virt_addr = Kernel::GetPageTableManager().Allocate();
+        const KVirtualAddress table_virt_addr = Kernel::GetSystemPageTableManager().Allocate();
         MESOSPHERE_ABORT_UNLESS(table_virt_addr != Null<KVirtualAddress>);
         const KPhysicalAddress table_phys_addr = GetPageTablePhysicalAddress(table_virt_addr);
         MESOSPHERE_ASSERT(IsValidPhysicalAddress(table_phys_addr));
-        Kernel::GetPageTableManager().Open(table_virt_addr, 1);
+        Kernel::GetSystemPageTableManager().Open(table_virt_addr, 1);

         /* Clear the page and save it. */
         /* NOTE: Nintendo does not check the result of StoreDataCache. */
@@ -779,7 +779,7 @@ namespace ams::kern::board::nintendo::nx {
         const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize;

         /* Get the page table manager. */
-        auto &ptm = Kernel::GetPageTableManager();
+        auto &ptm = Kernel::GetSystemPageTableManager();

         /* Clear the tables. */
         static_assert(TableCount == (1ul << DeviceVirtualAddressBits) / DeviceRegionSize);
@@ -840,7 +840,7 @@ namespace ams::kern::board::nintendo::nx {

     void KDevicePageTable::Finalize() {
         /* Get the page table manager. */
-        auto &ptm = Kernel::GetPageTableManager();
+        auto &ptm = Kernel::GetSystemPageTableManager();

         /* Detach from all devices. */
         {
@@ -1017,7 +1017,7 @@ namespace ams::kern::board::nintendo::nx {

         /* Get the memory manager and page table manager. */
         KMemoryManager &mm = Kernel::GetMemoryManager();
-        KPageTableManager &ptm = Kernel::GetPageTableManager();
+        KPageTableManager &ptm = Kernel::GetSystemPageTableManager();

         /* Cache permissions. */
         const bool read = (device_perm & ams::svc::MemoryPermission_Read) != 0;
@@ -1181,10 +1181,10 @@ namespace ams::kern::board::nintendo::nx {

         /* Get the memory manager and page table manager. */
         KMemoryManager &mm = Kernel::GetMemoryManager();
-        KPageTableManager &ptm = Kernel::GetPageTableManager();
+        KPageTableManager &ptm = Kernel::GetSystemPageTableManager();

         /* Make a page group for the pages we're closing. */
-        KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager()));
+        KPageGroup pg(std::addressof(Kernel::GetSystemBlockInfoManager()));

         /* Walk the directory. */
         u64 remaining = size;
@@ -459,6 +459,7 @@ namespace ams::kern::board::nintendo::nx {

         KTargetSystem::EnableDebugMemoryFill(kernel_config.Get<smc::KernelConfiguration::DebugFillMemory>());
         KTargetSystem::EnableUserExceptionHandlers(kernel_config.Get<smc::KernelConfiguration::EnableUserExceptionHandlers>());
+        KTargetSystem::EnableDynamicResourceLimits(!kernel_config.Get<smc::KernelConfiguration::DisableDynamicResourceLimits>());
         KTargetSystem::EnableUserPmuAccess(kernel_config.Get<smc::KernelConfiguration::EnableUserPmuAccess>());

         g_call_smc_on_panic = kernel_config.Get<smc::KernelConfiguration::UseSecureMonitorPanicCall>();
@@ -80,14 +80,15 @@ namespace ams::kern::board::nintendo::nx::smc {
     };

     struct KernelConfiguration {
         using DebugFillMemory             = util::BitPack32::Field<0, 1, bool>;
         using EnableUserExceptionHandlers = util::BitPack32::Field<DebugFillMemory::Next, 1, bool>;
         using EnableUserPmuAccess         = util::BitPack32::Field<EnableUserExceptionHandlers::Next, 1, bool>;
         using IncreaseThreadResourceLimit = util::BitPack32::Field<EnableUserPmuAccess::Next, 1, bool>;
-        using Reserved4                   = util::BitPack32::Field<IncreaseThreadResourceLimit::Next, 4, u32>;
-        using UseSecureMonitorPanicCall   = util::BitPack32::Field<Reserved4::Next, 1, bool>;
-        using Reserved9                   = util::BitPack32::Field<UseSecureMonitorPanicCall::Next, 7, u32>;
-        using MemorySize                  = util::BitPack32::Field<Reserved9::Next, 2, smc::MemorySize>;
+        using DisableDynamicResourceLimits = util::BitPack32::Field<IncreaseThreadResourceLimit::Next, 1, bool>;
+        using Reserved5                   = util::BitPack32::Field<DisableDynamicResourceLimits::Next, 3, u32>;
+        using UseSecureMonitorPanicCall   = util::BitPack32::Field<Reserved5::Next, 1, bool>;
+        using Reserved9                   = util::BitPack32::Field<UseSecureMonitorPanicCall::Next, 7, u32>;
+        using MemorySize                  = util::BitPack32::Field<Reserved9::Next, 2, smc::MemorySize>;
     };

     enum UserRebootType {
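The reshuffled fields keep every existing bit in place: DisableDynamicResourceLimits takes the first bit of the old Reserved4 run, and everything from UseSecureMonitorPanicCall onward keeps its absolute position. A small standard-C++ model of the resulting layout (not the util::BitPack32 implementation):

    #include <cstdint>

    /* Bit indices implied by the Field<offset, size> chain above. */
    constexpr bool GetDisableDynamicResourceLimits(std::uint32_t config) {
        return (config >> 4) & 1;        /* bit 4: first bit of the old Reserved4 run */
    }
    constexpr std::uint32_t GetMemorySize(std::uint32_t config) {
        return (config >> 16) & 0b11;    /* bits 16-17, unchanged by this commit */
    }

    static_assert(!GetDisableDynamicResourceLimits(0));
    static_assert(GetDisableDynamicResourceLimits(1u << 4));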
@@ -173,8 +173,9 @@ namespace ams::kern::init {
     }

     void InitializeSlabHeaps() {
-        /* Get the start of the slab region, since that's where we'll be working. */
-        KVirtualAddress address = KMemoryLayout::GetSlabRegionAddress();
+        /* Get the slab region, since that's where we'll be working. */
+        const KMemoryRegion &slab_region = KMemoryLayout::GetSlabRegion();
+        KVirtualAddress address = slab_region.GetAddress();

         /* Initialize slab type array to be in sorted order. */
         KSlabType slab_types[KSlabType_Count];
@@ -202,13 +203,21 @@ namespace ams::kern::init {
             }
         }

+        /* Track the gaps, so that we can free them to the unused slab tree. */
+        KVirtualAddress gap_start = address;
+        size_t gap_size = 0;
+
         for (size_t i = 0; i < util::size(slab_types); i++) {
             /* Add the random gap to the address. */
-            address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
+            const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
+            address  += cur_gap;
+            gap_size += cur_gap;

             #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \
                 case KSlabType_##NAME: \
-                    address = InitializeSlabHeap<NAME>(address, COUNT); \
+                    if (COUNT > 0) { \
+                        address = InitializeSlabHeap<NAME>(address, COUNT); \
+                    } \
                     break;

             /* Initialize the slabheap. */
@@ -218,7 +227,17 @@ namespace ams::kern::init {
                 /* If we somehow get an invalid type, abort. */
                 MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
             }
+
+            /* If we've hit the end of a gap, free it. */
+            if (gap_start + gap_size != address) {
+                FreeUnusedSlabMemory(gap_start, gap_size);
+                gap_start = address;
+                gap_size  = 0;
+            }
         }
+
+        /* Free the end of the slab region. */
+        FreeUnusedSlabMemory(gap_start, gap_size + (slab_region.GetEndAddress() - GetInteger(address)));
     }

 }
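How the gap tracking behaves: a gap keeps accumulating across heaps whose COUNT is zero and is flushed to the unused-slab tree only when a heap actually consumes space. A standalone simulation with made-up sizes (assumed values, not real slab counts):

    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    int main() {
        /* Hypothetical per-iteration random gaps and heap sizes (COUNT == 0 => size 0). */
        const std::size_t gaps[]  = { 0x10, 0x20, 0x30 };
        const std::size_t heaps[] = { 0x100, 0x0, 0x80 };

        std::size_t address = 0x1000, gap_start = address, gap_size = 0;
        std::vector<std::pair<std::size_t, std::size_t>> freed;

        for (std::size_t i = 0; i < 3; ++i) {
            address += gaps[i]; gap_size += gaps[i];   /* skip the random gap */
            address += heaps[i];                       /* InitializeSlabHeap advances address */
            if (gap_start + gap_size != address) {     /* heap consumed space: flush the gap */
                freed.emplace_back(gap_start, gap_size);
                gap_start = address; gap_size = 0;
            }
        }

        assert(freed.size() == 2);                              /* the empty middle heap...        */
        assert(freed[0].first == 0x1000 && freed[0].second == 0x10);
        assert(freed[1].second == 0x20 + 0x30);                 /* ...merged its gap into the next */
    }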
@@ -128,13 +128,13 @@ namespace ams::kern {
     KProcess *new_process = nullptr;
     {
         /* Make page groups to represent the data. */
-        KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager()));
-        KPageGroup workaround_pg(std::addressof(Kernel::GetBlockInfoManager()));
+        KPageGroup pg(std::addressof(Kernel::GetSystemBlockInfoManager()));
+        KPageGroup workaround_pg(std::addressof(Kernel::GetSystemBlockInfoManager()));

         /* Populate the page group to represent the data. */
         {
             /* Allocate the previously unreserved pages. */
-            KPageGroup unreserve_pg(std::addressof(Kernel::GetBlockInfoManager()));
+            KPageGroup unreserve_pg(std::addressof(Kernel::GetSystemBlockInfoManager()));
             MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));

             /* Add the previously reserved pages. */
@@ -62,11 +62,46 @@ namespace ams::kern {
     Result KClientPort::CreateSession(KClientSession **out) {
         MESOSPHERE_ASSERT_THIS();

+        /* Declare the session we're going to allocate. */
+        KSession *session;
+
         /* Reserve a new session from the resource limit. */
         KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax);
-        R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached());
+        if (session_reservation.Succeeded()) {
+            /* Allocate a session normally. */
+            session = KSession::Create();
+        } else {
+            /* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */
+            R_UNLESS(GetCurrentProcess().GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
+            R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
+
+            /* Try to allocate a session from unused slab memory. */
+            session = KSession::CreateFromUnusedSlabMemory();
+            R_UNLESS(session != nullptr, svc::ResultLimitReached());
+
+            /* Ensure that if we fail to allocate our session requests, we close the session we created. */
+            auto session_guard = SCOPE_GUARD { session->Close(); };
+            {
+                /* We want to add two KSessionRequests to the heap, to prevent request exhaustion. */
+                for (size_t i = 0; i < 2; ++i) {
+                    KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
+                    R_UNLESS(request != nullptr, svc::ResultLimitReached());

+                    request->Close();
+                }
+            }
+            session_guard.Cancel();
+
+            /* We successfully allocated a session, so add the object we allocated to the resource limit. */
+            Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
+        }
+
+        /* Check that we successfully created a session. */
+        R_UNLESS(session != nullptr, svc::ResultOutOfResource());

         /* Update the session counts. */
+        auto count_guard = SCOPE_GUARD { session->Close(); };
         {
             /* Atomically increment the number of sessions. */
             s32 new_sessions;
@@ -90,18 +125,7 @@ namespace ams::kern {
                 } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed));
             }
         }

-        /* Create a new session. */
-        KSession *session = KSession::Create();
-        if (session == nullptr) {
-            /* Decrement the session count. */
-            const auto prev = m_num_sessions--;
-            if (prev == m_max_sessions) {
-                this->NotifyAvailable();
-            }
-
-            return svc::ResultOutOfResource();
-        }
+        count_guard.Cancel();

         /* Initialize the session. */
         session->Initialize(this, m_parent->GetName());
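The control flow above, condensed into a self-contained model (names hypothetical): reserve from the limit when possible; otherwise expand only for processes charged against the system resource limit, and only when the kernel configuration enables dynamic limits.

    #include <cstdint>

    struct LimitModel { std::int64_t limit, current; };

    /* Model of the CreateSession fallback path. Growing limit and current */
    /* together is what KResourceLimit::Add(which, 1) does below, so the   */
    /* freshly created object is already accounted for.                    */
    bool ReserveOrExpand(LimitModel &l, bool is_system_limit, bool dynamic_enabled, bool slab_memory_available) {
        if (l.current < l.limit) {       /* KScopedResourceReservation succeeds */
            ++l.current;
            return true;
        }
        if (!is_system_limit || !dynamic_enabled || !slab_memory_available) {
            return false;                /* svc::ResultLimitReached() */
        }
        ++l.limit;                       /* limit and usage move together, so      */
        ++l.current;                     /* remaining (limit - current) is unchanged */
        return true;
    }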
@@ -128,11 +152,32 @@ namespace ams::kern {
     Result KClientPort::CreateLightSession(KLightClientSession **out) {
         MESOSPHERE_ASSERT_THIS();

+        /* Declare the session we're going to allocate. */
+        KLightSession *session;
+
         /* Reserve a new session from the resource limit. */
         KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax);
-        R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached());
+        if (session_reservation.Succeeded()) {
+            /* Allocate a session normally. */
+            session = KLightSession::Create();
+        } else {
+            /* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */
+            R_UNLESS(GetCurrentProcess().GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
+            R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
+
+            /* Try to allocate a session from unused slab memory. */
+            session = KLightSession::CreateFromUnusedSlabMemory();
+            R_UNLESS(session != nullptr, svc::ResultLimitReached());
+
+            /* We successfully allocated a session, so add the object we allocated to the resource limit. */
+            Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
+        }
+
+        /* Check that we successfully created a session. */
+        R_UNLESS(session != nullptr, svc::ResultOutOfResource());

         /* Update the session counts. */
+        auto count_guard = SCOPE_GUARD { session->Close(); };
         {
             /* Atomically increment the number of sessions. */
             s32 new_sessions;
@@ -156,18 +201,7 @@ namespace ams::kern {
                 } while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed));
             }
         }

-        /* Create a new session. */
-        KLightSession *session = KLightSession::Create();
-        if (session == nullptr) {
-            /* Decrement the session count. */
-            const auto prev = m_num_sessions--;
-            if (prev == m_max_sessions) {
-                this->NotifyAvailable();
-            }
-
-            return svc::ResultOutOfResource();
-        }
+        count_guard.Cancel();

         /* Initialize the session. */
         session->Initialize(this, m_parent->GetName());
@@ -369,14 +369,14 @@ namespace ams::kern::KDumpObject {
         /* KBlockInfo slab. */
         {
             MESOSPHERE_RELEASE_LOG("KBlockInfo\n");
-            auto &manager = Kernel::GetBlockInfoManager();
+            auto &manager = Kernel::GetSystemBlockInfoManager();
             MESOSPHERE_RELEASE_LOG("    Cur=%6zu Peak=%6zu Max=%6zu\n", manager.GetUsed(), manager.GetPeak(), manager.GetCount());
         }

         /* Page Table slab. */
         {
             MESOSPHERE_RELEASE_LOG("Page Table\n");
-            auto &manager = Kernel::GetPageTableManager();
+            auto &manager = Kernel::GetSystemPageTableManager();
             MESOSPHERE_RELEASE_LOG("    Cur=%6zu Peak=%6zu Max=%6zu\n", manager.GetUsed(), manager.GetPeak(), manager.GetCount());
         }
     }
@@ -37,7 +37,7 @@ namespace ams::kern {
     void KEvent::Finalize() {
         MESOSPHERE_ASSERT_THIS();

-        KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize();
+        KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList, true>::Finalize();
     }

     Result KEvent::Signal() {
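The explicit true argument must match KEvent's own base-class list; that declaration is outside this section, but given this change it presumably reads along these lines (hypothetical sketch, not shown in the diff):

    class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList, /* SupportDynamicExpansion = */ true> {
        /* ... */
    };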
@@ -37,6 +37,7 @@ namespace ams::kern {

     void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size) {
         /* Clear the management region to zero. */
         const KVirtualAddress management_region_end = management_region + management_region_size;
         std::memset(GetVoidPointer(management_region), 0, management_region_size);
@@ -106,7 +106,7 @@ namespace ams::kern {
         m_mapped_ipc_server_memory = 0;

         m_memory_block_slab_manager = std::addressof(Kernel::GetSystemMemoryBlockManager());
-        m_block_info_manager        = std::addressof(Kernel::GetBlockInfoManager());
+        m_block_info_manager        = std::addressof(Kernel::GetSystemBlockInfoManager());
         m_resource_limit            = std::addressof(Kernel::GetSystemResourceLimit());

         m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
@@ -260,8 +260,8 @@ namespace ams::kern {
             const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
             const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication) != 0;
             auto *mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
-            auto *block_info_manager = std::addressof(Kernel::GetBlockInfoManager());
-            auto *pt_manager = std::addressof(Kernel::GetPageTableManager());
+            auto *block_info_manager = std::addressof(is_app ? Kernel::GetApplicationBlockInfoManager() : Kernel::GetSystemBlockInfoManager());
+            auto *pt_manager = std::addressof(is_app ? Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager());
             R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager, res_limit));
         }
         auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };
@@ -326,12 +326,17 @@ namespace ams::kern {
             MESOSPHERE_ASSERT(m_system_resource_address != Null<KVirtualAddress>);
             m_system_resource_num_pages = system_resource_num_pages;

-            /* Initialize managers. */
-            const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(system_resource_size), PageSize);
+            /* Initialize slab heaps. */
+            const size_t rc_size = util::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(system_resource_size), PageSize);
             m_dynamic_page_manager.Initialize(m_system_resource_address + rc_size, system_resource_size - rc_size);
-            m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), GetPointer<KPageTableManager::RefCount>(m_system_resource_address));
-            m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager));
-            m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager));
+            m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, GetPointer<KPageTableManager::RefCount>(m_system_resource_address));
+            m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+            m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
+
+            /* Initialize managers. */
+            m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_page_table_heap));
+            m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_memory_block_heap));
+            m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_block_info_heap));

             mem_block_manager = std::addressof(m_memory_block_slab_manager);
             block_info_manager = std::addressof(m_block_info_manager);
@@ -339,8 +344,8 @@ namespace ams::kern {
         } else {
             const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
             mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
-            block_info_manager = std::addressof(Kernel::GetBlockInfoManager());
-            pt_manager = std::addressof(Kernel::GetPageTableManager());
+            block_info_manager = std::addressof(is_app ? Kernel::GetApplicationBlockInfoManager() : Kernel::GetSystemBlockInfoManager());
+            pt_manager = std::addressof(is_app ? Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager());
         }

         /* Ensure we don't leak any secure memory we allocated. */
@@ -49,7 +49,8 @@ namespace ams::kern {
             KScopedLightLock lk(m_lock);
             value = m_limit_values[which];
             MESOSPHERE_ASSERT(value >= 0);
-            MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
+            MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
+            MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
         }

@@ -64,7 +65,8 @@ namespace ams::kern {
             KScopedLightLock lk(m_lock);
             value = m_current_values[which];
             MESOSPHERE_ASSERT(value >= 0);
-            MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
+            MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
+            MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
         }

@@ -79,7 +81,8 @@ namespace ams::kern {
             KScopedLightLock lk(m_lock);
             value = m_peak_values[which];
             MESOSPHERE_ASSERT(value >= 0);
-            MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
+            MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
+            MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
         }

@@ -93,7 +96,8 @@ namespace ams::kern {
         {
             KScopedLightLock lk(m_lock);
             MESOSPHERE_ASSERT(m_current_values[which] >= 0);
-            MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
+            MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
+            MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
             MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
             value = m_limit_values[which] - m_current_values[which];
         }
@@ -113,6 +117,37 @@ namespace ams::kern {
         return ResultSuccess();
     }

+    void KResourceLimit::Add(ams::svc::LimitableResource which, s64 value) {
+        MESOSPHERE_ASSERT_THIS();
+        MESOSPHERE_ASSERT(KTargetSystem::IsDynamicResourceLimitsEnabled());
+
+        KScopedLightLock lk(m_lock);
+
+        /* Check that this is a true increase. */
+        MESOSPHERE_ABORT_UNLESS(value > 0);
+
+        /* Check that we can perform an increase. */
+        MESOSPHERE_ABORT_UNLESS(m_current_values[which] <= m_peak_values[which]);
+        MESOSPHERE_ABORT_UNLESS(m_peak_values[which] <= m_limit_values[which]);
+        MESOSPHERE_ABORT_UNLESS(m_current_hints[which] <= m_current_values[which]);
+
+        /* Check that the increase doesn't cause an overflow. */
+        const auto increased_limit   = m_limit_values[which] + value;
+        const auto increased_current = m_current_values[which] + value;
+        const auto increased_hint    = m_current_hints[which] + value;
+        MESOSPHERE_ABORT_UNLESS(m_limit_values[which] < increased_limit);
+        MESOSPHERE_ABORT_UNLESS(m_current_values[which] < increased_current);
+        MESOSPHERE_ABORT_UNLESS(m_current_hints[which] < increased_hint);
+
+        /* Add the value. */
+        m_limit_values[which]   = increased_limit;
+        m_current_values[which] = increased_current;
+        m_current_hints[which]  = increased_hint;
+
+        /* Update our peak. */
+        m_peak_values[which] = std::max(m_peak_values[which], increased_current);
+    }
+
     bool KResourceLimit::Reserve(ams::svc::LimitableResource which, s64 value) {
         return this->Reserve(which, value, KHardwareTimer::GetTick() + DefaultTimeout);
     }
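Worked numbers for Add (a model in standard C++): raising limit, current, and hint by the same amount leaves GetRemainingValue() untouched, so the expansion pre-charges exactly the object being created and nothing else.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main() {
        /* Hypothetical counts at the moment expansion kicks in. */
        std::int64_t limit = 64, current = 64, hint = 60, peak = 64;

        const std::int64_t value = 1;     /* Add(LimitableResource_SessionCountMax, 1) */
        limit += value; current += value; hint += value;
        peak = std::max(peak, current);

        assert(limit - current == 0);     /* remaining unchanged: still exhausted */
        assert(limit == 65 && current == 65 && hint == 61 && peak == 65);
    }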
libraries/libmesosphere/source/kern_k_unused_slab_memory.cpp (new file, 159 lines)

@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <mesosphere.hpp>
+
+namespace ams::kern {
+
+    namespace {
+
+        class KUnusedSlabMemory : public util::IntrusiveRedBlackTreeBaseNode<KUnusedSlabMemory> {
+            NON_COPYABLE(KUnusedSlabMemory);
+            NON_MOVEABLE(KUnusedSlabMemory);
+            private:
+                size_t m_size;
+            public:
+                struct RedBlackKeyType {
+                    size_t m_size;
+
+                    constexpr ALWAYS_INLINE size_t GetSize() const {
+                        return m_size;
+                    }
+                };
+
+                template<typename T> requires (std::same_as<T, KUnusedSlabMemory> || std::same_as<T, RedBlackKeyType>)
+                static constexpr ALWAYS_INLINE int Compare(const T &lhs, const KUnusedSlabMemory &rhs) {
+                    if (lhs.GetSize() < rhs.GetSize()) {
+                        return -1;
+                    } else {
+                        return 1;
+                    }
+                }
+            public:
+                constexpr KUnusedSlabMemory(size_t size) : m_size(size) { /* ... */ }
+
+                constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return reinterpret_cast<uintptr_t>(this); }
+                constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
+        };
+        static_assert(std::is_trivially_destructible<KUnusedSlabMemory>::value);
+
+        using KUnusedSlabMemoryTree = util::IntrusiveRedBlackTreeBaseTraits<KUnusedSlabMemory>::TreeType<KUnusedSlabMemory>;
+
+        constinit KLightLock g_unused_slab_memory_lock;
+        constinit KUnusedSlabMemoryTree g_unused_slab_memory_tree;
+
+    }
+
+    KVirtualAddress AllocateUnusedSlabMemory(size_t size, size_t alignment) {
+        /* Acquire exclusive access to the memory tree. */
+        KScopedLightLock lk(g_unused_slab_memory_lock);
+
+        /* Adjust size and alignment. */
+        size      = std::max(size, sizeof(KUnusedSlabMemory));
+        alignment = std::max(alignment, alignof(KUnusedSlabMemory));
+
+        /* Find the smallest block which fits our allocation. */
+        KUnusedSlabMemory *best_fit = std::addressof(*g_unused_slab_memory_tree.nfind_key({ size - 1 }));
+
+        /* Ensure that the chunk is valid. */
+        size_t prefix_waste;
+        KVirtualAddress alloc_start;
+        KVirtualAddress alloc_last;
+        KVirtualAddress alloc_end;
+        KVirtualAddress chunk_last;
+        KVirtualAddress chunk_end;
+        while (true) {
+            /* Check that we still have a chunk satisfying our size requirement. */
+            if (AMS_UNLIKELY(best_fit == nullptr)) {
+                return Null<KVirtualAddress>;
+            }
+
+            /* Determine where the actual allocation would start. */
+            alloc_start = util::AlignUp(GetInteger(best_fit->GetAddress()), alignment);
+            if (AMS_LIKELY(alloc_start >= best_fit->GetAddress())) {
+                prefix_waste = alloc_start - best_fit->GetAddress();
+                alloc_end    = alloc_start + size;
+                alloc_last   = alloc_end - 1;
+
+                /* Check that the allocation remains in bounds. */
+                if (alloc_start <= alloc_last) {
+                    chunk_end  = best_fit->GetAddress() + best_fit->GetSize();
+                    chunk_last = chunk_end - 1;
+                    if (AMS_LIKELY(alloc_last <= chunk_last)) {
+                        break;
+                    }
+                }
+            }
+
+            /* Check the next smallest block. */
+            best_fit = best_fit->GetNext();
+        }
+
+        /* Remove the chunk we selected from the tree. */
+        g_unused_slab_memory_tree.erase(g_unused_slab_memory_tree.iterator_to(*best_fit));
+        std::destroy_at(best_fit);
+
+        /* If there's enough prefix waste due to alignment for a new chunk, insert it into the tree. */
+        if (prefix_waste >= sizeof(KUnusedSlabMemory)) {
+            std::construct_at(best_fit, prefix_waste);
+            g_unused_slab_memory_tree.insert(*best_fit);
+        }
+
+        /* If there's enough suffix waste after the allocation for a new chunk, insert it into the tree. */
+        if (alloc_last < alloc_end + sizeof(KUnusedSlabMemory) - 1 && alloc_end + sizeof(KUnusedSlabMemory) - 1 <= chunk_last) {
+            KUnusedSlabMemory *suffix_chunk = GetPointer<KUnusedSlabMemory>(alloc_end);
+            std::construct_at(suffix_chunk, chunk_end - alloc_end);
+            g_unused_slab_memory_tree.insert(*suffix_chunk);
+        }
+
+        /* Return the allocated memory. */
+        return alloc_start;
+    }
+
+    void FreeUnusedSlabMemory(KVirtualAddress address, size_t size) {
+        /* NOTE: This is called only during initialization, so we don't need exclusive access. */
+        /* Nintendo doesn't acquire the lock here, either. */
+
+        /* Check that there's anything at all for us to free. */
+        if (AMS_UNLIKELY(size == 0)) {
+            return;
+        }
+
+        /* Determine the start of the block. */
+        const KVirtualAddress block_start = util::AlignUp(GetInteger(address), alignof(KUnusedSlabMemory));
+
+        /* Check that there's space for a KUnusedSlabMemory to exist. */
+        if (AMS_UNLIKELY(std::numeric_limits<uintptr_t>::max() - sizeof(KUnusedSlabMemory) < GetInteger(block_start))) {
+            return;
+        }
+
+        /* Determine the end of the block region. */
+        const KVirtualAddress block_end = util::AlignDown(GetInteger(address) + size, alignof(KUnusedSlabMemory));
+
+        /* Check that the block remains within bounds. */
+        if (AMS_UNLIKELY(block_start + sizeof(KUnusedSlabMemory) - 1 > block_end - 1)) {
+            return;
+        }
+
+        /* Create the block. */
+        KUnusedSlabMemory *block = GetPointer<KUnusedSlabMemory>(block_start);
+        std::construct_at(block, GetInteger(block_end) - GetInteger(block_start));
+
+        /* Insert the block into the tree. */
+        g_unused_slab_memory_tree.insert(*block);
+    }
+
+}
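The allocator above is a best-fit search over a size-ordered red-black tree whose nodes live inside the free chunks themselves. Its externally visible behavior, modeled with std::multiset (alignment handling and the in-place node headers are deliberately ignored here):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <set>

    struct Chunk { std::uintptr_t addr; std::size_t size; };
    struct BySize { bool operator()(const Chunk &a, const Chunk &b) const { return a.size < b.size; } };
    using Tree = std::multiset<Chunk, BySize>;

    /* Best fit: the smallest chunk with chunk.size >= size wins, and the */
    /* unused suffix goes back into the tree, like the reinsertion above. */
    std::uintptr_t Allocate(Tree &tree, std::size_t size) {
        const auto it = tree.lower_bound(Chunk{ 0, size });
        if (it == tree.end()) { return 0; }
        const Chunk c = *it;
        tree.erase(it);
        if (c.size > size) { tree.insert(Chunk{ c.addr + size, c.size - size }); }
        return c.addr;
    }

    int main() {
        Tree tree{ { 0x1000, 0x40 }, { 0x2000, 0x100 } };
        assert(Allocate(tree, 0x30) == 0x1000);   /* best fit: the 0x40 chunk */
        assert(Allocate(tree, 0x80) == 0x2000);   /* falls through to the big chunk */
    }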
@@ -66,15 +66,11 @@ namespace ams::kern {

     void Kernel::InitializeResourceManagers(KVirtualAddress address, size_t size) {
         /* Ensure that the buffer is suitable for our use. */
-        //const size_t app_size   = ApplicationMemoryBlockSlabHeapSize * sizeof(KMemoryBlock);
-        //const size_t sys_size   = SystemMemoryBlockSlabHeapSize * sizeof(KMemoryBlock);
-        //const size_t info_size  = BlockInfoSlabHeapSize * sizeof(KBlockInfo);
-        //const size_t fixed_size = util::AlignUp(app_size + sys_size + info_size, PageSize);
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), PageSize));
         MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));

         /* Ensure that we have space for our reference counts. */
-        const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(size), PageSize);
+        const size_t rc_size = util::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize);
         MESOSPHERE_ABORT_UNLESS(rc_size < size);
         size -= rc_size;
@@ -82,13 +78,28 @@ namespace ams::kern {
         g_resource_manager_page_manager.Initialize(address, size);

         /* Initialize the fixed-size slabheaps. */
-        s_app_memory_block_manager.Initialize(std::addressof(g_resource_manager_page_manager), ApplicationMemoryBlockSlabHeapSize);
-        s_sys_memory_block_manager.Initialize(std::addressof(g_resource_manager_page_manager), SystemMemoryBlockSlabHeapSize);
-        s_block_info_manager.Initialize(std::addressof(g_resource_manager_page_manager), BlockInfoSlabHeapSize);
+        s_app_memory_block_heap.Initialize(std::addressof(g_resource_manager_page_manager), ApplicationMemoryBlockSlabHeapSize);
+        s_sys_memory_block_heap.Initialize(std::addressof(g_resource_manager_page_manager), SystemMemoryBlockSlabHeapSize);
+        s_block_info_heap.Initialize(std::addressof(g_resource_manager_page_manager), BlockInfoSlabHeapSize);

-        /* Reserve all remaining pages for the page table manager. */
-        const size_t num_pt_pages = g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed();
-        s_page_table_manager.Initialize(std::addressof(g_resource_manager_page_manager), num_pt_pages, GetPointer<KPageTableManager::RefCount>(address + size));
+        /* Reserve all but a fixed number of remaining pages for the page table heap. */
+        const size_t num_pt_pages = g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed() - ReservedDynamicPageCount;
+        s_page_table_heap.Initialize(std::addressof(g_resource_manager_page_manager), num_pt_pages, GetPointer<KPageTableManager::RefCount>(address + size));
+
+        /* Setup the slab managers. */
+        KDynamicPageManager * const app_dynamic_page_manager = nullptr;
+        KDynamicPageManager * const sys_dynamic_page_manager = KTargetSystem::IsDynamicResourceLimitsEnabled() ? std::addressof(g_resource_manager_page_manager) : nullptr;
+        s_app_memory_block_manager.Initialize(app_dynamic_page_manager, std::addressof(s_app_memory_block_heap));
+        s_sys_memory_block_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_sys_memory_block_heap));
+
+        s_app_block_info_manager.Initialize(app_dynamic_page_manager, std::addressof(s_block_info_heap));
+        s_sys_block_info_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_block_info_heap));
+
+        s_app_page_table_manager.Initialize(app_dynamic_page_manager, std::addressof(s_page_table_heap));
+        s_sys_page_table_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_page_table_heap));
+
+        /* Check that we have the correct number of dynamic pages available. */
+        MESOSPHERE_ABORT_UNLESS(g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed() == ReservedDynamicPageCount);
     }

     void Kernel::PrintLayout() {
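The policy encoded above, as a tiny model (names hypothetical): application managers never receive a dynamic page source, and system managers receive one only when the configuration bit allows it; a manager with a null page source behaves exactly like the old fixed-size manager.

    struct DynamicPageManagerModel { /* hands out whole pages on demand */ };

    struct SlabManagerModel {
        DynamicPageManagerModel *page_source = nullptr;   /* nullptr => fixed-size only */
        bool CanExpand() const { return page_source != nullptr; }
    };

    /* Mirrors the setup above. */
    SlabManagerModel MakeManager(bool is_app, bool dynamic_enabled, DynamicPageManagerModel &pool) {
        return SlabManagerModel{ (!is_app && dynamic_enabled) ? &pool : nullptr };
    }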
@@ -60,12 +60,28 @@ namespace ams::kern::svc {
         auto &process = GetCurrentProcess();
         auto &handle_table = process.GetHandleTable();

+        /* Declare the event we're going to allocate. */
+        KEvent *event;
+
         /* Reserve a new event from the process resource limit. */
         KScopedResourceReservation event_reservation(std::addressof(process), ams::svc::LimitableResource_EventCountMax);
-        R_UNLESS(event_reservation.Succeeded(), svc::ResultLimitReached());
-
-        /* Create a new event. */
-        KEvent *event = KEvent::Create();
+        if (event_reservation.Succeeded()) {
+            /* Allocate an event normally. */
+            event = KEvent::Create();
+        } else {
+            /* We couldn't reserve an event. Check that we support dynamically expanding the resource limit. */
+            R_UNLESS(process.GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
+            R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
+
+            /* Try to allocate an event from unused slab memory. */
+            event = KEvent::CreateFromUnusedSlabMemory();
+            R_UNLESS(event != nullptr, svc::ResultLimitReached());
+
+            /* We successfully allocated an event, so add the object we allocated to the resource limit. */
+            Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_EventCountMax, 1);
+        }
+
+        /* Check that we successfully created an event. */
         R_UNLESS(event != nullptr, svc::ResultOutOfResource());

         /* Initialize the event. */
@@ -27,12 +27,44 @@ namespace ams::kern::svc {
         auto &process = GetCurrentProcess();
         auto &handle_table = process.GetHandleTable();

+        /* Declare the session we're going to allocate. */
+        T *session;
+
         /* Reserve a new session from the process resource limit. */
         KScopedResourceReservation session_reservation(std::addressof(process), ams::svc::LimitableResource_SessionCountMax);
-        R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached());
-
-        /* Create a new session. */
-        T *session = T::Create();
+        if (session_reservation.Succeeded()) {
+            /* Allocate a session normally. */
+            session = T::Create();
+        } else {
+            /* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */
+            R_UNLESS(process.GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
+            R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
+
+            /* Try to allocate a session from unused slab memory. */
+            session = T::CreateFromUnusedSlabMemory();
+            R_UNLESS(session != nullptr, svc::ResultLimitReached());
+
+            /* If we're creating a KSession, we want to add two KSessionRequests to the heap, to prevent request exhaustion. */
+            /* NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's no reason to not do this statically. */
+            if constexpr (std::same_as<T, KSession>) {
+                /* Ensure that if we fail to allocate our session requests, we close the session we created. */
+                auto session_guard = SCOPE_GUARD { session->Close(); };
+                {
+                    for (size_t i = 0; i < 2; ++i) {
+                        KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
+                        R_UNLESS(request != nullptr, svc::ResultLimitReached());

+                        request->Close();
+                    }
+                }
+                session_guard.Cancel();
+            }
+
+            /* We successfully allocated a session, so add the object we allocated to the resource limit. */
+            Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
+        }
+
+        /* Check that we successfully created a session. */
         R_UNLESS(session != nullptr, svc::ResultOutOfResource());

         /* Initialize the session. */
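Why the static check is sound, as a standalone model: T is fixed per instantiation, so the branch resolves at compile time and the light-session instantiation contains no request-allocation code at all.

    #include <concepts>

    struct KSessionModel {};        /* stand-ins; the real types live in mesosphere */
    struct KLightSessionModel {};

    template<typename T>
    constexpr int PostCreateWork() {
        if constexpr (std::same_as<T, KSessionModel>) {
            return 2;   /* would pre-allocate two session requests here */
        } else {
            return 0;   /* branch compiled out entirely */
        }
    }

    static_assert(PostCreateWork<KSessionModel>() == 2);
    static_assert(PostCreateWork<KLightSessionModel>() == 0);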
@@ -21,10 +21,6 @@ namespace ams::kern {
     constinit Kernel::State Kernel::s_state = Kernel::State::Invalid;
     constinit KResourceLimit Kernel::s_system_resource_limit;
     KMemoryManager Kernel::s_memory_manager;
-    constinit KPageTableManager Kernel::s_page_table_manager;
-    constinit KMemoryBlockSlabManager Kernel::s_app_memory_block_manager;
-    constinit KMemoryBlockSlabManager Kernel::s_sys_memory_block_manager;
-    constinit KBlockInfoManager Kernel::s_block_info_manager;
     constinit KSupervisorPageTable Kernel::s_supervisor_page_table;
     constinit KUnsafeMemory Kernel::s_unsafe_memory;
     constinit KWorkerTaskManager Kernel::s_worker_task_managers[KWorkerTaskManager::WorkerType_Count];
@@ -33,6 +29,17 @@ namespace ams::kern {
     constinit KInterruptTaskManager Kernel::s_interrupt_task_managers[cpu::NumCores];
     constinit KHardwareTimer Kernel::s_hardware_timers[cpu::NumCores];

+    constinit KPageTableSlabHeap Kernel::s_page_table_heap;
+    constinit KMemoryBlockSlabHeap Kernel::s_app_memory_block_heap;
+    constinit KMemoryBlockSlabHeap Kernel::s_sys_memory_block_heap;
+    constinit KBlockInfoSlabHeap Kernel::s_block_info_heap;
+    constinit KPageTableManager Kernel::s_app_page_table_manager;
+    constinit KPageTableManager Kernel::s_sys_page_table_manager;
+    constinit KMemoryBlockSlabManager Kernel::s_app_memory_block_manager;
+    constinit KMemoryBlockSlabManager Kernel::s_sys_memory_block_manager;
+    constinit KBlockInfoManager Kernel::s_app_block_info_manager;
+    constinit KBlockInfoManager Kernel::s_sys_block_info_manager;
+
     namespace {

         constinit std::array<KThread, cpu::NumCores> g_main_threads;