mirror of https://github.com/Atmosphere-NX/Atmosphere.git
synced 2024-11-26 22:02:15 +00:00

kern: Update page bitmaps/alloc to reflect 10.0.0 changes

This commit is contained in:
parent 3da0cda4ae
commit 152a945561

18 changed files with 498 additions and 267 deletions
@@ -258,8 +258,6 @@ namespace ams::kern::arch::arm64 {
             }
         }

-        ClearPageTable(table);
-
         MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(table) == 0);

         return table;
@@ -38,14 +38,14 @@ namespace ams::kern::arch::arm64 {
                 "    \n"
                 "    and    %w[tmp1], %w[tmp0], #0xFFFF\n"
                 "    cmp    %w[tmp1], %w[tmp0], lsr #16\n"
-                "    b.eq   done"
+                "    b.eq   3f\n"
                 "    sevl\n"
                 "2:\n"
                 "    wfe\n"
                 "    ldaxrh %w[tmp1], %[packed_tickets]\n"
                 "    cmp    %w[tmp1], %w[tmp0], lsr #16\n"
                 "    b.ne   2b\n"
-                "done:\n"
+                "3:\n"
                 : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [packed_tickets]"+Q"(this->packed_tickets)
                 :
                 : "cc", "memory"
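
Two changes land in this hunk: the old `" b.eq done"` string was missing its terminating `\n`, and the named `done` label becomes the numeric local label `3` (presumably because a named asm label clashes with itself when the compiler emits the inlined body more than once in a translation unit). For readers decoding the assembly: this is a ticket lock. `packed_tickets` packs a "now serving" halfword and a "next ticket" halfword into one 32-bit word; a waiter takes a ticket and then sleeps on `wfe` until the serving half equals its ticket. A minimal C++ sketch of the same acquire/release logic follows; the class name, the exact halfword layout, and the busy-wait are assumptions of the sketch, not the kernel's code:

    #include <atomic>
    #include <cstdint>

    /* Illustrative ticket lock. Assumed layout: low 16 bits = "now serving",
     * high 16 bits = "next ticket". */
    class TicketLock {
        private:
            std::atomic<uint32_t> packed_tickets{0};
        public:
            void Lock() {
                /* Atomically take the next ticket from the high halfword. */
                const uint32_t prev      = this->packed_tickets.fetch_add(1u << 16, std::memory_order_acquire);
                const uint32_t my_ticket = prev >> 16;

                /* Wait until the "now serving" halfword reaches our ticket; the
                 * assembly above sleeps via sevl/wfe instead of busy-polling. */
                while ((this->packed_tickets.load(std::memory_order_acquire) & 0xFFFF) != my_ticket) {
                    /* spin */
                }
            }

            void Unlock() {
                /* Advance "now serving". Note: the kernel increments only the
                 * 16-bit halfword, so no carry can reach the ticket half; this
                 * 32-bit fetch_add glosses over that wraparound detail. */
                this->packed_tickets.fetch_add(1, std::memory_order_release);
            }
    };
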
@@ -106,6 +106,6 @@ namespace ams::kern::arch::arm64 {
    };
    static_assert(sizeof(KAlignedSpinLock) == 2 * cpu::DataCacheLineSize);

-    using KSpinLock = KAlignedSpinLock;
+    using KSpinLock = KNotAlignedSpinLock;

}
@@ -44,6 +44,7 @@ namespace ams::kern::board::nintendo::nx {
            /* Randomness. */
            static void GenerateRandomBytes(void *dst, size_t size);
            static u64 GenerateRandomRange(u64 min, u64 max);
+           static u64 GenerateRandomU64();

            /* Privileged Access. */
            static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
@@ -0,0 +1,114 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_slab_heap.hpp>
#include <mesosphere/kern_k_page_group.hpp>
#include <mesosphere/kern_k_memory_block.hpp>
#include <mesosphere/kern_k_page_bitmap.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>

namespace ams::kern {

    class KDynamicPageManager {
        public:
            class PageBuffer {
                private:
                    u8 buffer[PageSize];
            };
            static_assert(sizeof(PageBuffer) == PageSize);
        private:
            KSpinLock lock;
            KPageBitmap page_bitmap;
            size_t used;
            size_t peak;
            size_t count;
            KVirtualAddress address;
            size_t size;
        public:
            KDynamicPageManager() : lock(), page_bitmap(), used(), peak(), count(), address(), size() { /* ... */ }

            Result Initialize(KVirtualAddress memory, size_t sz) {
                /* We need to have positive size. */
                R_UNLESS(sz > 0, svc::ResultOutOfMemory());

                /* Calculate metadata overhead. */
                const size_t metadata_size    = KPageBitmap::CalculateMetadataOverheadSize(sz / sizeof(PageBuffer));
                const size_t allocatable_size = sz - metadata_size;

                /* Set tracking fields. */
                this->address = memory;
                this->size    = util::AlignDown(allocatable_size, sizeof(PageBuffer));
                this->count   = allocatable_size / sizeof(PageBuffer);
                R_UNLESS(this->count > 0, svc::ResultOutOfMemory());

                /* Clear the metadata region. */
                u64 *metadata_ptr = GetPointer<u64>(this->address + allocatable_size);
                std::memset(metadata_ptr, 0, metadata_size);

                /* Initialize the bitmap. */
                this->page_bitmap.Initialize(metadata_ptr, this->count);

                /* Free the pages to the bitmap. */
                PageBuffer *cur_page = GetPointer<PageBuffer>(this->address);
                for (size_t i = 0; i < this->count; i++) {
                    std::memset(cur_page++, 0, sizeof(PageBuffer));
                    this->page_bitmap.SetBit(i);
                }

                return ResultSuccess();
            }

            constexpr KVirtualAddress GetAddress() const { return this->address; }
            constexpr size_t GetSize() const { return this->size; }
            constexpr size_t GetUsed() const { return this->used; }
            constexpr size_t GetPeak() const { return this->peak; }
            constexpr size_t GetCount() const { return this->count; }

            PageBuffer *Allocate() {
                /* Take the lock. */
                KScopedInterruptDisable di;
                KScopedSpinLock lk(this->lock);

                /* Find a random free block. */
                ssize_t soffset = this->page_bitmap.FindFreeBlock(true);
                if (AMS_UNLIKELY(soffset < 0)) {
                    return nullptr;
                }

                const size_t offset = static_cast<size_t>(soffset);

                /* Update our tracking. */
                this->page_bitmap.ClearBit(offset);
                this->peak = std::max(this->peak, (++this->used));

                return GetPointer<PageBuffer>(this->address) + offset;
            }

            void Free(PageBuffer *pb) {
                /* Take the lock. */
                KScopedInterruptDisable di;
                KScopedSpinLock lk(this->lock);

                /* Set the bit for the free page. */
                size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(this->address)) / sizeof(PageBuffer);
                this->page_bitmap.SetBit(offset);

                /* Decrement our used count. */
                --this->used;
            }
    };

}
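
A brief usage sketch of the new allocator. The helper function and parameter names here are hypothetical; in the kernel, the backing region comes out of the resource region carved up at initialization:

    /* Sketch: drive KDynamicPageManager over a caller-provided region. */
    void ExerciseDynamicPageManager(KVirtualAddress backing, size_t backing_size) {
        KDynamicPageManager page_manager;
        if (R_FAILED(page_manager.Initialize(backing, backing_size))) {
            return;
        }

        /* Allocate() picks a random free page (FindFreeBlock(true)) under an
         * interrupt-disabled spinlock, so it is usable from any context. */
        KDynamicPageManager::PageBuffer *page = page_manager.Allocate();
        if (page != nullptr) {
            /* ... use the page ... */
            page_manager.Free(page);
        }
    }
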
@@ -18,29 +18,20 @@
 #include <mesosphere/kern_k_slab_heap.hpp>
 #include <mesosphere/kern_k_page_group.hpp>
 #include <mesosphere/kern_k_memory_block.hpp>
+#include <mesosphere/kern_k_dynamic_page_manager.hpp>

 namespace ams::kern {

-    namespace impl {
-
-        class DynamicSlabHeapPage {
-            private:
-                u8 buffer[PageSize];
-        };
-        static_assert(sizeof(DynamicSlabHeapPage) == PageSize);
-
-    }
-
     template<typename T>
     class KDynamicSlabHeap {
         NON_COPYABLE(KDynamicSlabHeap);
         NON_MOVEABLE(KDynamicSlabHeap);
         private:
             using Impl = impl::KSlabHeapImpl;
-            using PageBuffer = impl::DynamicSlabHeapPage;
+            using PageBuffer = KDynamicPageManager::PageBuffer;
         private:
             Impl impl;
-            KDynamicSlabHeap<PageBuffer> *next_allocator;
+            KDynamicPageManager *page_allocator;
             std::atomic<size_t> used;
             std::atomic<size_t> peak;
             std::atomic<size_t> count;
@@ -54,7 +45,7 @@ namespace ams::kern {
                 return std::addressof(this->impl);
             }
         public:
-            constexpr KDynamicSlabHeap() : impl(), next_allocator(), used(), peak(), count(), address(), size() { /* ... */ }
+            constexpr KDynamicSlabHeap() : impl(), page_allocator(), used(), peak(), count(), address(), size() { /* ... */ }

             constexpr KVirtualAddress GetAddress() const { return this->address; }
             constexpr size_t GetSize() const { return this->size; }
@@ -80,10 +71,10 @@ namespace ams::kern {
                 }
             }

-            void Initialize(KDynamicSlabHeap<PageBuffer> *next) {
-                this->next_allocator = next;
-                this->address = next->GetAddress();
-                this->size = next->GetSize();
+            void Initialize(KDynamicPageManager *page_allocator) {
+                this->page_allocator = page_allocator;
+                this->address = this->page_allocator->GetAddress();
+                this->size = this->page_allocator->GetSize();
             }

             T *Allocate() {
@@ -91,8 +82,8 @@ namespace ams::kern {

                 /* If we fail to allocate, try to get a new page from our next allocator. */
                 if (AMS_UNLIKELY(allocated == nullptr)) {
-                    if (this->next_allocator != nullptr) {
-                        allocated = reinterpret_cast<T *>(this->next_allocator->Allocate());
+                    if (this->page_allocator != nullptr) {
+                        allocated = reinterpret_cast<T *>(this->page_allocator->Allocate());
                         if (allocated != nullptr) {
                             /* If we succeeded in getting a page, free the rest to our slab. */
                             for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
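
The refill path above works as follows: when the slab's free list is empty, the heap takes one whole page from the page allocator, hands the first T-sized object in it to the caller, and frees the remaining sizeof(PageBuffer)/sizeof(T) - 1 objects into its own free list. A standalone sketch of that pattern; the helper name and the generic Slab parameter are illustrative (the real code frees into impl::KSlabHeapImpl):

    /* Sketch of the slab-refill step, assuming a slab type with Free(T *). */
    template<typename T, typename Slab>
    T *RefillFromPage(Slab &slab, KDynamicPageManager &page_allocator) {
        /* Grab one fresh page. */
        KDynamicPageManager::PageBuffer *page = page_allocator.Allocate();
        if (page == nullptr) {
            return nullptr;
        }

        /* The first object in the page goes to the caller... */
        T *objects = reinterpret_cast<T *>(page);

        /* ...and the rest of the page is carved into free-list objects. */
        for (size_t i = 1; i < sizeof(KDynamicPageManager::PageBuffer) / sizeof(T); i++) {
            slab.Free(std::addressof(objects[i]));
        }

        return objects;
    }
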
@@ -126,7 +117,6 @@ namespace ams::kern {
         }
     };

-    class KDynamicPageManager     : public KDynamicSlabHeap<impl::DynamicSlabHeapPage>{};
     class KBlockInfoManager       : public KDynamicSlabHeap<KBlockInfo>{};
     class KMemoryBlockSlabManager : public KDynamicSlabHeap<KMemoryBlock>{};

@@ -161,9 +161,8 @@ namespace ams::kern {

    enum KMemoryAttribute : u8 {
        KMemoryAttribute_None         = 0x00,
-       KMemoryAttribute_Mask         = 0x7F,
-       KMemoryAttribute_All          = KMemoryAttribute_Mask,
-       KMemoryAttribute_DontCareMask = 0x80,
+       KMemoryAttribute_UserMask     = 0x7F,
+       KMemoryAttribute_All          = 0xFF,

        KMemoryAttribute_Locked       = ams::svc::MemoryAttribute_Locked,
        KMemoryAttribute_IpcLocked    = ams::svc::MemoryAttribute_IpcLocked,
@@ -171,9 +170,6 @@ namespace ams::kern {
        KMemoryAttribute_Uncached     = ams::svc::MemoryAttribute_Uncached,
    };

-   static_assert((KMemoryAttribute_Mask & KMemoryAttribute_DontCareMask) == 0);
-   static_assert(static_cast<typename std::underlying_type<KMemoryAttribute>::type>(~(KMemoryAttribute_Mask | KMemoryAttribute_DontCareMask)) == 0);
-
    struct KMemoryInfo {
        uintptr_t address;
        size_t size;
@@ -189,7 +185,7 @@ namespace ams::kern {
                .addr            = this->address,
                .size            = this->size,
                .state           = static_cast<ams::svc::MemoryState>(this->state & KMemoryState_Mask),
-               .attr            = static_cast<ams::svc::MemoryAttribute>(this->attribute & KMemoryAttribute_Mask),
+               .attr            = static_cast<ams::svc::MemoryAttribute>(this->attribute & KMemoryAttribute_UserMask),
                .perm            = static_cast<ams::svc::MemoryPermission>(this->perm & KMemoryPermission_UserMask),
                .ipc_refcount    = this->ipc_lock_count,
                .device_refcount = this->device_use_count,
@@ -297,7 +293,7 @@ namespace ams::kern {

        constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
            MESOSPHERE_ASSERT_THIS();
-           constexpr auto AttributeIgnoreMask = KMemoryAttribute_DontCareMask | KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;
+           constexpr auto AttributeIgnoreMask = KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;
            return this->memory_state == s && this->perm == p && (this->attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
        }

@@ -58,11 +58,11 @@ namespace ams::kern {
                Impl *next;
                Impl *prev;
            public:
-               constexpr Impl() : heap(), page_reference_counts(), metadata_region(), pool(), next(), prev() { /* ... */ }
+               Impl() : heap(), page_reference_counts(), metadata_region(), pool(), next(), prev() { /* ... */ }

                size_t Initialize(const KMemoryRegion *region, Pool pool, KVirtualAddress metadata_region, KVirtualAddress metadata_region_end);

-               KVirtualAddress AllocateBlock(s32 index) { return this->heap.AllocateBlock(index); }
+               KVirtualAddress AllocateBlock(s32 index, bool random) { return this->heap.AllocateBlock(index, random); }
                void Free(KVirtualAddress addr, size_t num_pages) { this->heap.Free(addr, num_pages); }

                void TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages);
@@ -149,8 +149,10 @@ namespace ams::kern {
                    return cur->GetNext();
                }
            }
+
+           Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool optimize, bool random);
        public:
-           constexpr KMemoryManager()
+           KMemoryManager()
                : pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process()
            {
                /* ... */
@@ -0,0 +1,267 @@
/*
 * Copyright (c) 2018-2020 Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_system_control.hpp>

namespace ams::kern {

    class KPageBitmap {
        private:
            class RandomBitGenerator {
                private:
                    util::TinyMT rng;
                    u32 entropy;
                    u32 bits_available;
                private:
                    void RefreshEntropy() {
                        this->entropy        = rng.GenerateRandomU32();
                        this->bits_available = BITSIZEOF(this->entropy);
                    }

                    bool GenerateRandomBit() {
                        if (this->bits_available == 0) {
                            this->RefreshEntropy();
                        }

                        const bool rnd_bit = (this->entropy & 1) != 0;
                        this->entropy >>= 1;
                        --this->bits_available;
                        return rnd_bit;
                    }
                public:
                    RandomBitGenerator() : rng(), entropy(), bits_available() {
                        this->rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
                    }

                    size_t SelectRandomBit(u64 bitmap) {
                        u64 selected = 0;

                        u64 cur_num_bits = BITSIZEOF(bitmap) / 2;
                        u64 cur_mask     = (1ull << cur_num_bits) - 1;

                        while (cur_num_bits) {
                            const u64 low  = (bitmap >> 0) & cur_mask;
                            const u64 high = (bitmap >> cur_num_bits) & cur_mask;

                            bool choose_low;
                            if (high == 0) {
                                /* If only low val is set, choose low. */
                                choose_low = true;
                            } else if (low == 0) {
                                /* If only high val is set, choose high. */
                                choose_low = false;
                            } else {
                                /* If both are set, choose random. */
                                choose_low = this->GenerateRandomBit();
                            }

                            /* If we chose low, proceed with low. */
                            if (choose_low) {
                                bitmap = low;
                                selected += 0;
                            } else {
                                bitmap = high;
                                selected += cur_num_bits;
                            }

                            /* Proceed. */
                            cur_num_bits /= 2;
                            cur_mask >>= cur_num_bits;
                        }

                        return selected;
                    }
            };
        public:
            static constexpr size_t MaxDepth = 4;
        private:
            u64 *bit_storages[MaxDepth];
            RandomBitGenerator rng;
            size_t num_bits;
            size_t used_depths;
        public:
            KPageBitmap() : bit_storages(), rng(), num_bits(), used_depths() { /* ... */ }

            constexpr size_t GetNumBits() const { return this->num_bits; }
            constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(this->used_depths) - 1; }

            u64 *Initialize(u64 *storage, size_t size) {
                /* Initially, everything is un-set. */
                this->num_bits = 0;

                /* Calculate the needed bitmap depth. */
                this->used_depths = static_cast<size_t>(GetRequiredDepth(size));
                MESOSPHERE_ASSERT(this->used_depths <= MaxDepth);

                /* Set the bitmap pointers. */
                for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
                    this->bit_storages[depth] = storage;
                    size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64);
                    storage += size;
                }

                return storage;
            }

            ssize_t FindFreeBlock(bool random) {
                uintptr_t offset = 0;
                s32 depth = 0;

                if (random) {
                    do {
                        const u64 v = this->bit_storages[depth][offset];
                        if (v == 0) {
                            /* If depth is bigger than zero, then a previous level indicated a block was free. */
                            MESOSPHERE_ASSERT(depth == 0);
                            return -1;
                        }
                        offset = offset * BITSIZEOF(u64) + this->rng.SelectRandomBit(v);
                        ++depth;
                    } while (depth < static_cast<s32>(this->used_depths));
                } else {
                    do {
                        const u64 v = this->bit_storages[depth][offset];
                        if (v == 0) {
                            /* If depth is bigger than zero, then a previous level indicated a block was free. */
                            MESOSPHERE_ASSERT(depth == 0);
                            return -1;
                        }
                        offset = offset * BITSIZEOF(u64) + __builtin_ctzll(v);
                        ++depth;
                    } while (depth < static_cast<s32>(this->used_depths));
                }

                return static_cast<ssize_t>(offset);
            }

            void SetBit(size_t offset) {
                this->SetBit(this->GetHighestDepthIndex(), offset);
                this->num_bits++;
            }

            void ClearBit(size_t offset) {
                this->ClearBit(this->GetHighestDepthIndex(), offset);
                this->num_bits--;
            }

            bool ClearRange(size_t offset, size_t count) {
                s32 depth = this->GetHighestDepthIndex();
                u64 *bits = this->bit_storages[depth];
                size_t bit_ind = offset / BITSIZEOF(u64);
                if (AMS_LIKELY(count < BITSIZEOF(u64))) {
                    const size_t shift = offset % BITSIZEOF(u64);
                    MESOSPHERE_ASSERT(shift + count <= BITSIZEOF(u64));
                    /* Check that all the bits are set. */
                    const u64 mask = ((u64(1) << count) - 1) << shift;
                    u64 v = bits[bit_ind];
                    if ((v & mask) != mask) {
                        return false;
                    }

                    /* Clear the bits. */
                    v &= ~mask;
                    bits[bit_ind] = v;
                    if (v == 0) {
                        this->ClearBit(depth - 1, bit_ind);
                    }
                } else {
                    MESOSPHERE_ASSERT(offset % BITSIZEOF(u64) == 0);
                    MESOSPHERE_ASSERT(count % BITSIZEOF(u64) == 0);
                    /* Check that all the bits are set. */
                    size_t remaining = count;
                    size_t i = 0;
                    do {
                        if (bits[bit_ind + i++] != ~u64(0)) {
                            return false;
                        }
                        remaining -= BITSIZEOF(u64);
                    } while (remaining > 0);

                    /* Clear the bits. */
                    remaining = count;
                    i = 0;
                    do {
                        bits[bit_ind + i] = 0;
                        this->ClearBit(depth - 1, bit_ind + i);
                        i++;
                        remaining -= BITSIZEOF(u64);
                    } while (remaining > 0);
                }

                this->num_bits -= count;
                return true;
            }
        private:
            void SetBit(s32 depth, size_t offset) {
                while (depth >= 0) {
                    size_t ind   = offset / BITSIZEOF(u64);
                    size_t which = offset % BITSIZEOF(u64);
                    const u64 mask = u64(1) << which;

                    u64 *bit = std::addressof(this->bit_storages[depth][ind]);
                    u64 v = *bit;
                    MESOSPHERE_ASSERT((v & mask) == 0);
                    *bit = v | mask;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

            void ClearBit(s32 depth, size_t offset) {
                while (depth >= 0) {
                    size_t ind   = offset / BITSIZEOF(u64);
                    size_t which = offset % BITSIZEOF(u64);
                    const u64 mask = u64(1) << which;

                    u64 *bit = std::addressof(this->bit_storages[depth][ind]);
                    u64 v = *bit;
                    MESOSPHERE_ASSERT((v & mask) != 0);
                    v &= ~mask;
                    *bit = v;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }
        private:
            static constexpr s32 GetRequiredDepth(size_t region_size) {
                s32 depth = 0;
                while (true) {
                    region_size /= BITSIZEOF(u64);
                    depth++;
                    if (region_size == 0) {
                        return depth;
                    }
                }
            }
        public:
            static constexpr size_t CalculateMetadataOverheadSize(size_t region_size) {
                size_t overhead_bits = 0;
                for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
                    region_size = util::AlignUp(region_size, BITSIZEOF(u64)) / BITSIZEOF(u64);
                    overhead_bits += region_size;
                }
                return overhead_bits * sizeof(u64);
            }
    };

}
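
For intuition: KPageBitmap is a radix-64 tree over the free-page bits. A set bit at depth d means the corresponding u64 word at depth d+1 contains at least one set bit, so FindFreeBlock reaches a free page in used_depths word reads, taking either the lowest set bit (__builtin_ctzll) or a uniformly random one (SelectRandomBit halves the word and flips a coin whenever both halves are nonempty). SetBit/ClearBit only propagate upward while a word transitions between zero and nonzero. A worked check of the metadata math for a 4096-page region, which is three levels deep (64 leaf words, one middle word, one root word):

    /* 4096 leaf bits need 64 u64 words; each parent level summarizes 64 words
     * below it, so the tree costs 64 + 1 + 1 = 66 words of metadata. */
    static_assert(KPageBitmap::CalculateMetadataOverheadSize(4096) == 66 * sizeof(u64));
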
@@ -15,6 +15,7 @@
 */
#pragma once
#include <mesosphere/kern_common.hpp>
+#include <mesosphere/kern_k_page_bitmap.hpp>

namespace ams::kern {

@@ -52,178 +53,13 @@ namespace ams::kern {
         private:
             class Block {
                 private:
-                    class Bitmap {
-                        public:
-                            static constexpr size_t MaxDepth = 4;
-                        private:
-                            u64 *bit_storages[MaxDepth];
-                            size_t num_bits;
-                            size_t used_depths;
-                        public:
-                            constexpr Bitmap() : bit_storages(), num_bits(), used_depths() { /* ... */ }
-
-                            constexpr size_t GetNumBits() const { return this->num_bits; }
-                            constexpr s32 GetHighestDepthIndex() const { return static_cast<s32>(this->used_depths) - 1; }
-
-                            u64 *Initialize(u64 *storage, size_t size) {
-                                /* Initially, everything is un-set. */
-                                this->num_bits = 0;
-
-                                /* Calculate the needed bitmap depth. */
-                                this->used_depths = static_cast<size_t>(GetRequiredDepth(size));
-                                MESOSPHERE_ASSERT(this->used_depths <= MaxDepth);
-
-                                /* Set the bitmap pointers. */
-                                for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) {
-                                    this->bit_storages[depth] = storage;
-                                    size = util::AlignUp(size, BITSIZEOF(u64)) / BITSIZEOF(u64);
-                                    storage += size;
-                                }
-
-                                return storage;
-                            }
-
-                            ssize_t FindFreeBlock() const {
-                                uintptr_t offset = 0;
-                                s32 depth = 0;
-
-                                do {
-                                    const u64 v = this->bit_storages[depth][offset];
-                                    if (v == 0) {
-                                        /* If depth is bigger than zero, then a previous level indicated a block was free. */
-                                        MESOSPHERE_ASSERT(depth == 0);
-                                        return -1;
-                                    }
-                                    offset = offset * BITSIZEOF(u64) + __builtin_ctzll(v);
-                                    ++depth;
-                                } while (depth < static_cast<s32>(this->used_depths));
-
-                                return static_cast<ssize_t>(offset);
-                            }
-
-                            void SetBit(size_t offset) {
-                                this->SetBit(this->GetHighestDepthIndex(), offset);
-                                this->num_bits++;
-                            }
-
-                            void ClearBit(size_t offset) {
-                                this->ClearBit(this->GetHighestDepthIndex(), offset);
-                                this->num_bits--;
-                            }
-
-                            bool ClearRange(size_t offset, size_t count) {
-                                s32 depth = this->GetHighestDepthIndex();
-                                u64 *bits = this->bit_storages[depth];
-                                size_t bit_ind = offset / BITSIZEOF(u64);
-                                if (AMS_LIKELY(count < BITSIZEOF(u64))) {
-                                    const size_t shift = offset % BITSIZEOF(u64);
-                                    MESOSPHERE_ASSERT(shift + count <= BITSIZEOF(u64));
-                                    /* Check that all the bits are set. */
-                                    const u64 mask = ((u64(1) << count) - 1) << shift;
-                                    u64 v = bits[bit_ind];
-                                    if ((v & mask) != mask) {
-                                        return false;
-                                    }
-
-                                    /* Clear the bits. */
-                                    v &= ~mask;
-                                    bits[bit_ind] = v;
-                                    if (v == 0) {
-                                        this->ClearBit(depth - 1, bit_ind);
-                                    }
-                                } else {
-                                    MESOSPHERE_ASSERT(offset % BITSIZEOF(u64) == 0);
-                                    MESOSPHERE_ASSERT(count % BITSIZEOF(u64) == 0);
-                                    /* Check that all the bits are set. */
-                                    size_t remaining = count;
-                                    size_t i = 0;
-                                    do {
-                                        if (bits[bit_ind + i++] != ~u64(0)) {
-                                            return false;
-                                        }
-                                        remaining -= BITSIZEOF(u64);
-                                    } while (remaining > 0);
-
-                                    /* Clear the bits. */
-                                    remaining = count;
-                                    i = 0;
-                                    do {
-                                        bits[bit_ind + i] = 0;
-                                        this->ClearBit(depth - 1, bit_ind + i);
-                                        i++;
-                                        remaining -= BITSIZEOF(u64);
-                                    } while (remaining > 0);
-                                }
-
-                                this->num_bits -= count;
-                                return true;
-                            }
-                        private:
-                            void SetBit(s32 depth, size_t offset) {
-                                while (depth >= 0) {
-                                    size_t ind   = offset / BITSIZEOF(u64);
-                                    size_t which = offset % BITSIZEOF(u64);
-                                    const u64 mask = u64(1) << which;
-
-                                    u64 *bit = std::addressof(this->bit_storages[depth][ind]);
-                                    u64 v = *bit;
-                                    MESOSPHERE_ASSERT((v & mask) == 0);
-                                    *bit = v | mask;
-                                    if (v) {
-                                        break;
-                                    }
-                                    offset = ind;
-                                    depth--;
-                                }
-                            }
-
-                            void ClearBit(s32 depth, size_t offset) {
-                                while (depth >= 0) {
-                                    size_t ind   = offset / BITSIZEOF(u64);
-                                    size_t which = offset % BITSIZEOF(u64);
-                                    const u64 mask = u64(1) << which;
-
-                                    u64 *bit = std::addressof(this->bit_storages[depth][ind]);
-                                    u64 v = *bit;
-                                    MESOSPHERE_ASSERT((v & mask) != 0);
-                                    v &= ~mask;
-                                    *bit = v;
-                                    if (v) {
-                                        break;
-                                    }
-                                    offset = ind;
-                                    depth--;
-                                }
-                            }
-                        private:
-                            static constexpr s32 GetRequiredDepth(size_t region_size) {
-                                s32 depth = 0;
-                                while (true) {
-                                    region_size /= BITSIZEOF(u64);
-                                    depth++;
-                                    if (region_size == 0) {
-                                        return depth;
-                                    }
-                                }
-                            }
-                        public:
-                            static constexpr size_t CalculateMetadataOverheadSize(size_t region_size) {
-                                size_t overhead_bits = 0;
-                                for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) {
-                                    region_size = util::AlignUp(region_size, BITSIZEOF(u64)) / BITSIZEOF(u64);
-                                    overhead_bits += region_size;
-                                }
-                                return overhead_bits * sizeof(u64);
-                            }
-                    };
-                private:
-                    Bitmap bitmap;
+                    KPageBitmap bitmap;
                     KVirtualAddress heap_address;
                     uintptr_t end_offset;
                     size_t block_shift;
                     size_t next_block_shift;
                 public:
-                    constexpr Block() : bitmap(), heap_address(), end_offset(), block_shift(), next_block_shift() { /* ... */ }
+                    Block() : bitmap(), heap_address(), end_offset(), block_shift(), next_block_shift() { /* ... */ }

                     constexpr size_t GetShift() const { return this->block_shift; }
                     constexpr size_t GetNextShift() const { return this->next_block_shift; }
@@ -266,9 +102,9 @@ namespace ams::kern {
                         return Null<KVirtualAddress>;
                     }

-                    KVirtualAddress PopBlock() {
+                    KVirtualAddress PopBlock(bool random) {
                         /* Find a free block. */
-                        ssize_t soffset = this->bitmap.FindFreeBlock();
+                        ssize_t soffset = this->bitmap.FindFreeBlock(random);
                         if (soffset < 0) {
                             return Null<KVirtualAddress>;
                         }
@@ -283,7 +119,7 @@ namespace ams::kern {
                         const size_t cur_block_size  = (u64(1) << cur_block_shift);
                         const size_t next_block_size = (u64(1) << next_block_shift);
                         const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size;
-                        return Bitmap::CalculateMetadataOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size);
+                        return KPageBitmap::CalculateMetadataOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size);
                     }
                 };
             private:
@@ -298,7 +134,7 @@ namespace ams::kern {

             void FreeBlock(KVirtualAddress block, s32 index);
         public:
-            constexpr KPageHeap() : heap_address(), heap_size(), used_size(), num_blocks(), blocks() { /* ... */ }
+            KPageHeap() : heap_address(), heap_size(), used_size(), num_blocks(), blocks() { /* ... */ }

             constexpr KVirtualAddress GetAddress() const { return this->heap_address; }
             constexpr size_t GetSize() const { return this->heap_size; }
@@ -313,7 +149,7 @@ namespace ams::kern {
                 this->used_size = this->heap_size - (this->GetNumFreePages() * PageSize);
             }

-            KVirtualAddress AllocateBlock(s32 index);
+            KVirtualAddress AllocateBlock(s32 index, bool random);
             void Free(KVirtualAddress addr, size_t num_pages);
         private:
             static size_t CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
@@ -91,7 +91,7 @@ namespace ams::kern {
        };
        static_assert(std::is_trivially_destructible<PageLinkedList>::value);

-       static constexpr u32 DefaultMemoryIgnoreAttr = KMemoryAttribute_DontCareMask | KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;
+       static constexpr u32 DefaultMemoryIgnoreAttr = KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;

        static constexpr size_t GetAddressSpaceWidth(ams::svc::CreateProcessFlag as_type) {
            switch (static_cast<ams::svc::CreateProcessFlag>(as_type & ams::svc::CreateProcessFlag_AddressSpaceMask)) {
@@ -135,6 +135,7 @@ namespace ams::kern {
            KProcessAddress code_region_end;
            size_t max_heap_size;
            size_t max_physical_memory_size;
+           size_t mapped_unsafe_physical_memory;
            mutable KLightLock general_lock;
            mutable KLightLock map_physical_memory_lock;
            KPageTableImpl impl;
@@ -156,9 +157,9 @@ namespace ams::kern {
                address_space_start(), address_space_end(), heap_region_start(), heap_region_end(), current_heap_end(),
                alias_region_start(), alias_region_end(), stack_region_start(), stack_region_end(), kernel_map_region_start(),
                kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(),
-               max_heap_size(), max_physical_memory_size(), general_lock(), map_physical_memory_lock(), impl(), memory_block_manager(),
-               allocate_option(), address_space_width(), is_kernel(), enable_aslr(), memory_block_slab_manager(), block_info_manager(),
-               cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
+               max_heap_size(), max_physical_memory_size(), mapped_unsafe_physical_memory(), general_lock(), map_physical_memory_lock(),
+               impl(), memory_block_manager(), allocate_option(), address_space_width(), is_kernel(), enable_aslr(), memory_block_slab_manager(),
+               block_info_manager(), cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(),
                heap_fill_value(), ipc_fill_value(), stack_fill_value()
            {
                /* ... */
@@ -72,6 +72,10 @@ namespace ams::kern {
            }

            void Free(KVirtualAddress addr) {
+               /* Ensure all pages in the heap are zero. */
+               cpu::ClearPageToZero(GetVoidPointer(addr));
+
+               /* Free the page. */
                BaseHeap::Free(GetPointer<impl::PageTablePage>(addr));
            }

@@ -89,6 +89,7 @@ namespace ams::kern {
            u32 version{};
            KHandleTable handle_table{};
            KProcessAddress plr_address{};
+           void *plr_heap_address{};
            KThread *exception_thread{};
            ThreadList thread_list{};
            SharedMemoryInfoList shared_memory_list{};
@@ -118,7 +119,7 @@ namespace ams::kern {
        private:
            Result Initialize(const ams::svc::CreateProcessParameter &params);
        public:
-           constexpr KProcess() { /* ... */ }
+           KProcess() { /* ... */ }
            virtual ~KProcess() { /* ... */ }

            Result Initialize(const ams::svc::CreateProcessParameter &params, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool);
@@ -765,6 +765,7 @@ namespace ams::kern::arch::arm64 {
                    KVirtualAddress l3_table = util::AlignDown(reinterpret_cast<uintptr_t>(l3_entry), PageSize);
                    if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) {
                        this->GetPageTableManager().Close(l3_table, L2BlockSize / L3BlockSize);
+                       ClearPageTable(l3_table);
                        this->FreePageTable(page_list, l3_table);
                    }
                }
@@ -816,6 +817,7 @@ namespace ams::kern::arch::arm64 {
                    KVirtualAddress l2_table = util::AlignDown(reinterpret_cast<uintptr_t>(l2_entry), PageSize);
                    if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) {
                        this->GetPageTableManager().Close(l2_table, L1BlockSize / L2BlockSize);
+                       ClearPageTable(l2_table);
                        this->FreePageTable(page_list, l2_table);
                    }
                }
@@ -81,6 +81,15 @@ namespace ams::kern::board::nintendo::nx {
            return value;
        }

+       void EnsureRandomGeneratorInitialized() {
+           if (AMS_UNLIKELY(!g_initialized_random_generator)) {
+               u64 seed;
+               smc::GenerateRandomBytes(&seed, sizeof(seed));
+               g_random_generator.Initialize(reinterpret_cast<u32*>(&seed), sizeof(seed) / sizeof(u32));
+               g_initialized_random_generator = true;
+           }
+       }
+
+       ALWAYS_INLINE u64 GenerateRandomU64FromGenerator() {
+           return g_random_generator.GenerateRandomU64();
+       }
@@ -304,16 +313,20 @@ namespace ams::kern::board::nintendo::nx {
        KScopedInterruptDisable intr_disable;
        KScopedSpinLock lk(g_random_lock);

-       if (AMS_UNLIKELY(!g_initialized_random_generator)) {
-           u64 seed;
-           GenerateRandomBytes(&seed, sizeof(seed));
-           g_random_generator.Initialize(reinterpret_cast<u32*>(&seed), sizeof(seed) / sizeof(u32));
-           g_initialized_random_generator = true;
-       }
+       EnsureRandomGeneratorInitialized();

        return GenerateUniformRange(min, max, GenerateRandomU64FromGenerator);
    }

+   u64 KSystemControl::GenerateRandomU64() {
+       KScopedInterruptDisable intr_disable;
+       KScopedSpinLock lk(g_random_lock);
+
+       EnsureRandomGeneratorInitialized();
+
+       return GenerateRandomU64FromGenerator();
+   }
+
    void KSystemControl::SleepSystem() {
        MESOSPHERE_LOG("SleepSystem() was called\n");
        KSleepManager::SleepSystem();
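
GenerateUniformRange itself is not shown in this diff; the usual way such a helper maps raw 64-bit randomness onto an inclusive [min, max] range without modulo bias is rejection sampling. A hedged sketch of that idea, not necessarily Atmosphere's exact implementation:

    /* Map f()'s uniform u64 output onto [min, max] inclusive, rejecting
     * samples from the final partial bucket to avoid modulo bias. */
    template<typename F>
    u64 GenerateUniformRange(u64 min, u64 max, F f) {
        /* range_size wraps to 0 when [min, max] covers every u64 value. */
        const u64 range_size = max - min + 1;
        if (range_size == 0) {
            return f();
        }

        const u64 bucket_size = ~u64(0) / range_size;     /* values mapped per result */
        const u64 limit       = bucket_size * range_size; /* reject samples >= limit  */
        while (true) {
            const u64 v = f();
            if (v < limit) {
                return min + (v / bucket_size);
            }
        }
    }
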
@@ -104,7 +104,7 @@ namespace ams::kern {
        Impl *chosen_manager = nullptr;
        KVirtualAddress allocated_block = Null<KVirtualAddress>;
        for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) {
-           allocated_block = chosen_manager->AllocateBlock(heap_index);
+           allocated_block = chosen_manager->AllocateBlock(heap_index, true);
            if (allocated_block != Null<KVirtualAddress>) {
                break;
            }
@@ -129,19 +129,7 @@ namespace ams::kern {
            return allocated_block;
        }

-       Result KMemoryManager::Allocate(KPageGroup *out, size_t num_pages, u32 option) {
-           MESOSPHERE_ASSERT(out != nullptr);
-           MESOSPHERE_ASSERT(out->GetNumPages() == 0);
-
-           /* Early return if we're allocating no pages. */
-           if (num_pages == 0) {
-               return ResultSuccess();
-           }
-
-           /* Lock the pool that we're allocating from. */
-           const auto [pool, dir] = DecodeOption(option);
-           KScopedLightLock lk(this->pool_locks[pool]);
-
+       Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool optimize, bool random) {
            /* Choose a heap based on our page size request. */
            const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
            R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
@@ -162,7 +150,7 @@ namespace ams::kern {
            for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
                while (num_pages >= pages_per_alloc) {
                    /* Allocate a block. */
-                   KVirtualAddress allocated_block = cur_manager->AllocateBlock(index);
+                   KVirtualAddress allocated_block = cur_manager->AllocateBlock(index, random);
                    if (allocated_block == Null<KVirtualAddress>) {
                        break;
                    }
@@ -175,7 +163,7 @@ namespace ams::kern {
                    }

                    /* Maintain the optimized memory bitmap, if we should. */
-                   if (this->has_optimized_process[pool]) {
+                   if (optimize) {
                        cur_manager->TrackAllocationForOptimizedProcess(allocated_block, pages_per_alloc);
                    }

@@ -193,6 +181,21 @@ namespace ams::kern {
            return ResultSuccess();
        }

+       Result KMemoryManager::Allocate(KPageGroup *out, size_t num_pages, u32 option) {
+           MESOSPHERE_ASSERT(out != nullptr);
+           MESOSPHERE_ASSERT(out->GetNumPages() == 0);
+
+           /* Early return if we're allocating no pages. */
+           R_SUCCEED_IF(num_pages == 0);
+
+           /* Lock the pool that we're allocating from. */
+           const auto [pool, dir] = DecodeOption(option);
+           KScopedLightLock lk(this->pool_locks[pool]);
+
+           /* Allocate the page group. */
+           return this->AllocatePageGroupImpl(out, num_pages, pool, dir, this->has_optimized_process[pool], true);
+       }
+
        size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) {
            /* Calculate metadata sizes. */
            const size_t ref_count_size = (region->GetSize() / PageSize) * sizeof(u16);
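
Call shape after the refactor: external callers still use Allocate with an encoded option, and the randomization/optimization knobs are chosen inside the wrapper. A hypothetical helper showing the shape (the pool and direction enumerators exist in KMemoryManager; the helper itself is illustrative):

    /* Sketch: allocate num_pages from the front of the application pool. */
    Result AllocateApplicationPages(KPageGroup *out, size_t num_pages) {
        const u32 option = KMemoryManager::EncodeOption(KMemoryManager::Pool_Application, KMemoryManager::Direction_FromFront);
        return Kernel::GetMemoryManager().Allocate(out, num_pages, option);
    }
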
@@ -51,11 +51,11 @@ namespace ams::kern {
            return num_free;
        }

-       KVirtualAddress KPageHeap::AllocateBlock(s32 index) {
+       KVirtualAddress KPageHeap::AllocateBlock(s32 index, bool random) {
            const size_t needed_size = this->blocks[index].GetSize();

            for (s32 i = index; i < static_cast<s32>(this->num_blocks); i++) {
-               if (const KVirtualAddress addr = this->blocks[i].PopBlock(); addr != Null<KVirtualAddress>) {
+               if (const KVirtualAddress addr = this->blocks[i].PopBlock(random); addr != Null<KVirtualAddress>) {
                    if (const size_t allocated_size = this->blocks[i].GetSize(); allocated_size > needed_size) {
                        this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
                    }
@@ -20,35 +20,36 @@ namespace ams::kern {

    Result KPageTableBase::InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end) {
        /* Initialize our members. */
-       this->address_space_width = (is_64_bit) ? BITSIZEOF(u64) : BITSIZEOF(u32);
-       this->address_space_start = KProcessAddress(GetInteger(start));
-       this->address_space_end   = KProcessAddress(GetInteger(end));
-       this->is_kernel           = true;
-       this->enable_aslr         = true;
+       this->address_space_width           = (is_64_bit) ? BITSIZEOF(u64) : BITSIZEOF(u32);
+       this->address_space_start           = KProcessAddress(GetInteger(start));
+       this->address_space_end             = KProcessAddress(GetInteger(end));
+       this->is_kernel                     = true;
+       this->enable_aslr                   = true;

-       this->heap_region_start        = 0;
-       this->heap_region_end          = 0;
-       this->current_heap_end         = 0;
-       this->alias_region_start       = 0;
-       this->alias_region_end         = 0;
-       this->stack_region_start       = 0;
-       this->stack_region_end         = 0;
-       this->kernel_map_region_start  = 0;
-       this->kernel_map_region_end    = 0;
-       this->alias_code_region_start  = 0;
-       this->alias_code_region_end    = 0;
-       this->code_region_start        = 0;
-       this->code_region_end          = 0;
-       this->max_heap_size            = 0;
-       this->max_physical_memory_size = 0;
+       this->heap_region_start             = 0;
+       this->heap_region_end               = 0;
+       this->current_heap_end              = 0;
+       this->alias_region_start            = 0;
+       this->alias_region_end              = 0;
+       this->stack_region_start            = 0;
+       this->stack_region_end              = 0;
+       this->kernel_map_region_start       = 0;
+       this->kernel_map_region_end         = 0;
+       this->alias_code_region_start       = 0;
+       this->alias_code_region_end         = 0;
+       this->code_region_start             = 0;
+       this->code_region_end               = 0;
+       this->max_heap_size                 = 0;
+       this->max_physical_memory_size      = 0;
+       this->mapped_unsafe_physical_memory = 0;

-       this->memory_block_slab_manager = std::addressof(Kernel::GetSystemMemoryBlockManager());
-       this->block_info_manager        = std::addressof(Kernel::GetBlockInfoManager());
+       this->memory_block_slab_manager     = std::addressof(Kernel::GetSystemMemoryBlockManager());
+       this->block_info_manager            = std::addressof(Kernel::GetBlockInfoManager());

-       this->allocate_option  = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
-       this->heap_fill_value  = MemoryFillValue_Zero;
-       this->ipc_fill_value   = MemoryFillValue_Zero;
-       this->stack_fill_value = MemoryFillValue_Zero;
+       this->allocate_option               = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
+       this->heap_fill_value               = MemoryFillValue_Zero;
+       this->ipc_fill_value                = MemoryFillValue_Zero;
+       this->stack_fill_value              = MemoryFillValue_Zero;

        this->cached_physical_linear_region = nullptr;
        this->cached_physical_heap_region   = nullptr;
@@ -222,9 +223,10 @@ namespace ams::kern {
        }

        /* Set heap and fill members. */
-       this->current_heap_end         = this->heap_region_start;
-       this->max_heap_size            = 0;
-       this->max_physical_memory_size = 0;
+       this->current_heap_end              = this->heap_region_start;
+       this->max_heap_size                 = 0;
+       this->max_physical_memory_size      = 0;
+       this->mapped_unsafe_physical_memory = 0;

        const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();
        this->heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero;
@@ -35,7 +35,8 @@ namespace ams::kern {

        /* Create and clear the process local region. */
        R_TRY(this->CreateThreadLocalRegion(std::addressof(this->plr_address)));
-       std::memset(this->GetThreadLocalRegionPointer(this->plr_address), 0, ams::svc::ThreadLocalRegionSize);
+       this->plr_heap_address = this->GetThreadLocalRegionPointer(this->plr_address);
+       std::memset(this->plr_heap_address, 0, ams::svc::ThreadLocalRegionSize);

        /* Copy in the name from parameters. */
        static_assert(sizeof(params.name) < sizeof(this->name));