kern: implement additional randomness in KPageHeap allocations
parent 24739f245e, commit 401047f603
6 changed files with 150 additions and 18 deletions
@@ -73,6 +73,7 @@ namespace ams::kern {
             size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p);
 
             KPhysicalAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
+            KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { return m_heap.AllocateAligned(index, num_pages, align_pages); }
             void Free(KPhysicalAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }
 
             void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); }
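The hunk above gives the per-pool manager Impl a thin AllocateAligned pass-through to its KPageHeap, alongside the existing AllocateBlock and Free wrappers; a later hunk switches the continuous-allocation loop over to it. A minimal usage sketch, with hypothetical variables (impl, num_pages, align_pages); a real caller derives the heap index via KPageHeap::GetAlignedBlockIndex, shown further down:

    /* Hypothetical usage sketch, not kernel code. */
    const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
    KPhysicalAddress block = impl->AllocateAligned(heap_index, num_pages, align_pages);
    if (block != Null<KPhysicalAddress>) {
        /* Exactly num_pages pages, aligned to align_pages * PageSize. */
    }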
@@ -20,7 +20,7 @@
 namespace ams::kern {
 
     class KPageBitmap {
-        private:
+        public:
             class RandomBitGenerator {
                 private:
                     util::TinyMT m_rng;
@@ -42,12 +42,43 @@ namespace ams::kern {
                         --m_bits_available;
                         return rnd_bit;
                     }
 
+                    u64 GenerateRandomBits(u32 num_bits) {
+                        u64 result = 0;
+
+                        /* Iteratively add random bits to our result. */
+                        while (num_bits > 0) {
+                            /* Ensure we have random bits to take from. */
+                            if (m_bits_available == 0) {
+                                this->RefreshEntropy();
+                            }
+
+                            /* Determine how many bits to take this round. */
+                            const auto cur_bits = std::min(num_bits, m_bits_available);
+
+                            /* Generate mask for our current bits. */
+                            const u64 mask = (static_cast<u64>(1) << cur_bits) - 1;
+
+                            /* Add bits to output from our entropy. */
+                            result <<= cur_bits;
+                            result |= (m_entropy & mask);
+
+                            /* Remove bits from our entropy. */
+                            m_entropy >>= cur_bits;
+                            m_bits_available -= cur_bits;
+
+                            /* Advance. */
+                            num_bits -= cur_bits;
+                        }
+
+                        return result;
+                    }
                 public:
                     RandomBitGenerator() : m_entropy(), m_bits_available() {
                         m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64()));
                     }
 
-                    size_t SelectRandomBit(u64 bitmap) {
+                    u64 SelectRandomBit(u64 bitmap) {
                         u64 selected = 0;
 
                         for (size_t cur_num_bits = BITSIZEOF(bitmap) / 2; cur_num_bits != 0; cur_num_bits /= 2) {
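The new GenerateRandomBits drains the generator's buffered entropy a few bits at a time, refreshing from the underlying TinyMT only when the 64-bit buffer runs dry, so one full RNG draw services many small requests. A self-contained sketch of the same buffering scheme, with std::mt19937_64 standing in for util::TinyMT and requests limited to 63 bits so every shift stays well defined:

    /* Standalone sketch of the bit-buffering idea; not the kernel's code. */
    #include <algorithm>
    #include <cstdint>
    #include <random>

    class BitBuffer {
        private:
            std::mt19937_64 m_rng{0xA7F05};  /* stand-in for util::TinyMT */
            std::uint64_t m_entropy = 0;
            std::uint32_t m_bits_available = 0;

            void RefreshEntropy() {
                m_entropy = m_rng();         /* one full 64-bit draw */
                m_bits_available = 64;
            }
        public:
            /* num_bits must be <= 63 in this sketch. */
            std::uint64_t GenerateRandomBits(std::uint32_t num_bits) {
                std::uint64_t result = 0;
                while (num_bits > 0) {
                    if (m_bits_available == 0) {
                        this->RefreshEntropy();
                    }
                    const std::uint32_t cur  = std::min(num_bits, m_bits_available);
                    const std::uint64_t mask = (std::uint64_t(1) << cur) - 1;
                    result = (result << cur) | (m_entropy & mask);
                    m_entropy >>= cur;
                    m_bits_available -= cur;
                    num_bits -= cur;
                }
                return result;
            }
    };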
@@ -66,6 +97,17 @@ namespace ams::kern {
 
                         return selected;
                     }
+
+                    u64 GenerateRandom(u64 max) {
+                        /* Determine the number of bits we need. */
+                        const u64 bits_needed = 1 + (BITSIZEOF(max) - util::CountLeadingZeros(max));
+
+                        /* Generate a random value of the desired bitwidth. */
+                        const u64 rnd = this->GenerateRandomBits(bits_needed);
+
+                        /* Adjust the value to be in range. */
+                        return rnd - ((rnd / max) * max);
+                    }
             };
         public:
             static constexpr size_t MaxDepth = 4;
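GenerateRandom(max) sizes its draw from the bit width of max and reduces into range; rnd - ((rnd / max) * max) is just rnd % max. A worked example with max = 6:

    BITSIZEOF(max) - CountLeadingZeros(6) = 64 - 61 = 3, so bits_needed = 4
    rnd = GenerateRandomBits(4)     /* uniform over [0, 16) */
    rnd % 6                         /* in [0, 6); 0..3 occur 3/16 each, 4..5 occur 2/16 each */

The extra bit over the minimum width presumably dilutes the modulo bias of the reduction, though it does not eliminate it.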
@@ -27,7 +27,7 @@ namespace ams::kern {
             static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
                 const size_t target_pages = std::max(num_pages, align_pages);
                 for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
-                    if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+                    if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                         return static_cast<s32>(i);
                     }
                 }
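GetAlignedBlockIndex sizes the request by max(num_pages, align_pages): a naturally aligned power-of-two block that large is automatically aligned to align_pages. A worked example, assuming 4 KiB pages and block shifts 12 (4 KiB) and 16 (64 KiB), values chosen for illustration:

    num_pages = 3, align_pages = 4  =>  target_pages = 4
    shift 12: 0x1000 / PageSize = 1 page    (too small)
    shift 16: 0x10000 / PageSize = 16 pages (fits; this index is returned)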
@@ -36,7 +36,7 @@ namespace ams::kern {
 
             static constexpr s32 GetBlockIndex(size_t num_pages) {
                 for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
-                    if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+                    if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                         return i;
                     }
                 }
@@ -44,7 +44,7 @@
             }
 
             static constexpr size_t GetBlockSize(size_t index) {
-                return size_t(1) << MemoryBlockPageShifts[index];
+                return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
             }
 
             static constexpr size_t GetBlockNumPages(size_t index) {
@@ -128,13 +128,14 @@ namespace ams::kern {
             size_t m_initial_used_size;
             size_t m_num_blocks;
             Block m_blocks[NumMemoryBlockPageShifts];
+            KPageBitmap::RandomBitGenerator m_rng;
         private:
             void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
             size_t GetNumFreePages() const;
 
             void FreeBlock(KPhysicalAddress block, s32 index);
         public:
-            KPageHeap() : m_heap_address(Null<KPhysicalAddress>), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks() { /* ... */ }
+            KPageHeap() : m_heap_address(Null<KPhysicalAddress>), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks(), m_rng() { /* ... */ }
 
             constexpr KPhysicalAddress GetAddress() const { return m_heap_address; }
             constexpr size_t GetSize() const { return m_heap_size; }
@@ -158,9 +159,25 @@ namespace ams::kern {
                 m_initial_used_size = m_heap_size - free_size - reserved_size;
             }
 
-            KPhysicalAddress AllocateBlock(s32 index, bool random);
+            KPhysicalAddress AllocateBlock(s32 index, bool random) {
+                if (random) {
+                    const size_t block_pages = m_blocks[index].GetNumPages();
+                    return this->AllocateByRandom(index, block_pages, block_pages);
+                } else {
+                    return this->AllocateByLinearSearch(index);
+                }
+            }
+
+            KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+                /* TODO: linear search support? */
+                return this->AllocateByRandom(index, num_pages, align_pages);
+            }
+
             void Free(KPhysicalAddress addr, size_t num_pages);
         private:
+            KPhysicalAddress AllocateByLinearSearch(s32 index);
+            KPhysicalAddress AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
+
             static size_t CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
         public:
             static size_t CalculateManagementOverheadSize(size_t region_size) {
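With the inline definitions above, the old random path becomes a special case of the new one: AllocateBlock with random == true requests a whole block through AllocateByRandom, while random == false keeps the previous first-fit behavior. Schematically (n is block i's full page count):

    heap.AllocateBlock(i, /* random = */ true);      /* == AllocateByRandom(i, n, n) */
    heap.AllocateBlock(i, /* random = */ false);     /* == AllocateByLinearSearch(i) */
    heap.AllocateAligned(i, num_pages, align_pages); /* randomized only, per the TODO */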
@@ -202,7 +202,7 @@ namespace ams::kern {
        Impl *chosen_manager = nullptr;
        KPhysicalAddress allocated_block = Null<KPhysicalAddress>;
        for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) {
-           allocated_block = chosen_manager->AllocateBlock(heap_index, true);
+           allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
            if (allocated_block != Null<KPhysicalAddress>) {
                break;
            }
@@ -213,12 +213,6 @@ namespace ams::kern {
            return Null<KPhysicalAddress>;
        }
 
-       /* If we allocated more than we need, free some. */
-       const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
-       if (allocated_pages > num_pages) {
-           chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
-       }
-
        /* Maintain the optimized memory bitmap, if we should. */
        if (m_has_optimized_process[pool]) {
            chosen_manager->TrackUnoptimizedAllocation(allocated_block, num_pages);
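The caller-side trim is deleted because AllocateAligned now hands back exactly num_pages: as the next file shows, AllocateByRandom frees the leftover pages on both sides of its randomly chosen offset inside the heap itself, so the caller never over-allocates.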
@@ -51,11 +51,11 @@ namespace ams::kern {
        return num_free;
    }
 
-   KPhysicalAddress KPageHeap::AllocateBlock(s32 index, bool random) {
+   KPhysicalAddress KPageHeap::AllocateByLinearSearch(s32 index) {
        const size_t needed_size = m_blocks[index].GetSize();
 
        for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
-           if (const KPhysicalAddress addr = m_blocks[i].PopBlock(random); addr != Null<KPhysicalAddress>) {
+           if (const KPhysicalAddress addr = m_blocks[i].PopBlock(false); addr != Null<KPhysicalAddress>) {
                if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
                    this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
                }
@@ -66,6 +66,84 @@ namespace ams::kern {
        return Null<KPhysicalAddress>;
    }
 
+   KPhysicalAddress KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
+       /* Get the size and required alignment. */
+       const size_t needed_size = num_pages * PageSize;
+       const size_t align_size  = align_pages * PageSize;
+
+       /* Determine meta-alignment of our desired alignment size. */
+       const size_t align_shift = util::CountTrailingZeros(align_size);
+
+       /* Decide on a block to allocate from. */
+       constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
+       {
+           /* By default, we'll want to look at all blocks larger than our current one. */
+           s32 max_blocks = static_cast<s32>(m_num_blocks);
+
+           /* Determine the maximum block we should try to allocate from. */
+           size_t possible_alignments = 0;
+           for (s32 i = index; i < max_blocks; ++i) {
+               /* Add the possible alignments from blocks at the current size. */
+               possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * m_blocks[i].GetNumFreeBlocks();
+
+               /* If there are enough possible alignments, we don't need to look at larger blocks. */
+               if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
+                   max_blocks = i + 1;
+                   break;
+               }
+           }
+
+           /* If we have any possible alignments which require a larger block, we need to pick one. */
+           if (possible_alignments > 0 && index + 1 < max_blocks) {
+               /* Select a random alignment from the possibilities. */
+               const size_t rnd = m_rng.GenerateRandom(possible_alignments);
+
+               /* Determine which block corresponds to the random alignment we chose. */
+               possible_alignments = 0;
+               for (s32 i = index; i < max_blocks; ++i) {
+                   /* Add the possible alignments from blocks at the current size. */
+                   possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * m_blocks[i].GetNumFreeBlocks();
+
+                   /* If the current block gets us to our random choice, use the current block. */
+                   if (rnd < possible_alignments) {
+                       index = i;
+                       break;
+                   }
+               }
+           }
+       }
+
+       /* Pop a block from the index we selected. */
+       if (KPhysicalAddress addr = m_blocks[index].PopBlock(true); addr != Null<KPhysicalAddress>) {
+           /* Determine how much size we have left over. */
+           if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; leftover_size > 0) {
+               /* Determine how many valid alignments we can have. */
+               const size_t possible_alignments = 1 + (leftover_size >> align_shift);
+
+               /* Select a random valid alignment. */
+               const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;
+
+               /* Free memory before the random offset. */
+               if (random_offset != 0) {
+                   this->Free(addr, random_offset / PageSize);
+               }
+
+               /* Advance our block by the random offset. */
+               addr += random_offset;
+
+               /* Free memory after our allocated block. */
+               if (random_offset != leftover_size) {
+                   this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
+               }
+           }
+
+           /* Return the block we allocated. */
+           return addr;
+       }
+
+       return Null<KPhysicalAddress>;
+   }
+
    void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
        do {
            block = m_blocks[index++].PushBlock(block);
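AllocateByRandom randomizes in two stages. First it picks which block size to split: each free block of size S admits 1 + ((S - needed_size) >> align_shift) distinct aligned placements, and the block list is chosen with probability proportional to those counts, scanning larger sizes only until at least MinimumPossibleAlignmentsForRandomAllocation placements are in play. Second, after popping a block, it picks one of the aligned offsets inside it uniformly at random and returns the slack on both sides to the free lists. A self-contained sketch of the second stage, with std::mt19937_64 standing in for the kernel's RandomBitGenerator and illustrative sizes (a 64 KiB block serving a one-page request):

    /* Sketch of the random-offset stage; not the kernel's code. */
    #include <cstddef>
    #include <random>

    int main() {
        constexpr std::size_t PageSize    = 0x1000;
        constexpr std::size_t block_size  = 16 * PageSize;  /* popped 64 KiB block */
        constexpr std::size_t needed_size = 1 * PageSize;   /* num_pages = 1 */
        constexpr std::size_t align_shift = 12;             /* ctz(align_pages * PageSize), align_pages = 1 */

        const std::size_t leftover_size = block_size - needed_size;       /* 15 pages of slack */
        const std::size_t possibilities = 1 + (leftover_size >> align_shift); /* 16 valid placements */

        std::mt19937_64 rng{0xC0FFEE};
        const std::size_t random_offset = (rng() % possibilities) << align_shift;

        /* Pages in [0, random_offset) and [random_offset + needed_size, block_size) are freed
           back to the heap; the allocation lands at block_start + random_offset, one of 16
           equally likely aligned positions. */
        return static_cast<int>(random_offset / PageSize);
    }

Because every free block of every eligible size is weighted by its placement count, each valid aligned address in the heap ends up approximately equally likely to be returned.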
@@ -3608,13 +3608,13 @@ namespace ams::kern {
 
        /* Allocate the start page as needed. */
        if (aligned_src_start < mapping_src_start) {
-           start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
+           start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
            R_UNLESS(start_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
        }
 
        /* Allocate the end page as needed. */
        if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
-           end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 0, m_allocate_option);
+           end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
            R_UNLESS(end_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
        }
 
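The final hunk updates the two partial-page allocations to request an alignment of one page instead of zero. With the new path the alignment argument is expressed in pages and feeds CountTrailingZeros(align_pages * PageSize), so a zero alignment would presumably no longer be meaningful; one page is the natural minimum for a single-page allocation.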