Mirror of https://github.com/Atmosphere-NX/Atmosphere.git

fs: fix clang-build, os: StackGuardManager

Michael Scire 2022-03-12 15:05:43 -08:00 committed by SciresM
parent be9338eb33
commit cb3d20ef79
13 changed files with 264 additions and 28 deletions


@@ -58,7 +58,7 @@ namespace ams::fssystem {
             s64 end_offset;
 
             constexpr bool IsInclude(s64 offset) const {
-                return this->start_offset <= offset & offset < this->end_offset;
+                return this->start_offset <= offset && offset < this->end_offset;
             }
 
             constexpr bool IsInclude(s64 offset, s64 size) const {
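
Note: the one-character change above is the fix for clang's -Wbitwise-instead-of-logical warning. Relational operators bind tighter than bitwise AND, so both spellings compute the same boolean value here; the difference is that & always evaluates both operands while && short-circuits. A minimal standalone sketch of the distinction (using int64_t in place of Atmosphere's s64 alias):

    #include <cstdint>

    /* Both comparisons are always evaluated; clang warns on this form. */
    constexpr bool IsIncludeBitwise(int64_t start, int64_t end, int64_t offset) {
        return (start <= offset) & (offset < end);
    }

    /* The second comparison is skipped when the first is false. */
    constexpr bool IsIncludeLogical(int64_t start, int64_t end, int64_t offset) {
        return start <= offset && offset < end;
    }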


@@ -370,6 +370,8 @@ namespace ams::fssystem {
                 s64 appropriate_virtual_offset = offset;
                 R_TRY(this->OperatePerEntry(offset, table_offsets.end_offset - offset, [&] (bool *out_continuous, const Entry &entry, s64 virtual_data_size, s64 data_offset, s64 read_size) -> Result {
+                    AMS_UNUSED(virtual_data_size);
+
                     /* Determine the physical extents. */
                     s64 physical_offset, physical_size;
                     if (CompressionTypeUtility::IsRandomAccessible(entry.compression_type)) {
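
Note: the added AMS_UNUSED call consumes a lambda parameter this callback never reads, silencing clang's unused-parameter diagnostics. Atmosphere's actual definition of the macro may differ; a minimal stand-in with the same effect is a variadic no-op:

    /* Accept any arguments and deliberately do nothing with them. */
    template<typename... Ts>
    constexpr void UnusedImpl(Ts &&...) { /* no-op */ }

    #define AMS_UNUSED(...) UnusedImpl(__VA_ARGS__)
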
@@ -528,7 +530,7 @@ namespace ams::fssystem {
                 for (s32 entry_idx = 0; entry_idx < entry_count; ++entry_idx) {
                     /* Determine the current read size. */
                     bool will_use_pooled_buffer = false;
-                    const size_t cur_read_size = [&] ALWAYS_INLINE_LAMBDA () -> size_t {
+                    const size_t cur_read_size = [&] () ALWAYS_INLINE_LAMBDA -> size_t {
                         if (const size_t target_entry_size = static_cast<size_t>(entries[entry_idx].physical_size) + static_cast<size_t>(entries[entry_idx].gap_from_prev); target_entry_size <= pooled_buffer.GetSize()) {
                             /* We'll be using the pooled buffer. */
                             will_use_pooled_buffer = true;
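
Note: moving ALWAYS_INLINE_LAMBDA from before the parameter list to after it is another clang-compatibility fix. GCC tolerates a GNU attribute between a lambda's capture list and its parameters, but clang only parses it in the declarator, after the parameter list. Assuming the macro expands to the GNU always_inline attribute, the accepted form is:

    #include <cstddef>

    #define ALWAYS_INLINE_LAMBDA __attribute__((always_inline))

    size_t Example(size_t n) {
        /* Accepted by both GCC and clang: attribute follows the parameter list. */
        const auto reader = [&] () ALWAYS_INLINE_LAMBDA -> size_t { return n; };
        return reader();
    }
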
@@ -687,11 +689,11 @@ namespace ams::fssystem {
                     const s64 required_access_physical_end = required_access_physical_offset + required_access_physical_size;
                     if (required_access_physical_size > 0) {
                         const bool required_by_gap             = !(required_access_physical_end <= physical_offset && physical_offset <= util::AlignUp(required_access_physical_end, CompressionBlockAlignment));
-                        const bool required_by_continuous_size = ((physical_size + physical_offset) - required_access_physical_end) + required_access_physical_size > m_continuous_reading_size_max;
+                        const bool required_by_continuous_size = ((physical_size + physical_offset) - required_access_physical_end) + required_access_physical_size > static_cast<s64>(m_continuous_reading_size_max);
                         const bool required_by_entry_count     = entry_count == EntriesCountMax;
                         if (required_by_gap || required_by_continuous_size || required_by_entry_count) {
                             /* Check that our planned access is sane. */
-                            AMS_ASSERT(!will_allocate_pooled_buffer || required_access_physical_size <= m_continuous_reading_size_max);
+                            AMS_ASSERT(!will_allocate_pooled_buffer || required_access_physical_size <= static_cast<s64>(m_continuous_reading_size_max));
 
                             /* Perform the required read. */
                             R_TRY(PerformRequiredRead());
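
Note: the static_cast<s64> insertions here and in the hunks below address clang's -Wsign-compare. The bounds (m_continuous_reading_size_max, m_block_size_max) are unsigned, presumably size_t, while the offsets and sizes compared against them are signed s64; left implicit, the usual arithmetic conversions would turn a negative signed value into a huge unsigned one. Casting the bound, which is known to fit in s64, keeps the comparison signed:

    #include <cstddef>
    #include <cstdint>

    /* Without the cast, required would convert to unsigned and a negative
     * value would compare greater than any reasonable bound. */
    bool ExceedsBound(int64_t required, size_t bound_max) {
        return required > static_cast<int64_t>(bound_max);
    }
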
@@ -716,9 +718,9 @@ namespace ams::fssystem {
                     if (CompressionTypeUtility::IsDataStorageAccessRequired(entry.compression_type)) {
                         /* If the data is compressed, ensure the access is sane. */
                         if (entry.compression_type != CompressionType_None) {
                             R_UNLESS(data_offset == 0, fs::ResultInvalidOffset());
                             R_UNLESS(virtual_data_size == read_size, fs::ResultInvalidSize());
-                            R_UNLESS(entry.GetPhysicalSize() <= m_block_size_max, fs::ResultUnexpectedInCompressedStorageD());
+                            R_UNLESS(entry.GetPhysicalSize() <= static_cast<s64>(m_block_size_max), fs::ResultUnexpectedInCompressedStorageD());
                         }
 
                         /* Update the required access parameters. */
@@ -926,16 +928,16 @@ namespace ams::fssystem {
                     head_range = {
                         .virtual_offset              = entry.virt_offset,
                         .virtual_size                = virtual_data_size,
-                        .physical_size               = entry.phys_size,
+                        .physical_size               = static_cast<u32>(entry.phys_size),
                         .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
                     };
 
                     /* If required, set the tail range. */
-                    if ((offset + read_size) <= entry.virt_offset + virtual_data_size) {
+                    if (static_cast<s64>(offset + read_size) <= entry.virt_offset + virtual_data_size) {
                         tail_range = {
                             .virtual_offset              = entry.virt_offset,
                             .virtual_size                = virtual_data_size,
-                            .physical_size               = entry.phys_size,
+                            .physical_size               = static_cast<u32>(entry.phys_size),
                             .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
                         };
                         is_tail_set = true;
@@ -955,7 +957,7 @@ namespace ams::fssystem {
                         tail_range = {
                             .virtual_offset              = entry.virt_offset,
                             .virtual_size                = virtual_data_size,
-                            .physical_size               = entry.phys_size,
+                            .physical_size               = static_cast<u32>(entry.phys_size),
                             .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
                         };
 
@@ -986,15 +988,15 @@ namespace ams::fssystem {
                 }
 
                 /* Determine our alignment. */
-                const bool head_unaligned = head_range.is_block_alignment_required && (cur_offset != head_range.virtual_offset || cur_size < head_range.virtual_size);
-                const bool tail_unaligned = [&] ALWAYS_INLINE_LAMBDA () -> bool {
+                const bool head_unaligned = head_range.is_block_alignment_required && (cur_offset != head_range.virtual_offset || static_cast<s64>(cur_size) < head_range.virtual_size);
+                const bool tail_unaligned = [&] () ALWAYS_INLINE_LAMBDA -> bool {
                     if (tail_range.is_block_alignment_required) {
-                        if (cur_size + cur_offset == tail_range.GetEndVirtualOffset()) {
+                        if (static_cast<s64>(cur_size + cur_offset) == tail_range.GetEndVirtualOffset()) {
                             return false;
                         } else if (!head_unaligned) {
                             return true;
                         } else {
-                            return cur_size + cur_offset < head_range.GetEndVirtualOffset();
+                            return static_cast<s64>(cur_size + cur_offset) < head_range.GetEndVirtualOffset();
                         }
                     } else {
                         return false;
@@ -1188,7 +1190,7 @@ namespace ams::fssystem {
 
                 /* Determine the current access extents. */
                 s64 cur_offset = head_range.virtual_offset + util::AlignDown<s64>(access_offset - head_range.virtual_offset, m_cache_size_unk_0);
-                while (cur_offset < head_range.GetEndVirtualOffset() && cur_offset < offset + size) {
+                while (cur_offset < head_range.GetEndVirtualOffset() && cur_offset < static_cast<s64>(offset + size)) {
                     /* Find the relevant entry. */
                     fs::IBufferManager::MemoryRange memory_range = {};
                     CacheEntry entry = {};
@@ -1240,7 +1242,7 @@ namespace ams::fssystem {
                         new_head_range = {
                             .virtual_offset              = entry.virt_offset,
                             .virtual_size                = virtual_data_size,
-                            .physical_size               = entry.phys_size,
+                            .physical_size               = static_cast<u32>(entry.phys_size),
                             .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
                         };
                     }
@@ -1306,7 +1308,7 @@ namespace ams::fssystem {
                     tail_range = {
                         .virtual_offset              = entry.virt_offset,
                         .virtual_size                = virtual_data_size,
-                        .physical_size               = entry.phys_size,
+                        .physical_size               = static_cast<u32>(entry.phys_size),
                         .is_block_alignment_required = CompressionTypeUtility::IsBlockAlignmentRequired(entry.compression_type),
                     };
 
@@ -1406,6 +1408,8 @@ namespace ams::fssystem {
                 }
 
                 virtual Result OperateRange(void *dst, size_t dst_size, fs::OperationId op_id, s64 offset, s64 size, const void *src, size_t src_size) override {
+                    AMS_UNUSED(src, src_size);
+
                     /* Check pre-conditions. */
                     AMS_ASSERT(offset >= 0);
                     AMS_ASSERT(size >= 0);


@@ -24,10 +24,10 @@ namespace ams::fssystem::impl {
         NON_COPYABLE(BlockCacheManager);
         NON_MOVEABLE(BlockCacheManager);
     public:
-        using MemoryRange     = AllocatorType::MemoryRange;
+        using MemoryRange     = typename AllocatorType::MemoryRange;
         using CacheIndex      = s32;
 
-        using BufferAttribute = AllocatorType::BufferAttribute;
+        using BufferAttribute = typename AllocatorType::BufferAttribute;
 
         static constexpr CacheIndex InvalidCacheIndex = -1;
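
Note: the added typename keywords are dependent-name fixes. Inside the BlockCacheManager template, AllocatorType::MemoryRange depends on a template parameter, and clang releases without P0634 (the C++20 relaxation of typename rules, which GCC already implements) still require the disambiguator. Reduced example:

    template<typename AllocatorType>
    class CacheSketch {
        public:
            /* Dependent name: typename marks the nested name as a type. */
            using MemoryRange = typename AllocatorType::MemoryRange;
    };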


@@ -19,6 +19,12 @@
 
 namespace ams::os::impl {
 
+    enum AddressAllocationResult {
+        AddressAllocationResult_Success,
+        AddressAllocationResult_OutOfMemory,
+        AddressAllocationResult_OutOfSpace,
+    };
+
     template<std::unsigned_integral AddressType, std::unsigned_integral SizeType>
     class AddressSpaceAllocatorBase {
         NON_COPYABLE(AddressSpaceAllocatorBase);


@@ -17,6 +17,7 @@
 #include <stratosphere.hpp>
 #include "os_rng_manager_impl.hpp"
 #include "os_thread_manager_types.hpp"
+#include "os_stack_guard_manager_types.hpp"
 #include "os_tick_manager_impl.hpp"
 #include "os_aslr_space_manager_types.hpp"
 #include "os_tls_manager_types.hpp"
@@ -29,7 +30,7 @@ namespace ams::os::impl {
         private:
             RngManager m_rng_manager{};
             AslrSpaceManager m_aslr_space_manager{};
-            /* TODO */
+            StackGuardManager m_stack_guard_manager;
             ThreadManager m_thread_manager{};
             //TlsManager m_tls_manager{};
             TickManager m_tick_manager{};
@@ -42,6 +43,7 @@ namespace ams::os::impl {
             constexpr ALWAYS_INLINE RngManager &GetRngManager() { return m_rng_manager; }
             constexpr ALWAYS_INLINE AslrSpaceManager &GetAslrSpaceManager() { return m_aslr_space_manager; }
             constexpr ALWAYS_INLINE ThreadManager &GetThreadManager() { return m_thread_manager; }
+            constexpr ALWAYS_INLINE StackGuardManager &GetStackGuardManager() { return m_stack_guard_manager; }
             //constexpr ALWAYS_INLINE TlsManager &GetTlsManager() { return m_tls_manager; }
             constexpr ALWAYS_INLINE TickManager &GetTickManager() { return m_tick_manager; }
             constexpr ALWAYS_INLINE VammManager &GetVammManager() { return m_vamm_manager; }


@@ -0,0 +1,26 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
#include "os_resource_manager.hpp"
namespace ams::os::impl {

    ALWAYS_INLINE StackGuardManager &GetStackGuardManager() {
        return GetResourceManager().GetStackGuardManager();
    }

}


@@ -0,0 +1,36 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
namespace ams::os::impl {

    class StackGuardManagerHorizonImpl {
        private:
            static u64 GetStackInfo(svc::InfoType type) {
                u64 value;
                R_ABORT_UNLESS(svc::GetInfo(std::addressof(value), type, svc::PseudoHandle::CurrentProcess, 0));

                AMS_ASSERT(value <= std::numeric_limits<size_t>::max());
                return static_cast<u64>(static_cast<size_t>(value));
            }
        public:
            static u64 GetStackGuardBeginAddress() { return GetStackInfo(svc::InfoType_StackRegionAddress); }

            /* The kernel reports the stack region as (address, size); the allocator
             * expects an exclusive end address, so combine the two. */
            static u64 GetStackGuardEndAddress() { return GetStackGuardBeginAddress() + GetStackInfo(svc::InfoType_StackRegionSize); }
    };

    using StackGuardManagerImpl = StackGuardManagerHorizonImpl;

}


@@ -0,0 +1,29 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
namespace ams::os::impl {

    class StackGuardManagerLinuxImpl {
        public:
            static u64 GetStackGuardBeginAddress() { return 256_MB; }
            static u64 GetStackGuardEndAddress() { return 256_MB + 1_GB; }
    };

    using StackGuardManagerImpl = StackGuardManagerLinuxImpl;

}


@@ -0,0 +1,29 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
namespace ams::os::impl {

    class StackGuardManagerMacosImpl {
        public:
            static u64 GetStackGuardBeginAddress() { return 256_MB; }
            static u64 GetStackGuardEndAddress() { return 256_MB + 1_GB; }
    };

    using StackGuardManagerImpl = StackGuardManagerMacosImpl;

}


@@ -0,0 +1,29 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
namespace ams::os::impl {

    class StackGuardManagerWindowsImpl {
        public:
            static u64 GetStackGuardBeginAddress() { return 256_MB; }
            static u64 GetStackGuardEndAddress() { return 256_MB + 1_GB; }
    };

    using StackGuardManagerImpl = StackGuardManagerWindowsImpl;

}


@@ -0,0 +1,81 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
#include "os_address_space_allocator.hpp"

#if defined(ATMOSPHERE_OS_HORIZON)
    #include "os_stack_guard_manager_impl.os.horizon.hpp"
#elif defined(ATMOSPHERE_OS_WINDOWS)
    #include "os_stack_guard_manager_impl.os.windows.hpp"
#elif defined(ATMOSPHERE_OS_LINUX)
    #include "os_stack_guard_manager_impl.os.linux.hpp"
#elif defined(ATMOSPHERE_OS_MACOS)
    #include "os_stack_guard_manager_impl.os.macos.hpp"
#else
    #error "Unknown OS for StackGuardManagerImpl"
#endif

namespace ams::os::impl {

    constexpr inline size_t StackGuardSize = 4 * os::MemoryPageSize;

    class StackGuardManager {
        private:
            StackGuardManagerImpl m_impl;
            AddressSpaceAllocator m_allocator;
        public:
            StackGuardManager() : m_impl(), m_allocator(m_impl.GetStackGuardBeginAddress(), m_impl.GetStackGuardEndAddress(), StackGuardSize, nullptr, 0) {
                /* ... */
            }

            void *AllocateStackGuardSpace(size_t size) {
                return reinterpret_cast<void *>(m_allocator.AllocateSpace(size, os::MemoryPageSize, 0));
            }

            bool CheckGuardSpace(uintptr_t address, size_t size) {
                return m_allocator.CheckGuardSpace(address, size, StackGuardSize);
            }

            AddressAllocationResult MapAtRandomAddress(void **out, bool (*map)(const void *, const void *, size_t), void (*unmap)(const void *, const void *, size_t), const void *address, size_t size) {
                /* Try to map up to 0x40 times. */
                constexpr int TryCountMax = 0x40;
                for (auto i = 0; i < TryCountMax; ++i) {
                    /* Get stack guard space. */
                    void * const space = this->AllocateStackGuardSpace(size);
                    if (space == nullptr) {
                        return AddressAllocationResult_OutOfMemory;
                    }

                    /* Try to map. */
                    if (map(space, address, size)) {
                        /* Check that the guard space is still there. */
                        if (this->CheckGuardSpace(reinterpret_cast<uintptr_t>(space), size)) {
                            *out = space;
                            return AddressAllocationResult_Success;
                        } else {
                            /* We need to retry. */
                            unmap(space, address, size);
                        }
                    }
                }

                /* We failed. */
                return AddressAllocationResult_OutOfSpace;
            }
    };

}
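
To illustrate the retry loop above, a hedged usage sketch: MapFixed and UnmapFixed are hypothetical host primitives standing in for mmap/VirtualAlloc-style calls, not part of this commit. The manager hands out randomized guard-bracketed spaces until a mapping lands with its guard pages intact, or gives up after 0x40 attempts:

    /* Hypothetical host primitives; a real port would wrap the platform mapper. */
    bool MapFixed(const void *dst, const void *src, size_t size) { /* stub */ return true; }
    void UnmapFixed(const void *dst, const void *src, size_t size) { /* stub */ }

    void *MapStackWithGuards(ams::os::impl::StackGuardManager &mgr, const void *stack, size_t size) {
        void *mapped = nullptr;
        const auto res = mgr.MapAtRandomAddress(std::addressof(mapped), MapFixed, UnmapFixed, stack, size);
        return res == ams::os::impl::AddressAllocationResult_Success ? mapped : nullptr;
    }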


@@ -33,12 +33,6 @@ namespace ams::os::impl {
 
     namespace {
 
-        enum AddressAllocationResult {
-            AddressAllocationResult_Success,
-            AddressAllocationResult_OutOfMemory,
-            AddressAllocationResult_OutOfSpace,
-        };
-
         class AddressRegion : public util::IntrusiveRedBlackTreeBaseNode<AddressRegion> {
             private:
                 uintptr_t m_address;
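
Note: the enum deleted here is the same one added to the allocator header earlier in this commit. Declared in an anonymous namespace, it had internal linkage and was visible only inside this translation unit, so StackGuardManager could not name it as a return type from a header. Reduced illustration:

    namespace {
        /* Internal linkage: each translation unit gets its own distinct copy,
         * so a header shared across components cannot refer to this type. */
        enum LocalResult { LocalResult_Success };
    }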


@@ -100,7 +100,7 @@ namespace ams::util {
             }
     };
 
-    template<typename F, typename = std::enable_if<!std::is_member_pointer<F>::value>::type>
+    template<typename F, typename = typename std::enable_if<!std::is_member_pointer<F>::value>::type>
     constexpr ALWAYS_INLINE auto MakeIFunction(F f) {
         static_assert(!std::is_member_pointer<F>::value);
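
The same dependent-name fix as in BlockCacheManager: std::enable_if<...>::type in a default template argument needs typename under clang builds without P0634. As an aside, the C++14 alias std::enable_if_t already names the nested type, so an equivalent modern spelling sidesteps the issue entirely:

    #include <type_traits>

    template<typename F, typename = std::enable_if_t<!std::is_member_pointer_v<F>>>
    constexpr auto MakeIFunctionSketch(F f) { return f; }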