Atmosphere/libraries/libmesosphere/source/kern_k_capabilities.cpp

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KCapabilities::Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table) {
/* We're initializing an initial process. */
m_svc_access_flags.Reset();
m_irq_access_flags.Reset();
m_debug_capabilities = {0};
m_handle_table_size = 0;
m_intended_kernel_version = {0};
m_program_type = 0;
/* Initial processes may run on all cores. */
constexpr u64 VirtMask = cpu::VirtualCoreMask;
constexpr u64 PhysMask = cpu::ConvertVirtualCoreMaskToPhysical(VirtMask);
m_core_mask = VirtMask;
m_phys_core_mask = PhysMask;
/* Initial processes may use any user priority they like. */
m_priority_mask = ~0xFul;
/* Here, Nintendo sets the kernel version to the current kernel version. */
/* We will follow suit and set the version to the highest supported kernel version. */
m_intended_kernel_version.Set<KernelVersion::MajorVersion>(ams::svc::SupportedKernelMajorVersion);
m_intended_kernel_version.Set<KernelVersion::MinorVersion>(ams::svc::SupportedKernelMinorVersion);
/* Parse the capabilities array. */
R_RETURN(this->SetCapabilities(caps, num_caps, page_table));
}
Result KCapabilities::Initialize(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table) {
/* We're initializing a user process. */
m_svc_access_flags.Reset();
m_irq_access_flags.Reset();
m_debug_capabilities = {0};
m_handle_table_size = 0;
m_intended_kernel_version = {0};
m_program_type = 0;
/* User processes must specify what cores/priorities they can use. */
m_core_mask = 0;
m_priority_mask = 0;
/* Parse the user capabilities array. */
R_RETURN(this->SetCapabilities(user_caps, num_caps, page_table));
}
Result KCapabilities::SetCorePriorityCapability(const util::BitPack32 cap) {
/* We can't set core/priority if we've already set them. */
R_UNLESS(m_core_mask == 0, svc::ResultInvalidArgument());
R_UNLESS(m_priority_mask == 0, svc::ResultInvalidArgument());
/* Validate the core/priority. */
const auto min_core = cap.Get<CorePriority::MinimumCoreId>();
const auto max_core = cap.Get<CorePriority::MaximumCoreId>();
const auto max_prio = cap.Get<CorePriority::LowestThreadPriority>();
const auto min_prio = cap.Get<CorePriority::HighestThreadPriority>();
R_UNLESS(min_core <= max_core, svc::ResultInvalidCombination());
R_UNLESS(min_prio <= max_prio, svc::ResultInvalidCombination());
R_UNLESS(max_core < cpu::NumVirtualCores, svc::ResultInvalidCoreId());
MESOSPHERE_ASSERT(max_prio < BITSIZEOF(u64));
/* Set core mask. */
for (auto core_id = min_core; core_id <= max_core; core_id++) {
m_core_mask |= (1ul << core_id);
}
MESOSPHERE_ASSERT((m_core_mask & cpu::VirtualCoreMask) == m_core_mask);
/* Set physical core mask. */
m_phys_core_mask = cpu::ConvertVirtualCoreMaskToPhysical(m_core_mask);
2020-02-19 12:55:00 +00:00
/* Set priority mask. */
for (auto prio = min_prio; prio <= max_prio; prio++) {
m_priority_mask |= (1ul << prio);
}
/* We must have some core/priority we can use. */
R_UNLESS(m_core_mask != 0, svc::ResultInvalidArgument());
R_UNLESS(m_priority_mask != 0, svc::ResultInvalidArgument());
/* Processes must not have access to kernel thread priorities. */
R_UNLESS((m_priority_mask & 0xF) == 0, svc::ResultInvalidArgument());
R_SUCCEED();
}
Result KCapabilities::SetSyscallMaskCapability(const util::BitPack32 cap, u32 &set_svc) {
/* Validate the index. */
const auto mask = cap.Get<SyscallMask::Mask>();
const auto index = cap.Get<SyscallMask::Index>();
const u32 index_flag = (1u << index);
R_UNLESS((set_svc & index_flag) == 0, svc::ResultInvalidCombination());
set_svc |= index_flag;
/* Set SVCs. */
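/* Each syscall-mask word covers SyscallMask::Mask::Count consecutive SVC ids, */
/* so the descriptor with index N grants ids in [N * Count, (N + 1) * Count). */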
for (size_t i = 0; i < SyscallMask::Mask::Count; i++) {
const u32 svc_id = SyscallMask::Mask::Count * index + i;
if (mask & (1u << i)) {
R_UNLESS(this->SetSvcAllowed(svc_id), svc::ResultOutOfRange());
}
}
R_SUCCEED();
}
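/* MapRange capabilities arrive as a pair of words: the first (cap) holds the low physical */
/* address bits and the read-only flag, while the second (size_cap) holds the page count, */
/* the normal-vs-io flag, and (when large physical address capabilities are enabled) the */
/* high address bits. SetCapabilities() below always passes the pair in together. */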
Result KCapabilities::MapRange(const util::BitPack32 cap, const util::BitPack32 size_cap, KProcessPageTable *page_table) {
/* Get/validate address/size */
#if defined(MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES)
const u64 phys_addr = static_cast<u64>(cap.Get<MapRange::Address>() | (size_cap.Get<MapRangeSize::AddressHigh>() << MapRange::Address::Count)) * PageSize;
#else
const u64 phys_addr = static_cast<u64>(cap.Get<MapRange::Address>()) * PageSize;
/* Validate reserved bits are unused. */
R_UNLESS(size_cap.Get<MapRangeSize::Reserved>() == 0, svc::ResultOutOfRange());
#endif
2020-02-19 12:55:00 +00:00
const size_t num_pages = size_cap.Get<MapRangeSize::Pages>();
const size_t size = num_pages * PageSize;
R_UNLESS(phys_addr == GetInteger(KPhysicalAddress(phys_addr)), svc::ResultInvalidAddress());
R_UNLESS(num_pages != 0, svc::ResultInvalidSize());
R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress());
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, svc::ResultInvalidAddress());
/* Do the mapping. */
const KMemoryPermission perm = cap.Get<MapRange::ReadOnly>() ? KMemoryPermission_UserRead : KMemoryPermission_UserReadWrite;
if (size_cap.Get<MapRangeSize::Normal>()) {
R_RETURN(page_table->MapStatic(phys_addr, size, perm));
} else {
R_RETURN(page_table->MapIo(phys_addr, size, perm));
}
}
Result KCapabilities::MapIoPage(const util::BitPack32 cap, KProcessPageTable *page_table) {
/* Get/validate address/size */
const u64 phys_addr = cap.Get<MapIoPage::Address>() * PageSize;
const size_t num_pages = 1;
const size_t size = num_pages * PageSize;
R_UNLESS(phys_addr == GetInteger(KPhysicalAddress(phys_addr)), svc::ResultInvalidAddress());
R_UNLESS(num_pages != 0, svc::ResultInvalidSize());
R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress());
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, svc::ResultInvalidAddress());
/* Do the mapping. */
R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
}
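/* ProcessMapRegionCapability decodes up to three region slots from a MapRegion word and */
/* invokes the given functor for each requested region; MapRegion() uses it to perform the */
/* actual mapping, while CheckMapRegion() uses it to verify that a mapping could succeed. */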
template<typename F>
ALWAYS_INLINE Result KCapabilities::ProcessMapRegionCapability(const util::BitPack32 cap, F f) {
/* Define the allowed memory regions. */
constexpr const KMemoryRegionType MemoryRegions[] = {
KMemoryRegionType_None,
KMemoryRegionType_KernelTraceBuffer,
KMemoryRegionType_OnMemoryBootImage,
KMemoryRegionType_DTB,
};
/* Extract regions/read only. */
const RegionType types[3] = { cap.Get<MapRegion::Region0>(), cap.Get<MapRegion::Region1>(), cap.Get<MapRegion::Region2>(), };
const bool ro[3] = { cap.Get<MapRegion::ReadOnly0>(), cap.Get<MapRegion::ReadOnly1>(), cap.Get<MapRegion::ReadOnly2>(), };
for (size_t i = 0; i < util::size(types); i++) {
const auto type = types[i];
const auto perm = ro[i] ? KMemoryPermission_UserRead : KMemoryPermission_UserReadWrite;
switch (type) {
case RegionType::NoMapping:
break;
case RegionType::KernelTraceBuffer:
/* NOTE: This does not match official, but is used to make pre-processing hbl capabilities in userland unnecessary. */
/* If ktrace isn't enabled, allow ktrace to succeed without mapping anything. */
if constexpr (!ams::kern::IsKTraceEnabled) {
break;
}
/* Otherwise, fall through and map the kernel trace buffer like the other regions. */
[[fallthrough]];
case RegionType::OnMemoryBootImage:
case RegionType::DTB:
R_TRY(f(MemoryRegions[static_cast<u32>(type)], perm));
break;
default:
R_THROW(svc::ResultNotFound());
}
}
R_SUCCEED();
}
Result KCapabilities::MapRegion(const util::BitPack32 cap, KProcessPageTable *page_table) {
/* Map each region into the process's page table. */
R_RETURN(ProcessMapRegionCapability(cap, [page_table] ALWAYS_INLINE_LAMBDA (KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
R_RETURN(page_table->MapRegion(region_type, perm));
}));
}
Result KCapabilities::CheckMapRegion(const util::BitPack32 cap) {
/* Check that each region has a physical backing store. */
R_RETURN(ProcessMapRegionCapability(cap, [] ALWAYS_INLINE_LAMBDA (KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
MESOSPHERE_UNUSED(perm);
R_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(region_type) != nullptr, svc::ResultOutOfRange());
R_SUCCEED();
}));
}
Result KCapabilities::SetInterruptPairCapability(const util::BitPack32 cap) {
/* Extract interrupts. */
const u32 ids[2] = { cap.Get<InterruptPair::InterruptId0>(), cap.Get<InterruptPair::InterruptId1>(), };
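/* A slot equal to PaddingInterruptId is unused; only defined interrupts may be permitted. */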
for (size_t i = 0; i < util::size(ids); i++) {
if (ids[i] != PaddingInterruptId) {
R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(ids[i]), svc::ResultOutOfRange());
R_UNLESS(this->SetInterruptPermitted(ids[i]), svc::ResultOutOfRange());
}
}
R_SUCCEED();
}
Result KCapabilities::SetProgramTypeCapability(const util::BitPack32 cap) {
/* Validate. */
R_UNLESS(cap.Get<ProgramType::Reserved>() == 0, svc::ResultReservedUsed());
m_program_type = cap.Get<ProgramType::Type>();
R_SUCCEED();
}
Result KCapabilities::SetKernelVersionCapability(const util::BitPack32 cap) {
/* Ensure we haven't set our version before. */
R_UNLESS(m_intended_kernel_version.Get<KernelVersion::MajorVersion>() == 0, svc::ResultInvalidArgument());
/* Set, ensure that we set a valid version. */
m_intended_kernel_version = cap;
R_UNLESS(m_intended_kernel_version.Get<KernelVersion::MajorVersion>() != 0, svc::ResultInvalidArgument());
R_SUCCEED();
}
Result KCapabilities::SetHandleTableCapability(const util::BitPack32 cap) {
/* Validate. */
R_UNLESS(cap.Get<HandleTable::Reserved>() == 0, svc::ResultReservedUsed());
m_handle_table_size = cap.Get<HandleTable::Size>();
R_SUCCEED();
}
Result KCapabilities::SetDebugFlagsCapability(const util::BitPack32 cap) {
/* Validate. */
R_UNLESS(cap.Get<DebugFlags::Reserved>() == 0, svc::ResultReservedUsed());
m_debug_capabilities.Set<DebugFlags::AllowDebug>(cap.Get<DebugFlags::AllowDebug>());
m_debug_capabilities.Set<DebugFlags::ForceDebug>(cap.Get<DebugFlags::ForceDebug>());
R_SUCCEED();
}
Result KCapabilities::SetCapability(const util::BitPack32 cap, u32 &set_flags, u32 &set_svc, KProcessPageTable *page_table) {
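/* Dispatch on the capability type. Types covered by InitializeOnceFlags may appear at most */
/* once per capability set; set_flags tracks which of them have already been processed. */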
/* Validate this is a capability we can act on. */
const auto type = GetCapabilityType(cap);
R_UNLESS(type != CapabilityType::Invalid, svc::ResultInvalidArgument());
/* If the type is padding, we have no work to do. */
R_SUCCEED_IF(type == CapabilityType::Padding);
/* Check that we haven't already processed this capability. */
const auto flag = GetCapabilityFlag(type);
R_UNLESS(((set_flags & InitializeOnceFlags) & flag) == 0, svc::ResultInvalidCombination());
set_flags |= flag;
/* Process the capability. */
switch (type) {
case CapabilityType::CorePriority: R_RETURN(this->SetCorePriorityCapability(cap));
case CapabilityType::SyscallMask: R_RETURN(this->SetSyscallMaskCapability(cap, set_svc));
case CapabilityType::MapIoPage: R_RETURN(this->MapIoPage(cap, page_table));
case CapabilityType::MapRegion: R_RETURN(this->MapRegion(cap, page_table));
case CapabilityType::InterruptPair: R_RETURN(this->SetInterruptPairCapability(cap));
case CapabilityType::ProgramType: R_RETURN(this->SetProgramTypeCapability(cap));
case CapabilityType::KernelVersion: R_RETURN(this->SetKernelVersionCapability(cap));
case CapabilityType::HandleTable: R_RETURN(this->SetHandleTableCapability(cap));
case CapabilityType::DebugFlags: R_RETURN(this->SetDebugFlagsCapability(cap));
default: R_THROW(svc::ResultInvalidArgument());
}
}
Result KCapabilities::SetCapabilities(const u32 *caps, s32 num_caps, KProcessPageTable *page_table) {
u32 set_flags = 0, set_svc = 0;
for (s32 i = 0; i < num_caps; i++) {
const util::BitPack32 cap = { caps[i] };
if (GetCapabilityType(cap) == CapabilityType::MapRange) {
/* Check that the pair cap exists. */
R_UNLESS((++i) < num_caps, svc::ResultInvalidCombination());
/* Check the pair cap is a map range cap. */
const util::BitPack32 size_cap = { caps[i] };
R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange, svc::ResultInvalidCombination());
/* Map the range. */
R_TRY(this->MapRange(cap, size_cap, page_table));
} else {
R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
}
}
R_SUCCEED();
}
Result KCapabilities::SetCapabilities(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table) {
u32 set_flags = 0, set_svc = 0;
for (s32 i = 0; i < num_caps; i++) {
/* Read the cap from userspace. */
u32 cap0;
R_TRY(user_caps.CopyArrayElementTo(std::addressof(cap0), i));
const util::BitPack32 cap = { cap0 };
if (GetCapabilityType(cap) == CapabilityType::MapRange) {
/* Check that the pair cap exists. */
R_UNLESS((++i) < num_caps, svc::ResultInvalidCombination());
/* Read the second cap from userspace. */
u32 cap1;
R_TRY(user_caps.CopyArrayElementTo(std::addressof(cap1), i));
/* Check the pair cap is a map range cap. */
const util::BitPack32 size_cap = { cap1 };
R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange, svc::ResultInvalidCombination());
/* Map the range. */
R_TRY(this->MapRange(cap, size_cap, page_table));
} else {
R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
}
}
R_SUCCEED();
}
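/* CheckCapabilities validates a user-provided capability set without applying it; only */
/* MapRegion descriptors require checking here, to ensure the requested regions actually */
/* exist in the physical memory layout. */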
Result KCapabilities::CheckCapabilities(svc::KUserPointer<const u32 *> user_caps, s32 num_caps) {
for (s32 i = 0; i < num_caps; ++i) {
/* Read the cap from userspace. */
u32 cap0;
R_TRY(user_caps.CopyArrayElementTo(std::addressof(cap0), i));
/* Check the capability refers to a valid region. */
const util::BitPack32 cap = { cap0 };
if (GetCapabilityType(cap) == CapabilityType::MapRegion) {
R_TRY(CheckMapRegion(cap));
}
}
R_SUCCEED();
}
}