From 5cff5e629b917ae769df52fc4aa3887581dc0220 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Mon, 23 Aug 2021 09:13:26 -0700
Subject: [PATCH] fusee_cpp: implement bpmp cache driver

---
 fusee_cpp/program/program.ld                  |   3 +-
 fusee_cpp/program/source/fusee_main.cpp       |   3 +
 .../include/exosphere/hw/hw_arm.hpp           |  22 +-
 .../source/hw/avp_cache_registers.hpp         | 110 ++++++++
 .../source/hw/hw_cache.arch.arm.cpp           | 253 ++++++++++++++++++
 .../dd/impl/dd_cache_impl.os.horizon.hpp      |  10 +-
 6 files changed, 390 insertions(+), 11 deletions(-)
 create mode 100644 libraries/libexosphere/source/hw/avp_cache_registers.hpp
 create mode 100644 libraries/libexosphere/source/hw/hw_cache.arch.arm.cpp

diff --git a/fusee_cpp/program/program.ld b/fusee_cpp/program/program.ld
index c84da359f..6d97ae2e7 100644
--- a/fusee_cpp/program/program.ld
+++ b/fusee_cpp/program/program.ld
@@ -117,7 +117,8 @@ SECTIONS
         FILL(0x00000000)
         *(.data .data.* .gnu.linkonce.d.*)
         SORT(CONSTRUCTORS)
-        . = ALIGN(64) - 1;
+        . = ALIGN(16);
+        . = . + 15;
         BYTE(0x00);
     } >main AT>glob
diff --git a/fusee_cpp/program/source/fusee_main.cpp b/fusee_cpp/program/source/fusee_main.cpp
index 28c863344..3e53b2cd8 100644
--- a/fusee_cpp/program/source/fusee_main.cpp
+++ b/fusee_cpp/program/source/fusee_main.cpp
@@ -27,6 +27,9 @@ namespace ams::nxboot {
         /* Initialize Sdram. */
         InitializeSdram();
 
+        /* Initialize cache. */
+        hw::InitializeDataCache();
+
         /* Initialize SD card. */
         Result result = InitializeSdCard();
diff --git a/libraries/libexosphere/include/exosphere/hw/hw_arm.hpp b/libraries/libexosphere/include/exosphere/hw/hw_arm.hpp
index 344e30726..6bb10283f 100644
--- a/libraries/libexosphere/include/exosphere/hw/hw_arm.hpp
+++ b/libraries/libexosphere/include/exosphere/hw/hw_arm.hpp
@@ -19,7 +19,8 @@ namespace ams::hw::arch::arm {
 
     #ifdef __BPMP__
 
-    constexpr inline size_t DataCacheLineSize = 0x1;
+    constexpr inline size_t DataCacheLineSize = 0x20;
+    constexpr inline size_t DataCacheSize     = 32_KB;
 
     ALWAYS_INLINE void DataSynchronizationBarrier() {
         /* ... */
@@ -37,11 +38,20 @@ namespace ams::hw::arch::arm {
         /* ... */
     }
 
-    ALWAYS_INLINE void FlushDataCache(const void *ptr, size_t size) {
-        AMS_UNUSED(ptr);
-        AMS_UNUSED(size);
-        /* ... */
-    }
+    void InitializeDataCache();
+    void FinalizeDataCache();
+
+    void InvalidateEntireDataCache();
+    void StoreEntireDataCache();
+    void FlushEntireDataCache();
+
+    void InvalidateDataCacheLine(void *ptr);
+    void StoreDataCacheLine(void *ptr);
+    void FlushDataCacheLine(void *ptr);
+
+    void InvalidateDataCache(void *ptr, size_t size);
+    void StoreDataCache(const void *ptr, size_t size);
+    void FlushDataCache(const void *ptr, size_t size);
 
     #else
 
     #error "Unknown ARM board for ams::hw"
 
     #endif
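For context, here is a minimal caller-side sketch of how the cache API declared above is meant to be used. It is not part of the patch: the DMA helpers and the buffer are hypothetical, and it assumes the usual libexosphere environment in which these functions are reachable as hw::*, the same way hw::InitializeDataCache() is called from fusee_main.cpp above.

    #include <exosphere.hpp>

    namespace ams::nxboot {

        /* Hypothetical DMA helpers, for illustration only. */
        void DoDmaWriteToDevice(const void *src, size_t size);
        void DoDmaReadFromDevice(void *dst, size_t size);

        void TransferBuffer(void *buffer, size_t size) {
            /* Write dirty lines back so the device observes the BPMP's latest data. */
            hw::StoreDataCache(buffer, size);

            DoDmaWriteToDevice(buffer, size);
            DoDmaReadFromDevice(buffer, size);

            /* The device wrote memory behind the cache's back; drop any stale lines before reading. */
            hw::InvalidateDataCache(buffer, size);
        }

    }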
diff --git a/libraries/libexosphere/source/hw/avp_cache_registers.hpp b/libraries/libexosphere/source/hw/avp_cache_registers.hpp
new file mode 100644
index 000000000..8aeb0553f
--- /dev/null
+++ b/libraries/libexosphere/source/hw/avp_cache_registers.hpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <exosphere.hpp>
+#pragma once
+
+
+#define AVP_CACHE_CONFIG                        (0x000)
+#define AVP_CACHE_LOCK                          (0x004)
+#define AVP_CACHE_SIZE                          (0x00C)
+#define AVP_CACHE_LFSR                          (0x010)
+#define AVP_CACHE_TAG_STATUS                    (0x014)
+#define AVP_CACHE_CLKEN_OVERRIDE                (0x018)
+#define AVP_CACHE_MAINT_0                       (0x020)
+#define AVP_CACHE_MAINT_1                       (0x024)
+#define AVP_CACHE_MAINT_2                       (0x028)
+#define AVP_CACHE_INT_MASK                      (0x040)
+#define AVP_CACHE_INT_CLEAR                     (0x044)
+#define AVP_CACHE_INT_RAW_EVENT                 (0x048)
+#define AVP_CACHE_INT_STATUS                    (0x04C)
+#define AVP_CACHE_RB_CFG                        (0x080)
+#define AVP_CACHE_WB_CFG                        (0x084)
+#define AVP_CACHE_MMU_FALLBACK_ENTRY            (0x0A0)
+#define AVP_CACHE_MMU_SHADOW_COPY_MASK_0        (0x0A4)
+#define AVP_CACHE_MMU_CFG                       (0x0AC)
+#define AVP_CACHE_MMU_CMD                       (0x0B0)
+#define AVP_CACHE_MMU_ABORT_STAT                (0x0B4)
+#define AVP_CACHE_MMU_ABORT_ADDR                (0x0B8)
+#define AVP_CACHE_MMU_ACTIVE_ENTRIES            (0x0BC)
+
+#define AVP_CACHE_MMU_SHADOW_ENTRY_0_MIN_ADDR   (0x400)
+#define AVP_CACHE_MMU_SHADOW_ENTRY_0_MAX_ADDR   (0x404)
+#define AVP_CACHE_MMU_SHADOW_ENTRY_0_CFG        (0x408)
+
+#define AVP_CACHE_MMU_SHADOW_ENTRY_1_MIN_ADDR   (0x410)
+#define AVP_CACHE_MMU_SHADOW_ENTRY_1_MAX_ADDR   (0x414)
+#define AVP_CACHE_MMU_SHADOW_ENTRY_1_CFG        (0x418)
+
+#define AVP_CACHE_REG_BITS_MASK(NAME)                                      REG_NAMED_BITS_MASK    (AVP_CACHE, NAME)
+#define AVP_CACHE_REG_BITS_VALUE(NAME, VALUE)                              REG_NAMED_BITS_VALUE   (AVP_CACHE, NAME, VALUE)
+#define AVP_CACHE_REG_BITS_ENUM(NAME, ENUM)                                REG_NAMED_BITS_ENUM    (AVP_CACHE, NAME, ENUM)
+#define AVP_CACHE_REG_BITS_ENUM_SEL(NAME, __COND__, TRUE_ENUM, FALSE_ENUM) REG_NAMED_BITS_ENUM_SEL(AVP_CACHE, NAME, __COND__, TRUE_ENUM, FALSE_ENUM)
+
+#define DEFINE_AVP_CACHE_REG(NAME, __OFFSET__, __WIDTH__)                                                                                                  REG_DEFINE_NAMED_REG           (AVP_CACHE, NAME, __OFFSET__, __WIDTH__)
+#define DEFINE_AVP_CACHE_REG_BIT_ENUM(NAME, __OFFSET__, ZERO, ONE)                                                                                         REG_DEFINE_NAMED_BIT_ENUM      (AVP_CACHE, NAME, __OFFSET__, ZERO, ONE)
+#define DEFINE_AVP_CACHE_REG_TWO_BIT_ENUM(NAME, __OFFSET__, ZERO, ONE, TWO, THREE)                                                                         REG_DEFINE_NAMED_TWO_BIT_ENUM  (AVP_CACHE, NAME, __OFFSET__, ZERO, ONE, TWO, THREE)
+#define DEFINE_AVP_CACHE_REG_THREE_BIT_ENUM(NAME, __OFFSET__, ZERO, ONE, TWO, THREE, FOUR, FIVE, SIX, SEVEN)                                               REG_DEFINE_NAMED_THREE_BIT_ENUM(AVP_CACHE, NAME, __OFFSET__, ZERO, ONE, TWO, THREE, FOUR, FIVE, SIX, SEVEN)
+#define DEFINE_AVP_CACHE_REG_FOUR_BIT_ENUM(NAME, __OFFSET__, ZERO, ONE, TWO, THREE, FOUR, FIVE, SIX, SEVEN, EIGHT, NINE, TEN, ELEVEN, TWELVE, THIRTEEN, FOURTEEN, FIFTEEN) REG_DEFINE_NAMED_FOUR_BIT_ENUM (AVP_CACHE, NAME, __OFFSET__, ZERO, ONE, TWO, THREE, FOUR, FIVE, SIX, SEVEN, EIGHT, NINE, TEN, ELEVEN, TWELVE, THIRTEEN, FOURTEEN, FIFTEEN)
+
+DEFINE_AVP_CACHE_REG_BIT_ENUM(CONFIG_ENABLE_CACHE,        0, FALSE, TRUE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(CONFIG_FORCE_WRITE_THROUGH, 3, FALSE, TRUE);
+
+DEFINE_AVP_CACHE_REG_TWO_BIT_ENUM(CONFIG_MMU_TAG_MODE, 8, PARALLEL, TAG_FIRST, MMU_FIRST, RSVD3);
+
+DEFINE_AVP_CACHE_REG_BIT_ENUM(CONFIG_TAG_CHECK_ABORT_ON_ERROR, 14, FALSE, TRUE);
+
+DEFINE_AVP_CACHE_REG(MAINT_2_OPCODE,     0, 8);
+DEFINE_AVP_CACHE_REG(MAINT_2_WAY_BITMAP, 8, 4);
+
+enum AVP_CACHE_MAINT_OPCODE : u32 {
+    AVP_CACHE_MAINT_OPCODE_NOP                = 0,
+
+    AVP_CACHE_MAINT_OPCODE_CLEAN_PHY          = 1,
+    AVP_CACHE_MAINT_OPCODE_INVALID_PHY        = 2,
+    AVP_CACHE_MAINT_OPCODE_CLEAN_INVALID_PHY  = 3,
+
+    AVP_CACHE_MAINT_OPCODE_CLEAN_LINE         = 9,
+    AVP_CACHE_MAINT_OPCODE_INVALID_LINE       = 10,
+    AVP_CACHE_MAINT_OPCODE_CLEAN_INVALID_LINE = 11,
+
+    AVP_CACHE_MAINT_OPCODE_CLEAN_WAY          = 17,
+    AVP_CACHE_MAINT_OPCODE_INVALID_WAY        = 18,
+    AVP_CACHE_MAINT_OPCODE_CLEAN_INVALID_WAY  = 19,
+};
+
+DEFINE_AVP_CACHE_REG_BIT_ENUM(INT_CLEAR_MAINTENANCE_DONE,     0, FALSE, TRUE);
+
+DEFINE_AVP_CACHE_REG_BIT_ENUM(INT_RAW_EVENT_MAINTENANCE_DONE, 0, FALSE, TRUE);
+
+DEFINE_AVP_CACHE_REG_BIT_ENUM(INT_STATUS_MAINTENANCE_DONE,    0, FALSE, TRUE);
+
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_FALLBACK_ENTRY_CACHED,  0, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_FALLBACK_ENTRY_EXE_ENA, 1, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_FALLBACK_ENTRY_RD_ENA,  2, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_FALLBACK_ENTRY_WR_ENA,  3, DISABLE, ENABLE);
+
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_CFG_BLOCK_MAIN_ENTRY_WR,   0, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_CFG_SEQ_ENA,               1, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_CFG_TLB_ENA,               2, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_CFG_SEQ_CHECK_ALL_ENTRIES, 3, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_CFG_ABORT_MODE,            4, STORE_FIRST, STORE_LAST);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(MMU_CFG_CLR_ABORT,             5, NOP, CLEAN);
+
+DEFINE_AVP_CACHE_REG_TWO_BIT_ENUM(MMU_CMD_CMD, 0, NOP, INIT, COPY_SHADOW, RSVD3);
+
+DEFINE_AVP_CACHE_REG_BIT_ENUM(SHADOW_ENTRY_CFG_CACHED,  0, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(SHADOW_ENTRY_CFG_EXE_ENA, 1, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(SHADOW_ENTRY_CFG_RD_ENA,  2, DISABLE, ENABLE);
+DEFINE_AVP_CACHE_REG_BIT_ENUM(SHADOW_ENTRY_CFG_WR_ENA,  3, DISABLE, ENABLE);
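In the driver below, this register map is consumed through libexosphere's reg::Write/reg::HasValue bitfield helpers. Purely as an illustration of what one of those maintenance requests boils down to at the MMIO level, here is a hypothetical, macro-free sketch of a whole-cache clean + invalidate. It uses only the offsets and field positions defined above, plus the AVP_CACHE base address (0x50040000) that the driver below uses; all identifiers here are made up for the example.

    #include <cstdint>

    namespace illustration {

        constexpr std::uintptr_t AvpCacheBase = 0x50040000;  /* AVP cache register block. */

        inline volatile std::uint32_t &Reg(std::uintptr_t offset) {
            return *reinterpret_cast<volatile std::uint32_t *>(AvpCacheBase + offset);
        }

        inline void CleanInvalidateAllWays() {
            /* AVP_CACHE_INT_CLEAR (0x044): acknowledge any pending MAINTENANCE_DONE (bit 0). */
            Reg(0x044) = 1u;

            /* AVP_CACHE_MAINT_2 (0x028): WAY_BITMAP = 0xF (bits 8..11), OPCODE = 19 = CLEAN_INVALID_WAY (bits 0..7). */
            Reg(0x028) = (0xFu << 8) | 19u;

            /* Poll AVP_CACHE_INT_RAW_EVENT (0x048) until MAINTENANCE_DONE (bit 0) is set, then acknowledge it. */
            while ((Reg(0x048) & 1u) == 0u) {
                /* ... */
            }
            Reg(0x044) = Reg(0x048);
        }

    }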
diff --git a/libraries/libexosphere/source/hw/hw_cache.arch.arm.cpp b/libraries/libexosphere/source/hw/hw_cache.arch.arm.cpp
new file mode 100644
index 000000000..e7960994a
--- /dev/null
+++ b/libraries/libexosphere/source/hw/hw_cache.arch.arm.cpp
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <exosphere.hpp>
+#include "avp_cache_registers.hpp"
+
+namespace ams::hw::arch::arm {
+
+#ifdef __BPMP__
+
+    namespace {
+
+        constexpr inline uintptr_t AVP_CACHE = 0x50040000;
+
+        ALWAYS_INLINE bool IsLargeBuffer(size_t size) {
+            /* From TRM: For very large physical buffers or when the full cache needs to be cleared, */
+            /* software should simply loop over all lines in all ways and run the *_LINE command on each of them. */
+            return size >= DataCacheSize / 4;
+        }
+
+        ALWAYS_INLINE bool IsCacheEnabled() {
+            return reg::HasValue(AVP_CACHE + AVP_CACHE_CONFIG, AVP_CACHE_REG_BITS_ENUM(CONFIG_ENABLE_CACHE, TRUE));
+        }
+
+        void DoPhyCacheOperation(AVP_CACHE_MAINT_OPCODE op, uintptr_t addr) {
+            /* Clear maintenance done. */
+            reg::Write(AVP_CACHE + AVP_CACHE_INT_CLEAR, AVP_CACHE_REG_BITS_ENUM(INT_CLEAR_MAINTENANCE_DONE, TRUE));
+
+            /* Write maintenance address. */
+            reg::Write(AVP_CACHE + AVP_CACHE_MAINT_0, addr);
+
+            /* Write maintenance request. */
+            reg::Write(AVP_CACHE + AVP_CACHE_MAINT_2, AVP_CACHE_REG_BITS_VALUE(MAINT_2_WAY_BITMAP, 0x0),
+                                                      AVP_CACHE_REG_BITS_VALUE(MAINT_2_OPCODE, op));
+
+            /* Wait for maintenance to be done. */
+            while (!reg::HasValue(AVP_CACHE + AVP_CACHE_INT_RAW_EVENT, AVP_CACHE_REG_BITS_ENUM(INT_RAW_EVENT_MAINTENANCE_DONE, TRUE))) {
+                /* ... */
+            }
+
+            /* Clear raw event. */
+            reg::Write(AVP_CACHE + AVP_CACHE_INT_CLEAR, reg::Read(AVP_CACHE + AVP_CACHE_INT_RAW_EVENT));
+        }
+
+        void DoEntireCacheOperation(AVP_CACHE_MAINT_OPCODE op) {
+            /* Clear maintenance done. */
+            reg::Write(AVP_CACHE + AVP_CACHE_INT_CLEAR, AVP_CACHE_REG_BITS_ENUM(INT_CLEAR_MAINTENANCE_DONE, TRUE));
+
+            /* Write maintenance request. */
+            reg::Write(AVP_CACHE + AVP_CACHE_MAINT_2, AVP_CACHE_REG_BITS_VALUE(MAINT_2_WAY_BITMAP, 0xF),
+                                                      AVP_CACHE_REG_BITS_VALUE(MAINT_2_OPCODE, op));
+
+            /* Wait for maintenance to be done. */
+            while (!reg::HasValue(AVP_CACHE + AVP_CACHE_INT_RAW_EVENT, AVP_CACHE_REG_BITS_ENUM(INT_RAW_EVENT_MAINTENANCE_DONE, TRUE))) {
+                /* ... */
+            }
+
+            /* Clear raw event. */
+            reg::Write(AVP_CACHE + AVP_CACHE_INT_CLEAR, reg::Read(AVP_CACHE + AVP_CACHE_INT_RAW_EVENT));
+        }
+
+    }
+
+    #define REQUIRE_CACHE_ENABLED()                \
+        do {                                       \
+            if (AMS_UNLIKELY(!IsCacheEnabled())) { \
+                return;                            \
+            }                                      \
+        } while (false)                            \
+
+
+    #define REQUIRE_CACHE_DISABLED()               \
+        do {                                       \
+            if (AMS_UNLIKELY(IsCacheEnabled())) {  \
+                return;                            \
+            }                                      \
+        } while (false)                            \
+
+    void InitializeDataCache() {
+        REQUIRE_CACHE_DISABLED();
+
+        /* Issue init mmu command. */
+        reg::Write(AVP_CACHE + AVP_CACHE_MMU_CMD, AVP_CACHE_REG_BITS_ENUM(MMU_CMD_CMD, INIT));
+
+        /* Set mmu fallback entry as RWX, uncached. */
+        reg::Write(AVP_CACHE + AVP_CACHE_MMU_FALLBACK_ENTRY, AVP_CACHE_REG_BITS_ENUM(MMU_FALLBACK_ENTRY_WR_ENA,  ENABLE),
+                                                             AVP_CACHE_REG_BITS_ENUM(MMU_FALLBACK_ENTRY_RD_ENA,  ENABLE),
+                                                             AVP_CACHE_REG_BITS_ENUM(MMU_FALLBACK_ENTRY_EXE_ENA, ENABLE),
+                                                             AVP_CACHE_REG_BITS_ENUM(MMU_FALLBACK_ENTRY_CACHED,  DISABLE));
+
+        /* Set mmu cfg. */
+        reg::Write(AVP_CACHE + AVP_CACHE_MMU_CFG, AVP_CACHE_REG_BITS_ENUM(MMU_CFG_CLR_ABORT,             NOP),
+                                                  AVP_CACHE_REG_BITS_ENUM(MMU_CFG_ABORT_MODE,            STORE_LAST),
+                                                  AVP_CACHE_REG_BITS_ENUM(MMU_CFG_SEQ_CHECK_ALL_ENTRIES, DISABLE),
+                                                  AVP_CACHE_REG_BITS_ENUM(MMU_CFG_TLB_ENA,               ENABLE),
+                                                  AVP_CACHE_REG_BITS_ENUM(MMU_CFG_SEQ_ENA,               ENABLE),
+                                                  AVP_CACHE_REG_BITS_ENUM(MMU_CFG_BLOCK_MAIN_ENTRY_WR,   DISABLE));
+
+        /* Initialize mmu entries. */
+        {
+            /* Clear shadow copy mask. */
+            reg::Write(AVP_CACHE + AVP_CACHE_MMU_SHADOW_COPY_MASK_0, 0);
+
+            /* Add DRAM as index 0, RWX/Cached. */
+            {
+                reg::Write(AVP_CACHE + AVP_CACHE_MMU_SHADOW_ENTRY_0_MIN_ADDR, 0x80000000);
+                reg::Write(AVP_CACHE + AVP_CACHE_MMU_SHADOW_ENTRY_0_MAX_ADDR, util::AlignDown(0xFFFFFFFF, DataCacheLineSize));
+
+                reg::Write(AVP_CACHE + AVP_CACHE_MMU_SHADOW_ENTRY_0_CFG, AVP_CACHE_REG_BITS_ENUM(SHADOW_ENTRY_CFG_WR_ENA,  ENABLE),
+                                                                         AVP_CACHE_REG_BITS_ENUM(SHADOW_ENTRY_CFG_RD_ENA,  ENABLE),
+                                                                         AVP_CACHE_REG_BITS_ENUM(SHADOW_ENTRY_CFG_EXE_ENA, ENABLE),
+                                                                         AVP_CACHE_REG_BITS_ENUM(SHADOW_ENTRY_CFG_CACHED,  ENABLE));
+            }
+
+            /* Add IRAM as index 1, RWX/Cached. */
+            {
+                reg::Write(AVP_CACHE + AVP_CACHE_MMU_SHADOW_ENTRY_1_MIN_ADDR, 0x40000000);
+                reg::Write(AVP_CACHE + AVP_CACHE_MMU_SHADOW_ENTRY_1_MAX_ADDR, util::AlignDown(0x4003FFFF, DataCacheLineSize));
+
+                reg::Write(AVP_CACHE + AVP_CACHE_MMU_SHADOW_ENTRY_1_CFG, AVP_CACHE_REG_BITS_ENUM(SHADOW_ENTRY_CFG_WR_ENA,  ENABLE),
+                                                                         AVP_CACHE_REG_BITS_ENUM(SHADOW_ENTRY_CFG_RD_ENA,  ENABLE),
+                                                                         AVP_CACHE_REG_BITS_ENUM(SHADOW_ENTRY_CFG_EXE_ENA, ENABLE),
+                                                                         AVP_CACHE_REG_BITS_ENUM(SHADOW_ENTRY_CFG_CACHED,  ENABLE));
+            }
+
+            /* Set index 0/1 in shadow copy mask. */
+            reg::Write(AVP_CACHE + AVP_CACHE_MMU_SHADOW_COPY_MASK_0, 0b11);
+
+            /* Issue copy shadow mmu command. */
+            reg::Write(AVP_CACHE + AVP_CACHE_MMU_CMD, AVP_CACHE_REG_BITS_ENUM(MMU_CMD_CMD, COPY_SHADOW));
+        }
+
+        /* Invalidate entire cache. */
+        DoEntireCacheOperation(AVP_CACHE_MAINT_OPCODE_INVALID_WAY);
+
+        /* Enable the cache. */
+        reg::Write(AVP_CACHE + AVP_CACHE_CONFIG, AVP_CACHE_REG_BITS_ENUM(CONFIG_ENABLE_CACHE,             TRUE),
+                                                 AVP_CACHE_REG_BITS_ENUM(CONFIG_FORCE_WRITE_THROUGH,      TRUE),
+                                                 AVP_CACHE_REG_BITS_ENUM(CONFIG_MMU_TAG_MODE,             PARALLEL),
+                                                 AVP_CACHE_REG_BITS_ENUM(CONFIG_TAG_CHECK_ABORT_ON_ERROR, TRUE));
+
+        /* Invalidate entire cache again (WAR for hardware bug). */
+        DoEntireCacheOperation(AVP_CACHE_MAINT_OPCODE_INVALID_WAY);
+    }
+
+    void FinalizeDataCache() {
+        REQUIRE_CACHE_ENABLED();
+
+        /* Flush entire data cache. */
+        FlushEntireDataCache();
+
+        /* Disable cache. */
+        reg::Write(AVP_CACHE + AVP_CACHE_CONFIG, AVP_CACHE_REG_BITS_ENUM(CONFIG_ENABLE_CACHE, FALSE));
+    }
+
+    void InvalidateEntireDataCache() {
+        REQUIRE_CACHE_ENABLED();
+
+        DoEntireCacheOperation(AVP_CACHE_MAINT_OPCODE_INVALID_WAY);
+    }
+
+    void StoreEntireDataCache() {
+        REQUIRE_CACHE_ENABLED();
+
+        DoEntireCacheOperation(AVP_CACHE_MAINT_OPCODE_CLEAN_WAY);
+    }
+
+    void FlushEntireDataCache() {
+        REQUIRE_CACHE_ENABLED();
+
+        DoEntireCacheOperation(AVP_CACHE_MAINT_OPCODE_CLEAN_INVALID_WAY);
+    }
+
+    void InvalidateDataCacheLine(void *ptr) {
+        /* NOTE: Don't check cache enabled as an optimization, as only direct caller will be InvalidateDataCache(). */
+        /* REQUIRE_CACHE_ENABLED(); */
+
+        DoPhyCacheOperation(AVP_CACHE_MAINT_OPCODE_INVALID_PHY, reinterpret_cast<uintptr_t>(ptr));
+    }
+
+    void StoreDataCacheLine(void *ptr) {
+        /* NOTE: Don't check cache enabled as an optimization, as only direct caller will be StoreDataCache(). */
+        /* REQUIRE_CACHE_ENABLED(); */
+
+        DoPhyCacheOperation(AVP_CACHE_MAINT_OPCODE_CLEAN_PHY, reinterpret_cast<uintptr_t>(ptr));
+    }
+
+    void FlushDataCacheLine(void *ptr) {
+        /* NOTE: Don't check cache enabled as an optimization, as only direct caller will be FlushDataCache(). */
+        /* REQUIRE_CACHE_ENABLED(); */
+
+        DoPhyCacheOperation(AVP_CACHE_MAINT_OPCODE_CLEAN_INVALID_PHY, reinterpret_cast<uintptr_t>(ptr));
+    }
+
+    void InvalidateDataCache(void *ptr, size_t size) {
+        REQUIRE_CACHE_ENABLED();
+
+        if (IsLargeBuffer(size)) {
+            InvalidateEntireDataCache();
+        } else {
+            const uintptr_t start = reinterpret_cast<uintptr_t>(ptr);
+            const uintptr_t end   = util::AlignUp(start + size, hw::DataCacheLineSize);
+
+            for (uintptr_t cur = start; cur < end; cur += hw::DataCacheLineSize) {
+                InvalidateDataCacheLine(reinterpret_cast<void *>(cur));
+            }
+        }
+    }
+
+    void StoreDataCache(const void *ptr, size_t size) {
+        REQUIRE_CACHE_ENABLED();
+
+        if (IsLargeBuffer(size)) {
+            StoreEntireDataCache();
+        } else {
+            const uintptr_t start = reinterpret_cast<uintptr_t>(ptr);
+            const uintptr_t end   = util::AlignUp(start + size, hw::DataCacheLineSize);
+
+            for (uintptr_t cur = start; cur < end; cur += hw::DataCacheLineSize) {
+                StoreDataCacheLine(reinterpret_cast<void *>(cur));
+            }
+        }
+    }
+
+    void FlushDataCache(const void *ptr, size_t size) {
+        REQUIRE_CACHE_ENABLED();
+
+        if (IsLargeBuffer(size)) {
+            FlushEntireDataCache();
+        } else {
+            const uintptr_t start = reinterpret_cast<uintptr_t>(ptr);
+            const uintptr_t end   = util::AlignUp(start + size, hw::DataCacheLineSize);
+
+            for (uintptr_t cur = start; cur < end; cur += hw::DataCacheLineSize) {
+                FlushDataCacheLine(reinterpret_cast<void *>(cur));
+            }
+        }
+    }
+
+#endif
+
+}
\ No newline at end of file
diff --git a/libraries/libvapours/source/dd/impl/dd_cache_impl.os.horizon.hpp b/libraries/libvapours/source/dd/impl/dd_cache_impl.os.horizon.hpp
index 53ccf6473..2f5ab23e9 100644
--- a/libraries/libvapours/source/dd/impl/dd_cache_impl.os.horizon.hpp
+++ b/libraries/libvapours/source/dd/impl/dd_cache_impl.os.horizon.hpp
@@ -46,8 +46,7 @@ namespace ams::dd::impl {
             const auto result = svc::StoreProcessDataCache(svc::PseudoHandle::CurrentProcess, reinterpret_cast<uintptr_t>(addr), size);
             R_ASSERT(result);
         #elif defined(ATMOSPHERE_IS_EXOSPHERE) && defined(__BPMP__)
-            /* Do nothing. */
-            AMS_UNUSED(addr, size);
+            return hw::StoreDataCache(addr, size);
         #else
             #error "Unknown execution context for ams::dd::impl::StoreDataCacheImpl"
         #endif
@@ -78,8 +77,7 @@ namespace ams::dd::impl {
             const auto result = svc::FlushProcessDataCache(svc::PseudoHandle::CurrentProcess, reinterpret_cast<uintptr_t>(addr), size);
             R_ASSERT(result);
         #elif defined(ATMOSPHERE_IS_EXOSPHERE) && defined(__BPMP__)
-            /* Do nothing. */
-            AMS_UNUSED(addr, size);
+            return hw::FlushDataCache(addr, size);
         #else
             #error "Unknown execution context for ams::dd::impl::FlushDataCacheImpl"
         #endif
@@ -87,8 +85,12 @@ namespace ams::dd::impl {
     }
 
     void InvalidateDataCacheImpl(void *addr, size_t size) {
+        #if defined(ATMOSPHERE_IS_EXOSPHERE) && defined(__BPMP__)
+            return hw::InvalidateDataCache(addr, size);
+        #else
         /* Just perform a flush, which is clean + invalidate. */
         return FlushDataCacheImpl(addr, size);
+        #endif
     }
 
 }
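One detail worth calling out from hw_cache.arch.arm.cpp above: with DataCacheSize = 32_KB, the IsLargeBuffer() heuristic switches from per-line *_PHY maintenance to a single whole-way operation once a buffer reaches a quarter of the cache (8 KB). A small self-contained sketch of that arithmetic, with the constants restated here purely for illustration:

    #include <cstddef>

    /* Constants restated from hw_arm.hpp, for illustration only. */
    constexpr std::size_t DataCacheSize     = 32 * 1024;
    constexpr std::size_t DataCacheLineSize = 0x20;

    constexpr bool IsLargeBuffer(std::size_t size) {
        return size >= DataCacheSize / 4;
    }

    static_assert( IsLargeBuffer(8 * 1024));   /* 8 KB and up: one *_WAY operation over all four ways.   */
    static_assert(!IsLargeBuffer(4 * 1024));   /* 4 KB: 4096 / 0x20 = 128 individual *_PHY operations.   */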
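Finally, a sketch of how the dd_cache_impl.os.horizon.hpp change is exercised from fusee code. The dd::InvalidateDataCache wrapper name and the DMA-completion scenario are assumptions made for illustration; the point is that on __BPMP__ the dd::impl functions above now reach the hardware driver instead of being no-ops.

    #include <vapours.hpp>

    namespace ams::nxboot {

        /* Hypothetical DMA read-completion path, for illustration only. */
        void OnDmaReadToDram(void *buffer, size_t size) {
            /* The controller wrote DRAM directly; before this patch the BPMP */
            /* path was a no-op, now it issues INVALID_PHY / INVALID_WAY      */
            /* maintenance via hw::InvalidateDataCache().                     */
            dd::InvalidateDataCache(buffer, size);
        }

    }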