diff --git a/code_templates/nxdt_rw_poc.c b/code_templates/nxdt_rw_poc.c index 5a01886..b38e161 100644 --- a/code_templates/nxdt_rw_poc.c +++ b/code_templates/nxdt_rw_poc.c @@ -2810,13 +2810,13 @@ static bool saveGameCardUid(void *userdata) goto end; } - crc = crc32Calculate(gc_security_information.specific_data.card_uid, sizeof(gc_security_information.specific_data.card_uid)); + crc = crc32Calculate(&(gc_security_information.specific_data.card_uid), sizeof(gc_security_information.specific_data.card_uid)); snprintf(path, MAX_ELEMENTS(path), " (Card UID) (%08X).bin", crc); filename = generateOutputGameCardFileName("Gamecard", path, true); if (!filename) goto end; - if (!saveFileData(filename, gc_security_information.specific_data.card_uid, sizeof(gc_security_information.specific_data.card_uid))) goto end; + if (!saveFileData(filename, &(gc_security_information.specific_data.card_uid), sizeof(gc_security_information.specific_data.card_uid))) goto end; consolePrint("successfully saved gamecard uid as \"%s\"\n", filename); success = true; diff --git a/include/core/fs_ext.h b/include/core/fs_ext.h index c46b3ab..a63ef0a 100644 --- a/include/core/fs_ext.h +++ b/include/core/fs_ext.h @@ -83,6 +83,15 @@ typedef struct { NXDT_ASSERT(FsCardId1, 0x4); +typedef enum { + FsCardId2CardSecurityNumber_Number0 = 0, + FsCardId2CardSecurityNumber_Number1 = 1, + FsCardId2CardSecurityNumber_Number2 = 2, + FsCardId2CardSecurityNumber_Number3 = 3, + FsCardId2CardSecurityNumber_Number4 = 4, + FsCardId2CardSecurityNumber_Count = 5 ///< Total values supported by this enum. +} FsCardId2CardSecurityNumber; + typedef enum { FsCardId2CardType_Rom = 0, FsCardId2CardType_WritableDevT1 = 1, @@ -93,9 +102,9 @@ typedef enum { } FsCardId2CardType; typedef struct { - u8 sel_t1_key; ///< Matches sel_t1_key value from GameCardHeader (usually 0x02). - u8 card_type; ///< FsCardId2CardType. - u8 reserved[0x2]; ///< Usually filled with zeroes. + u8 card_security_number; ///< FsCardId2CardSecurityNumber. + u8 card_type; ///< FsCardId2CardType. + u8 reserved[0x2]; ///< Usually filled with zeroes. } FsCardId2; NXDT_ASSERT(FsCardId2, 0x4); diff --git a/include/core/gamecard.h b/include/core/gamecard.h index 7befe18..2df555e 100644 --- a/include/core/gamecard.h +++ b/include/core/gamecard.h @@ -94,16 +94,44 @@ typedef struct { NXDT_ASSERT(GameCardKeyArea, 0x1000); +typedef enum { + GameCardUidMakerCode_MegaChips = 0, + GameCardUidMakerCode_Lapis = 1, + GameCardUidMakerCode_Unknown = 2, + GameCardUidMakerCode_Count = 3 ///< Total values supported by this enum. +} GameCardUidMakerCode; + +typedef enum { + GameCardUidCardType_Rom = 0, + GameCardUidCardType_WritableDev = 0xFE, + GameCardUidCardType_WritableProd = 0xFF, + GameCardUidCardType_Count = 3 ///< Total values supported by this enum. +} GameCardUidCardType; + +typedef struct { + u8 maker_code; ///< GameCardUidMakerCode. + u8 version; + u8 card_type; ///< GameCardUidCardType. + u8 unique_data[0x9]; + u32 random; + u8 platform_flag; + u8 reserved[0xB]; + FsCardId1 card_id_1; ///< Are we sure about this? + u8 mac[0x20]; +} GameCardUid; + +NXDT_ASSERT(GameCardUid, 0x40); + /// Plaintext area. Dumped from FS program memory. /// Overall structure may change with each new LAFW version. typedef struct { - u32 asic_security_mode; ///< Determines how the Lotus ASIC initialised the gamecard security mode. Usually 0xFFFFFFF9. - u32 asic_status; ///< Bitmask of the internal gamecard interface status. Usually 0x20000000. 
+ u32 asic_security_mode; ///< Determines how the Lotus ASIC initialised the gamecard security mode. Usually 0xFFFFFFF9. + u32 asic_status; ///< Bitmask of the internal gamecard interface status. Usually 0x20000000. FsCardId1 card_id1; FsCardId2 card_id2; - u8 card_uid[0x40]; + GameCardUid card_uid; u8 reserved[0x190]; - u8 asic_session_hash[0x20]; ///< Changes with each gamecard (re)insertion. + u8 mac[0x20]; ///< Changes with each gamecard (re)insertion. } GameCardSpecificData; NXDT_ASSERT(GameCardSpecificData, 0x200); diff --git a/include/core/lz4.h b/include/core/lz4.h index 491c608..80e3e5c 100644 --- a/include/core/lz4.h +++ b/include/core/lz4.h @@ -1,7 +1,7 @@ /* * LZ4 - Fast LZ compression algorithm * Header File - * Copyright (C) 2011-2020, Yann Collet. + * Copyright (C) 2011-2023, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) @@ -129,8 +129,8 @@ extern "C" { /*------ Version ------*/ #define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ -#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */ -#define LZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */ +#define LZ4_VERSION_MINOR 10 /* for new (non-breaking) interface capabilities */ +#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */ #define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) @@ -144,23 +144,25 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; /*-************************************ -* Tuning parameter +* Tuning memory usage **************************************/ -#define LZ4_MEMORY_USAGE_MIN 10 -#define LZ4_MEMORY_USAGE_DEFAULT 14 -#define LZ4_MEMORY_USAGE_MAX 20 - /*! * LZ4_MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; ) - * Increasing memory usage improves compression ratio, at the cost of speed. + * Can be selected at compile time, by setting LZ4_MEMORY_USAGE. + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB) + * Increasing memory usage improves compression ratio, generally at the cost of speed. * Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality. - * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache + * Default value is 14, for 16KB, which nicely fits into most L1 caches. */ #ifndef LZ4_MEMORY_USAGE # define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT #endif +/* These are absolute limits, they should not be changed by users */ +#define LZ4_MEMORY_USAGE_MIN 10 +#define LZ4_MEMORY_USAGE_DEFAULT 14 +#define LZ4_MEMORY_USAGE_MAX 20 + #if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN) # error "LZ4_MEMORY_USAGE is too small !" #endif @@ -189,8 +191,9 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); /*! LZ4_decompress_safe() : - * compressedSize : is the exact complete size of the compressed block. - * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size. + * @compressedSize : is the exact complete size of the compressed block. + * @dstCapacity : is the size of destination buffer (which must be already allocated), + * presumed an upper bound of decompressed size. 
* @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) * If destination buffer is not large enough, decoding will stop and output an error code (negative value). * If the source stream is detected malformed, the function will stop decoding and return a negative result. @@ -242,20 +245,20 @@ LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int d LZ4LIB_API int LZ4_sizeofState(void); LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); - /*! LZ4_compress_destSize() : * Reverse the logic : compresses as much data as possible from 'src' buffer - * into already allocated buffer 'dst', of size >= 'targetDestSize'. + * into already allocated buffer 'dst', of size >= 'dstCapacity'. * This function either compresses the entire 'src' content into 'dst' if it's large enough, * or fill 'dst' buffer completely with as much data as possible from 'src'. * note: acceleration parameter is fixed to "default". * - * *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'. + * *srcSizePtr : in+out parameter. Initially contains size of input. + * Will be modified to indicate how many bytes where read from 'src' to fill 'dst'. * New value is necessarily <= input value. - * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize) + * @return : Nb bytes written into 'dst' (necessarily <= dstCapacity) * or 0 if compression fails. * - * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed un v1.9.2+): + * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+): * the produced compressed content could, in specific circumstances, * require to be decompressed into a destination buffer larger * by at least 1 byte than the content to decompress. @@ -266,8 +269,7 @@ LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* d * a dstCapacity which is > decompressedSize, by at least 1 byte. * See https://github.com/lz4/lz4/issues/859 for details */ -LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize); - +LZ4LIB_API int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize); /*! LZ4_decompress_safe_partial() : * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src', @@ -311,7 +313,7 @@ LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcS ***********************************************/ typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ -/** +/*! Note about RC_INVOKED - RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio). @@ -361,13 +363,58 @@ LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr); * LZ4_loadDict() triggers a reset, so any previous data will be forgotten. * The same dictionary will have to be loaded on decompression side for successful decoding. * Dictionary are useful for better compression of small data (KB range). - * While LZ4 accept any input as dictionary, - * results are generally better when using Zstandard's Dictionary Builder. + * While LZ4 itself accepts any input as dictionary, dictionary efficiency is also a topic. + * When in doubt, employ the Zstandard's Dictionary Builder. * Loading a size of 0 is allowed, and is the same as reset. 
- * @return : loaded dictionary size, in bytes (necessarily <= 64 KB) + * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded) */ LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); +/*! LZ4_loadDictSlow() : v1.10.0+ + * Same as LZ4_loadDict(), + * but uses a bit more cpu to reference the dictionary content more thoroughly. + * This is expected to slightly improve compression ratio. + * The extra-cpu cost is likely worth it if the dictionary is re-used across multiple sessions. + * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded) + */ +LZ4LIB_API int LZ4_loadDictSlow(LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); + +/*! LZ4_attach_dictionary() : stable since v1.10.0 + * + * This allows efficient re-use of a static dictionary multiple times. + * + * Rather than re-loading the dictionary buffer into a working context before + * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a + * working LZ4_stream_t, this function introduces a no-copy setup mechanism, + * in which the working stream references @dictionaryStream in-place. + * + * Several assumptions are made about the state of @dictionaryStream. + * Currently, only states which have been prepared by LZ4_loadDict() or + * LZ4_loadDictSlow() should be expected to work. + * + * Alternatively, the provided @dictionaryStream may be NULL, + * in which case any existing dictionary stream is unset. + * + * If a dictionary is provided, it replaces any pre-existing stream history. + * The dictionary contents are the only history that can be referenced and + * logically immediately precede the data compressed in the first subsequent + * compression call. + * + * The dictionary will only remain attached to the working stream through the + * first compression call, at the end of which it is cleared. + * @dictionaryStream stream (and source buffer) must remain in-place / accessible / unchanged + * through the completion of the compression session. + * + * Note: there is no equivalent LZ4_attach_*() method on the decompression side + * because there is no initialization cost, hence no need to share the cost across multiple sessions. + * To decompress LZ4 blocks using dictionary, attached or not, + * just employ the regular LZ4_setStreamDecode() for streaming, + * or the stateless LZ4_decompress_safe_usingDict() for one-shot decompression. + */ +LZ4LIB_API void +LZ4_attach_dictionary(LZ4_stream_t* workingStream, + const LZ4_stream_t* dictionaryStream); + /*! LZ4_compress_fast_continue() : * Compress 'src' content using data from previously compressed blocks, for better compression ratio. * 'dst' buffer must be already allocated. @@ -443,11 +490,24 @@ LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize); #define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */ -/*! LZ4_decompress_*_continue() : - * These decoding functions allow decompression of consecutive blocks in "streaming" mode. - * A block is an unsplittable entity, it must be presented entirely to a decompression function. - * Decompression functions only accepts one block at a time. - * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded. +/*! 
LZ4_decompress_safe_continue() : + * This decoding function allows decompression of consecutive blocks in "streaming" mode. + * The difference with the usual independent blocks is that + * new blocks are allowed to find references into former blocks. + * A block is an unsplittable entity, and must be presented entirely to the decompression function. + * LZ4_decompress_safe_continue() only accepts one block at a time. + * It's modeled after `LZ4_decompress_safe()` and behaves similarly. + * + * @LZ4_streamDecode : decompression state, tracking the position in memory of past data + * @compressedSize : exact complete size of one compressed block. + * @dstCapacity : size of destination buffer (which must be already allocated), + * must be an upper bound of decompressed size. + * @return : number of bytes decompressed into destination buffer (necessarily <= dstCapacity) + * If destination buffer is not large enough, decoding will stop and output an error code (negative value). + * If the source stream is detected malformed, the function will stop decoding and return a negative result. + * + * The last 64KB of previously decoded data *must* remain available and unmodified + * at the memory position where they were previously decoded. * If less than 64KB of data has been decoded, all the data must be present. * * Special : if decompression side sets a ring buffer, it must respect one of the following conditions : @@ -474,10 +534,10 @@ LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, int srcSize, int dstCapacity); -/*! LZ4_decompress_*_usingDict() : - * These decoding functions work the same as - * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() - * They are stand-alone, and don't need an LZ4_streamDecode_t structure. +/*! LZ4_decompress_safe_usingDict() : + * Works the same as + * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_safe_continue() + * However, it's stateless: it doesn't need any LZ4_streamDecode_t state. * Dictionary is presumed stable : it must remain accessible and unmodified during decompression. * Performance tip : Decompression speed can be substantially increased * when dst == dictStart + dictSize. @@ -487,6 +547,12 @@ LZ4_decompress_safe_usingDict(const char* src, char* dst, int srcSize, int dstCapacity, const char* dictStart, int dictSize); +/*! LZ4_decompress_safe_partial_usingDict() : + * Behaves the same as LZ4_decompress_safe_partial() + * with the added ability to specify a memory segment for past data. + * Performance tip : Decompression speed can be substantially increased + * when dst == dictStart + dictSize. + */ LZ4LIB_API int LZ4_decompress_safe_partial_usingDict(const char* src, char* dst, int compressedSize, @@ -526,9 +592,9 @@ LZ4_decompress_safe_partial_usingDict(const char* src, char* dst, #define LZ4_STATIC_3504398509 #ifdef LZ4_PUBLISH_STATIC_FUNCTIONS -#define LZ4LIB_STATIC_API LZ4LIB_API +# define LZ4LIB_STATIC_API LZ4LIB_API #else -#define LZ4LIB_STATIC_API +# define LZ4LIB_STATIC_API #endif @@ -544,36 +610,11 @@ LZ4_decompress_safe_partial_usingDict(const char* src, char* dst, */ LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); -/*! LZ4_attach_dictionary() : - * This is an experimental API that allows - * efficient use of a static dictionary many times. 
- * - * Rather than re-loading the dictionary buffer into a working context before - * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a - * working LZ4_stream_t, this function introduces a no-copy setup mechanism, - * in which the working stream references the dictionary stream in-place. - * - * Several assumptions are made about the state of the dictionary stream. - * Currently, only streams which have been prepared by LZ4_loadDict() should - * be expected to work. - * - * Alternatively, the provided dictionaryStream may be NULL, - * in which case any existing dictionary stream is unset. - * - * If a dictionary is provided, it replaces any pre-existing stream history. - * The dictionary contents are the only history that can be referenced and - * logically immediately precede the data compressed in the first subsequent - * compression call. - * - * The dictionary will only remain attached to the working stream through the - * first compression call, at the end of which it is cleared. The dictionary - * stream (and source buffer) must remain in-place / accessible / unchanged - * through the completion of the first compression call on the stream. +/*! LZ4_compress_destSize_extState() : introduced in v1.10.0 + * Same as LZ4_compress_destSize(), but using an externally allocated state. + * Also: exposes @acceleration */ -LZ4LIB_STATIC_API void -LZ4_attach_dictionary(LZ4_stream_t* workingStream, - const LZ4_stream_t* dictionaryStream); - +int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration); /*! In-place compression and decompression * @@ -685,7 +726,7 @@ struct LZ4_stream_t_internal { /* Implicit padding to ensure structure is aligned */ }; -#define LZ4_STREAM_MINSIZE ((1UL << LZ4_MEMORY_USAGE) + 32) /* static size, for inter-version compatibility */ +#define LZ4_STREAM_MINSIZE ((1UL << (LZ4_MEMORY_USAGE)) + 32) /* static size, for inter-version compatibility */ union LZ4_stream_u { char minStateSize[LZ4_STREAM_MINSIZE]; LZ4_stream_t_internal internal_donotuse; @@ -706,7 +747,7 @@ union LZ4_stream_u { * Note2: An LZ4_stream_t structure guarantees correct alignment and size. * Note3: Before v1.9.0, use LZ4_resetStream() instead **/ -LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size); +LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* stateBuffer, size_t size); /*! LZ4_streamDecode_t : @@ -818,11 +859,12 @@ LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4 * But they may happen if input data is invalid (error or intentional tampering). * As a consequence, use these functions in trusted environments with trusted data **only**. */ -LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead") +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial() instead") LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); -LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead") +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider migrating towards LZ4_decompress_safe_continue() instead. " + "Note that the contract will change (requires block's compressed size, instead of decompressed size)") LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); -LZ4_DEPRECATED("This function is deprecated and unsafe. 
Consider using LZ4_decompress_safe_usingDict() instead") +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); /*! LZ4_resetStream() : diff --git a/include/core/npdm.h b/include/core/npdm.h index 14a601a..b27ddef 100644 --- a/include/core/npdm.h +++ b/include/core/npdm.h @@ -61,7 +61,8 @@ typedef struct { u8 process_address_space : 3; ///< NpdmProcessAddressSpace. u8 optimize_memory_allocation : 1; u8 disable_device_address_space_merge : 1; - u8 reserved : 2; + u8 enable_alias_region_extra_size : 1; + u8 reserved : 1; } NpdmMetaFlags; NXDT_ASSERT(NpdmMetaFlags, 0x1); diff --git a/include/core/nso.h b/include/core/nso.h index 4cfe12f..b715a3c 100644 --- a/include/core/nso.h +++ b/include/core/nso.h @@ -60,16 +60,17 @@ typedef struct { NXDT_ASSERT(NsoSectionInfo, 0x8); /// This is the start of every NSO. -/// This is always followed by a NsoModuleName block. +/// This can be optionally followed by the NSO module name. +/// If available, the 'module_name_size' member is greater than 1, and the 'module_name_offset' member will usually be set to 0x100 (the size of this header). typedef struct { u32 magic; ///< "NSO0". u32 version; ///< Always set to 0. u8 reserved_1[0x4]; u32 flags; ///< NsoFlags. NsoSegmentInfo text_segment_info; - u32 module_name_offset; ///< NsoModuleName block offset. + u32 module_name_offset; ///< NSO module name offset. NsoSegmentInfo rodata_segment_info; - u32 module_name_size; ///< NsoModuleName block size. + u32 module_name_size; ///< NSO module name size. NsoSegmentInfo data_segment_info; u32 bss_size; u8 module_id[0x20]; ///< Also known as build ID. @@ -87,19 +88,10 @@ typedef struct { NXDT_ASSERT(NsoHeader, 0x100); -/// Usually placed right after NsoHeader, but its actual offset may vary. -/// If the 'module_name_size' member from NsoHeader is greater than 1 and the 'name_length' element from NsoModuleName is greater than 0, 'name' will hold the module name. -typedef struct { - u8 name_length; - char name[]; -} NsoModuleName; - -NXDT_ASSERT(NsoModuleName, 0x1); - /// Placed at the very start of the decompressed .text segment. typedef struct { - u32 entry_point; - u32 mod_offset; ///< NsoModHeader block offset (relative to the start of this header). Almost always set to 0x8 (the size of this struct). + u32 version; ///< Usually set to 0 or a branch instruction (0x14000002). Set to 1 or 0x14000003 if a NsoNnSdkVersion block is available. + s32 mod_offset; ///< NsoModHeader block offset (relative to the start of this header). Almost always set to 0x8 (the size of this struct). } NsoModStart; NXDT_ASSERT(NsoModStart, 0x8); @@ -121,6 +113,16 @@ typedef struct { NXDT_ASSERT(NsoModHeader, 0x1C); +/// Only available in 17.0.0+ binaries. Holds the nnSdk version used to build this NRO. +/// This is usually placed right after the NsoModHeader block. +typedef struct { + u32 major; + u32 minor; + u32 micro; +} NsoNnSdkVersion; + +NXDT_ASSERT(NsoNnSdkVersion, 0xC); + /// Placed at the start of the decompressed .rodata segment + 0x4. /// If the 'name_length' element is greater than 0, 'name' will hold the module name. typedef struct { @@ -136,6 +138,7 @@ typedef struct { char *nso_filename; ///< Pointer to the NSO filename in the Program NCA FS section #0. NsoHeader nso_header; ///< NSO header. 
char *module_name; ///< Pointer to a dynamically allocated buffer that holds the NSO module name, if available. Otherwise, this is set to NULL. + NsoNnSdkVersion *nnsdk_version; ///< Pointer to a dynamically allocated buffer that holds the nnSdk version info, if available. Otherwise, this is set to NULL. char *module_info_name; ///< Pointer to a dynamically allocated buffer that holds the .rodata module info module name, if available. Otherwise, this is set to NULL. char *rodata_api_info_section; ///< Pointer to a dynamically allocated buffer that holds the .rodata API info section data, if available. Otherwise, this is set to NULL. ///< Middleware and GuidelineApi entries are retrieved from this section. @@ -155,6 +158,7 @@ NX_INLINE void nsoFreeContext(NsoContext *nso_ctx) { if (!nso_ctx) return; if (nso_ctx->module_name) free(nso_ctx->module_name); + if (nso_ctx->nnsdk_version) free(nso_ctx->nnsdk_version); if (nso_ctx->module_info_name) free(nso_ctx->module_info_name); if (nso_ctx->rodata_api_info_section) free(nso_ctx->rodata_api_info_section); if (nso_ctx->rodata_dynstr_section) free(nso_ctx->rodata_dynstr_section); diff --git a/source/core/lz4.c b/source/core/lz4.c index 35d7cc9..f07436a 100644 --- a/source/core/lz4.c +++ b/source/core/lz4.c @@ -1,6 +1,6 @@ /* LZ4 - Fast LZ compression algorithm - Copyright (C) 2011-2020, Yann Collet. + Copyright (C) 2011-2023, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) @@ -37,7 +37,8 @@ **************************************/ /* * LZ4_HEAPMODE : - * Select how default compression functions will allocate memory for their hash table, + * Select how stateless compression functions like `LZ4_compress_default()` + * allocate memory for their hash table, * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). 
*/ #ifndef LZ4_HEAPMODE @@ -78,7 +79,7 @@ ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) # define LZ4_FORCE_MEMORY_ACCESS 2 -# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER) # define LZ4_FORCE_MEMORY_ACCESS 1 # endif #endif @@ -105,15 +106,13 @@ # define LZ4_SRC_INCLUDED 1 #endif -#ifndef LZ4_STATIC_LINKING_ONLY -#define LZ4_STATIC_LINKING_ONLY -#endif - #ifndef LZ4_DISABLE_DEPRECATE_WARNINGS -#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */ +# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */ #endif -#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */ +#ifndef LZ4_STATIC_LINKING_ONLY +# define LZ4_STATIC_LINKING_ONLY +#endif #include /* see also "memory routines" below */ @@ -125,14 +124,17 @@ # include <intrin.h> /* only present in VS2005+ */ # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ # pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */ +# pragma warning(disable : 6239) /* disable: C6239: (<non-zero constant> && <expression>) always evaluates to the result of <expression> */ +# pragma warning(disable : 6240) /* disable: C6240: (<expression> && <non-zero constant>) always evaluates to the result of <expression> */ +# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */ #endif /* _MSC_VER */ #ifndef LZ4_FORCE_INLINE -# ifdef _MSC_VER /* Visual Studio */ +# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */ # define LZ4_FORCE_INLINE static __forceinline # else # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ +# if defined (__GNUC__) || defined (__clang__) # define LZ4_FORCE_INLINE static inline __attribute__((always_inline)) # else # define LZ4_FORCE_INLINE static inline @@ -279,7 +281,7 @@ static const int LZ4_minLength = (MFLIMIT+1); static int g_debuglog_enable = 1; # define DEBUGLOG(l, ...) 
{ \ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ - fprintf(stderr, __FILE__ ": "); \ + fprintf(stderr, __FILE__ " %i: ", __LINE__); \ fprintf(stderr, __VA_ARGS__); \ fprintf(stderr, " \n"); \ } } @@ -364,6 +366,11 @@ static unsigned LZ4_isLittleEndian(void) return one.c[0]; } +#if defined(__GNUC__) || defined(__INTEL_COMPILER) +#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__)) +#elif defined(_MSC_VER) +#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop)) +#endif #if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2) /* lie to the compiler about data alignment; use with caution */ @@ -379,14 +386,16 @@ static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ -typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign; +LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16; +LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32; +LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST; -static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; } -static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; } -static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; } +static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; } +static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; } +static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; } -static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; } -static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; } +static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; } +static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; } #else /* safe and portable access using memcpy() */ @@ -424,10 +433,22 @@ static U16 LZ4_readLE16(const void* memPtr) return LZ4_read16(memPtr); } else { const BYTE* p = (const BYTE*)memPtr; - return (U16)((U16)p[0] + (p[1]<<8)); + return (U16)((U16)p[0] | (p[1]<<8)); } } +#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT +static U32 LZ4_readLE32(const void* memPtr) +{ + if (LZ4_isLittleEndian()) { + return LZ4_read32(memPtr); + } else { + const BYTE* p = (const BYTE*)memPtr; + return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24); + } +} +#endif + static void LZ4_writeLE16(void* memPtr, U16 value) { if (LZ4_isLittleEndian()) { @@ -509,7 +530,7 @@ LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd) /* LZ4_memcpy_using_offset() presumes : * - dstEnd >= dstPtr + MINMATCH - * - there is at least 8 bytes available to write after dstEnd */ + * - there is at least 12 bytes available to write after dstEnd */ LZ4_FORCE_INLINE void LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset) { @@ -524,12 +545,12 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si case 2: LZ4_memcpy(v, srcPtr, 2); LZ4_memcpy(&v[2], srcPtr, 2); -#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */ +#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */ # pragma warning(push) # pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 
'v'. */ #endif LZ4_memcpy(&v[4], v, 4); -#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */ +#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */ # pragma warning(pop) #endif break; @@ -776,7 +797,12 @@ LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType) LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) { if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType); + +#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT + return LZ4_hash4(LZ4_readLE32(p), tableType); +#else return LZ4_hash4(LZ4_read32(p), tableType); +#endif } LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType) @@ -803,23 +829,19 @@ LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableT } } +/* LZ4_putPosition*() : only used in byPtr mode */ LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, - void* tableBase, tableType_t const tableType, - const BYTE* srcBase) + void* tableBase, tableType_t const tableType) { - switch (tableType) - { - case clearedTable: { /* illegal! */ assert(0); return; } - case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; } - case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; } - case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; } - } + const BYTE** const hashTable = (const BYTE**)tableBase; + assert(tableType == byPtr); (void)tableType; + hashTable[h] = p; } -LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType) { U32 const h = LZ4_hashPosition(p, tableType); - LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); + LZ4_putPositionOnHash(p, h, tableBase, tableType); } /* LZ4_getIndexOnHash() : @@ -844,20 +866,18 @@ LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_ assert(0); return 0; /* forbidden case */ } -static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase) +static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType) { - if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; } - if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; } - { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ + assert(tableType == byPtr); (void)tableType; + { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; } } LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, - const void* tableBase, tableType_t tableType, - const BYTE* srcBase) + const void* tableBase, tableType_t tableType) { U32 const h = LZ4_hashPosition(p, tableType); - return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); + return LZ4_getPositionOnHash(h, tableBase, tableType); } LZ4_FORCE_INLINE void @@ -901,9 +921,9 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx, cctx->dictSize = 0; } -/** LZ4_compress_generic() : +/** LZ4_compress_generic_validated() : * inlined, to ensure branches are decided at compilation time. 
- Presumed already validated at this stage: + The following conditions are presumed already validated: * - source != NULL * - inputSize > 0 */ @@ -921,10 +941,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated( const int acceleration) { int result; - const BYTE* ip = (const BYTE*) source; + const BYTE* ip = (const BYTE*)source; U32 const startIndex = cctx->currentOffset; - const BYTE* base = (const BYTE*) source - startIndex; + const BYTE* base = (const BYTE*)source - startIndex; const BYTE* lowLimit; const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx; @@ -932,7 +952,8 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated( dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary; const U32 dictSize = dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize; - const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */ + const U32 dictDelta = + (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */ int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx); U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */ @@ -957,11 +978,11 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated( DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType); assert(ip != NULL); + if (tableType == byU16) assert(inputSize<LZ4_64Klimit); - if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */ - if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */ assert(acceleration >= 1); lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? 
dictSize : 0); @@ -981,7 +1002,12 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated( if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ /* First Byte */ - LZ4_putPosition(ip, cctx->hashTable, tableType, base); + { U32 const h = LZ4_hashPosition(ip, tableType); + if (tableType == byPtr) { + LZ4_putPositionOnHash(ip, h, cctx->hashTable, byPtr); + } else { + LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType); + } } ip++; forwardH = LZ4_hashPosition(ip, tableType); /* Main Loop */ @@ -1004,9 +1030,9 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated( if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals; assert(ip < mflimitPlusOne); - match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base); + match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType); forwardH = LZ4_hashPosition(forwardIp, tableType); - LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base); + LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType); } while ( (match+LZ4_DISTANCE_MAX < ip) || (LZ4_read32(match) != LZ4_read32(ip)) ); @@ -1077,7 +1103,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated( /* Catch up */ filledIp = ip; - while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } + assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */ + if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) { + do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1]))); + } /* Encode Literals */ { unsigned const litLength = (unsigned)(ip - anchor); @@ -1092,7 +1121,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated( goto _last_literals; } if (litLength >= RUN_MASK) { - int len = (int)(litLength - RUN_MASK); + unsigned len = litLength - RUN_MASK; *token = (RUN_MASK<<ML_BITS); for(; len >= 255 ; len-=255) *op++ = 255; *op++ = (BYTE)len; @@ -1204,13 +1233,19 @@ _next_match: if (ip >= mflimitPlusOne) break; /* Fill table */ - LZ4_putPosition(ip-2, cctx->hashTable, tableType, base); + { U32 const h = LZ4_hashPosition(ip-2, tableType); + if (tableType == byPtr) { + LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr); + } else { + U32 const idx = (U32)((ip-2) - base); + LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType); + } } /* Test next position */ if (tableType == byPtr) { - match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); - LZ4_putPosition(ip, cctx->hashTable, tableType, base); + match = LZ4_getPosition(ip, cctx->hashTable, tableType); + LZ4_putPosition(ip, cctx->hashTable, tableType); if ( (match+LZ4_DISTANCE_MAX >= ip) && (LZ4_read32(match) == LZ4_read32(ip)) ) { token=op++; *token=0; goto _next_match; } @@ -1224,6 +1259,7 @@ _next_match: if (dictDirective == usingDictCtx) { if (matchIndex < startIndex) { /* there was no match, try the dictionary */ + assert(tableType == byU32); matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); match = dictBase + matchIndex; lowLimit = dictionary; /* required for match length counter */ @@ -1377,9 +1413,10 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int */ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) { - LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse; + LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse; if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT; if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX; + assert(ctx != NULL); if (dstCapacity >= LZ4_compressBound(srcSize)) { 
if (srcSize < LZ4_64Klimit) { @@ -1413,17 +1450,17 @@ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst } -int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) +int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration) { int result; #if (LZ4_HEAPMODE) - LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ if (ctxPtr == NULL) return 0; #else LZ4_stream_t ctx; LZ4_stream_t* const ctxPtr = &ctx; #endif - result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); + result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration); #if (LZ4_HEAPMODE) FREEMEM(ctxPtr); @@ -1432,43 +1469,51 @@ int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutp } -int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize) +int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity) { - return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1); + return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1); } /* Note!: This function leaves the stream in an unclean/broken state! * It is not safe to subsequently use the same state with a _fastReset() or * _continue() call without resetting it. */ -static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize) +static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration) { void* const s = LZ4_initStream(state, sizeof (*state)); assert(s != NULL); (void)s; if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ - return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1); + return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration); } else { if (*srcSizePtr < LZ4_64Klimit) { - return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1); + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration); } else { tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32; - return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1); + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration); } } } +int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration) +{ + int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration); + /* clean the state on exit */ + LZ4_initStream(state, sizeof (LZ4_stream_t)); + return r; +} + int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) { #if (LZ4_HEAPMODE) - LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ if (ctx == NULL) return 0; #else LZ4_stream_t ctxBody; - LZ4_stream_t* ctx = &ctxBody; + LZ4_stream_t* const ctx = &ctxBody; #endif - int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize); + int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1); #if (LZ4_HEAPMODE) FREEMEM(ctx); @@ -1537,14 +1582,17 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream) #endif +typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e; #define HASH_UNIT sizeof(reg_t) -int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) +int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict, + const char* dictionary, int dictSize, + LoadDict_mode_e _ld) { - LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse; + LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; const tableType_t tableType = byU32; const BYTE* p = (const BYTE*)dictionary; const BYTE* const dictEnd = p + dictSize; - const BYTE* base; + U32 idx32; DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict); @@ -1567,19 +1615,46 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) } if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; - base = dictEnd - dict->currentOffset; dict->dictionary = p; dict->dictSize = (U32)(dictEnd - p); dict->tableType = (U32)tableType; + idx32 = dict->currentOffset - dict->dictSize; while (p <= dictEnd-HASH_UNIT) { - LZ4_putPosition(p, dict->hashTable, tableType, base); - p+=3; + U32 const h = LZ4_hashPosition(p, tableType); + /* Note: overwriting => favors positions end of dictionary */ + LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType); + p+=3; idx32+=3; + } + + if (_ld == _ld_slow) { + /* Fill hash table with additional references, to improve compression capability */ + p = dict->dictionary; + idx32 = dict->currentOffset - dict->dictSize; + while (p <= dictEnd-HASH_UNIT) { + U32 const h = LZ4_hashPosition(p, tableType); + U32 const limit = dict->currentOffset - 64 KB; + if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) { + /* Note: not overwriting => favors positions beginning of dictionary */ + LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType); + } + p++; idx32++; + } } return (int)dict->dictSize; } +int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) +{ + return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast); +} + +int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* 
dictionary, int dictSize) +{ + return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow); +} + void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) { const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL : @@ -1711,7 +1786,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, /* Hidden debug function, to force-test external dictionary mode */ int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) { - LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse; + LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse; int result; LZ4_renormDictT(streamPtr, srcSize); @@ -1774,7 +1849,7 @@ typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive; * does not know end of input * presumes input is well formed * note : will consume at least one byte */ -size_t read_long_length_no_check(const BYTE** pp) +static size_t read_long_length_no_check(const BYTE** pp) { size_t b, l = 0; do { b = **pp; (*pp)++; l += b; } while (b==255); @@ -1911,6 +1986,17 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit, if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */ return rvl_error; } + s = **ip; + (*ip)++; + length += s; + if (unlikely((*ip) > ilimit)) { /* read limit reached */ + return rvl_error; + } + /* accumulator overflow detection (32-bit mode only) */ + if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) { + return rvl_error; + } + if (likely(s != 255)) return length; do { s = **ip; (*ip)++; @@ -1919,10 +2005,10 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit, return rvl_error; } /* accumulator overflow detection (32-bit mode only) */ - if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) { + if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) { return rvl_error; } - } while (s==255); + } while (s == 255); return length; } @@ -1988,63 +2074,73 @@ LZ4_decompress_generic( * note : fast loop may show a regression for some client arm chips. 
*/ #if LZ4_FAST_DEC_LOOP if ((oend - op) < FASTLOOP_SAFE_DISTANCE) { - DEBUGLOG(6, "skip fast decode loop"); + DEBUGLOG(6, "move to safe decode loop"); goto safe_decode; } /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */ + DEBUGLOG(6, "using fast decode loop"); while (1) { /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */ assert(oend - op >= FASTLOOP_SAFE_DISTANCE); assert(ip < iend); token = *ip++; length = token >> ML_BITS; /* literal length */ + DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length); /* decode literal length */ if (length == RUN_MASK) { size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1); - if (addl == rvl_error) { goto _output_error; } + if (addl == rvl_error) { + DEBUGLOG(6, "error reading long literal length"); + goto _output_error; + } length += addl; if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ /* copy literals */ - cpy = op+length; LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); - if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; } - LZ4_wildCopy32(op, ip, cpy); - ip += length; op = cpy; - } else { - cpy = op+length; - DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length); + if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; } + LZ4_wildCopy32(op, ip, op+length); + ip += length; op += length; + } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) { /* We don't need to check oend, since we check it once for each loop below */ - if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; } + DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length); /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */ LZ4_memcpy(op, ip, 16); - ip += length; op = cpy; + ip += length; op += length; + } else { + goto safe_literal_copy; } /* get offset */ offset = LZ4_readLE16(ip); ip+=2; + DEBUGLOG(6, "blockPos%6u: offset = %u", (unsigned)(op-(BYTE*)dst), (unsigned)offset); match = op - offset; assert(match <= op); /* overflow check */ /* get matchlength */ length = token & ML_MASK; + DEBUGLOG(7, " match length token = %u (len==%u)", (unsigned)length, (unsigned)length+MINMATCH); if (length == ML_MASK) { size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0); - if (addl == rvl_error) { goto _output_error; } + if (addl == rvl_error) { + DEBUGLOG(5, "error reading long match length"); + goto _output_error; + } length += addl; length += MINMATCH; + DEBUGLOG(7, " long match length == %u", (unsigned)length); if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */ - if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { goto safe_match_copy; } } else { length += MINMATCH; if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { + DEBUGLOG(7, "moving to safe_match_copy (ml==%u)", (unsigned)length); goto safe_match_copy; } @@ -2062,7 +2158,10 @@ LZ4_decompress_generic( continue; } } } - if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */ + if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) { + DEBUGLOG(5, "Error : pos=%zi, offset=%zi => 
outside buffers", op-lowPrefix, op-match); + goto _output_error; + } /* match starting within external dictionary */ if ((dict==usingExtDict) && (match < lowPrefix)) { assert(dictEnd != NULL); @@ -2071,7 +2170,8 @@ LZ4_decompress_generic( DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd"); length = MIN(length, (size_t)(oend-op)); } else { - goto _output_error; /* end-of-block condition violated */ + DEBUGLOG(6, "end-of-block condition violated") + goto _output_error; } } if (length <= (size_t)(lowPrefix-match)) { @@ -2111,10 +2211,12 @@ LZ4_decompress_generic( #endif /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */ + DEBUGLOG(6, "using safe decode loop"); while (1) { assert(ip < iend); token = *ip++; length = token >> ML_BITS; /* literal length */ + DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length); /* A two-stage shortcut for the most common case: * 1) If the literal length is 0..14, and there is enough space, @@ -2135,6 +2237,7 @@ LZ4_decompress_generic( /* The second stage: prepare for match copying, decode full info. * If it doesn't work out, the info won't be wasted. */ length = token & ML_MASK; /* match length */ + DEBUGLOG(7, "blockPos%6u: matchLength token = %u (len=%u)", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4); offset = LZ4_readLE16(ip); ip += 2; match = op - offset; assert(match <= op); /* check overflow */ @@ -2166,11 +2269,12 @@ LZ4_decompress_generic( if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ } - /* copy literals */ - cpy = op+length; #if LZ4_FAST_DEC_LOOP safe_literal_copy: #endif + /* copy literals */ + cpy = op+length; + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) { /* We've either hit the input parsing restriction or the output parsing restriction. @@ -2206,9 +2310,10 @@ LZ4_decompress_generic( * so check that we exactly consume the input and don't overrun the output buffer. 
*/ if ((ip+length != iend) || (cpy > oend)) { - DEBUGLOG(6, "should have been last run of literals") - DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend); - DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend); + DEBUGLOG(5, "should have been last run of literals") + DEBUGLOG(5, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend); + DEBUGLOG(5, "or cpy(%p) > (oend-MFLIMIT)(%p)", cpy, oend-MFLIMIT); + DEBUGLOG(5, "after writing %u bytes / %i bytes available", (unsigned)(op-(BYTE*)dst), outputSize); goto _output_error; } } @@ -2234,6 +2339,7 @@ LZ4_decompress_generic( /* get matchlength */ length = token & ML_MASK; + DEBUGLOG(7, "blockPos%6u: matchLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length); _copy_match: if (length == ML_MASK) { @@ -2323,7 +2429,7 @@ LZ4_decompress_generic( while (op < cpy) { *op++ = *match++; } } else { LZ4_memcpy(op, match, 8); - if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); } + if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); } } op = cpy; /* wildcopy correction */ } @@ -2418,6 +2524,7 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const void* dictStart, size_t dictSize) { + DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict"); return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, decode_full_block, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); diff --git a/source/core/nso.c b/source/core/nso.c index 1908cc3..e4884ac 100644 --- a/source/core/nso.c +++ b/source/core/nso.c @@ -22,17 +22,51 @@ #include #include +/* Type definitions. */ + +typedef enum { + NsoSegmentType_Text = 0, + NsoSegmentType_Rodata = 1, + NsoSegmentType_Data = 2, + NsoSegmentType_Count = 3 ///< Total values supported by this enum. +} NsoSegmentType; + +typedef struct { + u8 type; ///< NsoSegmentType. + const char *name; ///< Pointer to a string that holds the segment name. + NsoSegmentInfo info; ///< Copied from the NSO header. + u8 *data; ///< Dynamically allocated buffer with the decompressed segment data. +} NsoSegment; + +/* Global variables. */ + +#if LOG_LEVEL < LOG_LEVEL_NONE +static const char *g_nsoSegmentTypeNames[NsoSegmentType_Count] = { + [NsoSegmentType_Text] = ".text", + [NsoSegmentType_Rodata] = ".rodata", + [NsoSegmentType_Data] = ".data", +}; +#endif + /* Function prototypes. 
*/ static bool nsoGetModuleName(NsoContext *nso_ctx); -static u8 *nsoGetRodataSegment(NsoContext *nso_ctx); -static bool nsoGetModuleInfoName(NsoContext *nso_ctx, u8 *rodata_buf); -static bool nsoGetSectionFromRodataSegment(NsoContext *nso_ctx, u8 *rodata_buf, u8 **section_ptr, u64 section_offset, u64 section_size); + +static bool nsoGetSegment(NsoContext *nso_ctx, NsoSegment *out, u8 type); +NX_INLINE void nsoFreeSegment(NsoSegment *segment); + +NX_INLINE bool nsoIsNnSdkVersionWithinSegment(const NsoModStart *mod_start, const NsoSegment *segment); +static bool nsoGetNnSdkVersion(NsoContext *nso_ctx, const NsoModStart *mod_start, const NsoSegment *segment); + +static bool nsoGetModuleInfoName(NsoContext *nso_ctx, const NsoSegment *segment); + +static bool nsoGetSectionFromRodataSegment(NsoContext *nso_ctx, const NsoSectionInfo *section_info, const NsoSegment *segment, u8 **out_ptr); bool nsoInitializeContext(NsoContext *out, PartitionFileSystemContext *pfs_ctx, PartitionFileSystemEntry *pfs_entry) { - u8 *rodata_buf = NULL; - bool success = false, dump_nso_header = false; + NsoModStart mod_start = {0}; + NsoSegment segment = {0}; + bool success = false, dump_nso_header = false, read_nnsdk_version = false; if (!out || !pfs_ctx || !ncaStorageIsValidContext(&(pfs_ctx->storage_ctx)) || !pfs_ctx->nca_fs_ctx->nca_ctx || \ pfs_ctx->nca_fs_ctx->nca_ctx->content_type != NcmContentType_Program || !pfs_ctx->offset || !pfs_ctx->size || !pfs_ctx->is_exefs || \ @@ -63,100 +97,103 @@ bool nsoInitializeContext(NsoContext *out, PartitionFileSystemContext *pfs_ctx, goto end; } + dump_nso_header = true; + /* Verify NSO header. */ if (__builtin_bswap32(out->nso_header.magic) != NSO_HEADER_MAGIC) { LOG_MSG_ERROR("Invalid NSO \"%s\" header magic word! (0x%08X != 0x%08X).", out->nso_filename, __builtin_bswap32(out->nso_header.magic), __builtin_bswap32(NSO_HEADER_MAGIC)); - dump_nso_header = true; goto end; } - if (out->nso_header.text_segment_info.file_offset < sizeof(NsoHeader) || !out->nso_header.text_segment_info.size || \ - ((out->nso_header.flags & NsoFlags_TextCompress) && (!out->nso_header.text_file_size || out->nso_header.text_file_size > out->nso_header.text_segment_info.size)) || \ - (!(out->nso_header.flags & NsoFlags_TextCompress) && out->nso_header.text_file_size != out->nso_header.text_segment_info.size) || \ - (out->nso_header.text_segment_info.file_offset + out->nso_header.text_file_size) > pfs_entry->size) - { - LOG_MSG_ERROR("Invalid .text segment offset/size for NSO \"%s\"! (0x%X, 0x%X, 0x%X).", out->nso_filename, out->nso_header.text_segment_info.file_offset, \ - out->nso_header.text_file_size, out->nso_header.text_segment_info.size); - dump_nso_header = true; - goto end; +#define NSO_VERIFY_SEGMENT_INFO(name, flag) \ + if (out->nso_header.name##_segment_info.file_offset < sizeof(NsoHeader) || !out->nso_header.name##_segment_info.size || \ + ((out->nso_header.flags & NsoFlags_##flag##Compress) && (!out->nso_header.name##_file_size || out->nso_header.name##_file_size > out->nso_header.name##_segment_info.size)) || \ + (!(out->nso_header.flags & NsoFlags_##flag##Compress) && out->nso_header.name##_file_size != out->nso_header.name##_segment_info.size) || \ + (out->nso_header.name##_segment_info.file_offset + out->nso_header.name##_file_size) > pfs_entry->size) { \ + LOG_MSG_ERROR("Invalid ." #name " segment offset/size for NSO \"%s\"! 
(0x%X, 0x%X, 0x%X).", out->nso_filename, out->nso_header.name##_segment_info.file_offset, \ + out->nso_header.name##_file_size, out->nso_header.name##_segment_info.size); \ + goto end; \ } - if (out->nso_header.rodata_segment_info.file_offset < sizeof(NsoHeader) || !out->nso_header.rodata_segment_info.size || \ - ((out->nso_header.flags & NsoFlags_RoCompress) && (!out->nso_header.rodata_file_size || out->nso_header.rodata_file_size > out->nso_header.rodata_segment_info.size)) || \ - (!(out->nso_header.flags & NsoFlags_RoCompress) && out->nso_header.rodata_file_size != out->nso_header.rodata_segment_info.size) || \ - (out->nso_header.rodata_segment_info.file_offset + out->nso_header.rodata_file_size) > pfs_entry->size) - { - LOG_MSG_ERROR("Invalid .rodata segment offset/size for NSO \"%s\"! (0x%X, 0x%X, 0x%X).", out->nso_filename, out->nso_header.rodata_segment_info.file_offset, \ - out->nso_header.rodata_file_size, out->nso_header.rodata_segment_info.size); - dump_nso_header = true; - goto end; +#define NSO_VERIFY_RODATA_SECTION_INFO(name) \ + if (out->nso_header.name##_section_info.size && (out->nso_header.name##_section_info.offset + out->nso_header.name##_section_info.size) > out->nso_header.rodata_segment_info.size) { \ + LOG_MSG_ERROR("Invalid ." #name " section offset/size for NSO \"%s\"! (0x%X, 0x%X).", out->nso_filename, out->nso_header.name##_section_info.offset, out->nso_header.name##_section_info.size); \ + goto end; \ } - if (out->nso_header.data_segment_info.file_offset < sizeof(NsoHeader) || !out->nso_header.data_segment_info.size || \ - ((out->nso_header.flags & NsoFlags_DataCompress) && (!out->nso_header.data_file_size || out->nso_header.data_file_size > out->nso_header.data_segment_info.size)) || \ - (!(out->nso_header.flags & NsoFlags_DataCompress) && out->nso_header.data_file_size != out->nso_header.data_segment_info.size) || \ - (out->nso_header.data_segment_info.file_offset + out->nso_header.data_file_size) > pfs_entry->size) - { - LOG_MSG_ERROR("Invalid .data segment offset/size for NSO \"%s\"! (0x%X, 0x%X, 0x%X).", out->nso_filename, out->nso_header.data_segment_info.file_offset, \ - out->nso_header.data_file_size, out->nso_header.data_segment_info.size); - dump_nso_header = true; - goto end; - } +#define NSO_GET_RODATA_SECTION(name) \ + do { \ + if (!nsoGetSectionFromRodataSegment(out, &(out->nso_header.name##_section_info), &segment, (u8**)&(out->rodata_##name##_section))) goto end; \ + out->rodata_##name##_section_size = out->nso_header.name##_section_info.size; \ + } while(0) + /* Verify NSO segment info. */ + NSO_VERIFY_SEGMENT_INFO(text, Text); + NSO_VERIFY_SEGMENT_INFO(rodata, Ro); + NSO_VERIFY_SEGMENT_INFO(data, Data); + + /* Verify NSO module name properties. */ if (out->nso_header.module_name_size > 1 && (out->nso_header.module_name_offset < sizeof(NsoHeader) || (out->nso_header.module_name_offset + out->nso_header.module_name_size) > pfs_entry->size)) { LOG_MSG_ERROR("Invalid module name offset/size for NSO \"%s\"! (0x%X, 0x%X).", out->nso_filename, out->nso_header.module_name_offset, out->nso_header.module_name_size); - dump_nso_header = true; goto end; } - if (out->nso_header.api_info_section_info.size && (out->nso_header.api_info_section_info.offset + out->nso_header.api_info_section_info.size) > out->nso_header.rodata_segment_info.size) - { - LOG_MSG_ERROR("Invalid .api_info section offset/size for NSO \"%s\"! 
(0x%X, 0x%X).", out->nso_filename, out->nso_header.api_info_section_info.offset, out->nso_header.api_info_section_info.size); - dump_nso_header = true; - goto end; - } - - if (out->nso_header.dynstr_section_info.size && (out->nso_header.dynstr_section_info.offset + out->nso_header.dynstr_section_info.size) > out->nso_header.rodata_segment_info.size) - { - LOG_MSG_ERROR("Invalid .dynstr section offset/size for NSO \"%s\"! (0x%X, 0x%X).", out->nso_filename, out->nso_header.dynstr_section_info.offset, out->nso_header.dynstr_section_info.size); - dump_nso_header = true; - goto end; - } - - if (out->nso_header.dynsym_section_info.size && (out->nso_header.dynsym_section_info.offset + out->nso_header.dynsym_section_info.size) > out->nso_header.rodata_segment_info.size) - { - LOG_MSG_ERROR("Invalid .dynsym section offset/size for NSO \"%s\"! (0x%X, 0x%X).", out->nso_filename, out->nso_header.dynsym_section_info.offset, out->nso_header.dynsym_section_info.size); - dump_nso_header = true; - goto end; - } + /* Verify section info blocks for the .rodata segment. */ + NSO_VERIFY_RODATA_SECTION_INFO(api_info); + NSO_VERIFY_RODATA_SECTION_INFO(dynstr); + NSO_VERIFY_RODATA_SECTION_INFO(dynsym); /* Get module name. */ if (!nsoGetModuleName(out)) goto end; + /* Get .text segment. */ + if (!nsoGetSegment(out, &segment, NsoSegmentType_Text)) goto end; + + /* Get NsoModStart block. */ + memcpy(&mod_start, segment.data, sizeof(NsoModStart)); + + /* Check if a nnSdk version struct exists within this NRO. */ + read_nnsdk_version = ((mod_start.version & 1) != 0); + + /* Check if the nnSdk version struct is located within the .text segment. */ + /* If so, we'll retrieve it immediately. */ + if (read_nnsdk_version && nsoIsNnSdkVersionWithinSegment(&mod_start, &segment) && !nsoGetNnSdkVersion(out, &mod_start, &segment)) goto end; + /* Get .rodata segment. */ - if (!(rodata_buf = nsoGetRodataSegment(out))) goto end; + if (!nsoGetSegment(out, &segment, NsoSegmentType_Rodata)) goto end; - /* Get module info name. */ - if (!nsoGetModuleInfoName(out, rodata_buf)) goto end; + /* Check if we didn't read the nnSdk version struct from the .text segment. */ + if (read_nnsdk_version && !out->nnsdk_version) + { + /* Check if the nnSdk version struct is located within the .rodata segment. */ + if (!nsoIsNnSdkVersionWithinSegment(&mod_start, &segment)) + { + LOG_MSG_ERROR("nnSdk version struct not located within .text or .rodata segments in NSO \"%s\".", out->nso_filename); + goto end; + } - /* Get .api_info section data. */ - if (!nsoGetSectionFromRodataSegment(out, rodata_buf, (u8**)&(out->rodata_api_info_section), out->nso_header.api_info_section_info.offset, out->nso_header.api_info_section_info.size)) goto end; - out->rodata_api_info_section_size = out->nso_header.api_info_section_info.size; + /* Retrieve nnSdk version struct data from the .rodata segment. */ + if (!nsoGetNnSdkVersion(out, &mod_start, &segment)) goto end; + } - /* Get .dynstr section data. */ - if (!nsoGetSectionFromRodataSegment(out, rodata_buf, (u8**)&(out->rodata_dynstr_section), out->nso_header.dynstr_section_info.offset, out->nso_header.dynstr_section_info.size)) goto end; - out->rodata_dynstr_section_size = out->nso_header.dynstr_section_info.size; + /* Get module info name from the .rodata segment. */ + if (!nsoGetModuleInfoName(out, &segment)) goto end; - /* Get .dynsym section data. 
*/ - if (!nsoGetSectionFromRodataSegment(out, rodata_buf, &(out->rodata_dynsym_section), out->nso_header.dynsym_section_info.offset, out->nso_header.dynsym_section_info.size)) goto end; - out->rodata_dynsym_section_size = out->nso_header.dynsym_section_info.size; + /* Get sections from the .rodata segment. */ + NSO_GET_RODATA_SECTION(api_info); + NSO_GET_RODATA_SECTION(dynstr); + NSO_GET_RODATA_SECTION(dynsym); success = true; +#undef NSO_GET_RODATA_SECTION +#undef NSO_VERIFY_RODATA_SECTION_INFO +#undef NSO_VERIFY_SEGMENT_INFO + end: - if (rodata_buf) free(rodata_buf); + nsoFreeSegment(&segment); if (!success) { @@ -172,24 +209,8 @@ static bool nsoGetModuleName(NsoContext *nso_ctx) { if (nso_ctx->nso_header.module_name_offset < sizeof(NsoHeader) || nso_ctx->nso_header.module_name_size <= 1) return true; - NsoModuleName module_name = {0}; - - /* Get module name. */ - if (!pfsReadEntryData(nso_ctx->pfs_ctx, nso_ctx->pfs_entry, &module_name, sizeof(NsoModuleName), nso_ctx->nso_header.module_name_offset)) - { - LOG_MSG_ERROR("Failed to read NSO \"%s\" module name length!", nso_ctx->nso_filename); - return false; - } - - /* Verify module name length. */ - if (module_name.name_length != ((u8)nso_ctx->nso_header.module_name_size - 1)) - { - LOG_MSG_ERROR("NSO \"%s\" module name length mismatch! (0x%02X != 0x%02X).", nso_ctx->nso_filename, module_name.name_length, (u8)nso_ctx->nso_header.module_name_size - 1); - return false; - } - /* Allocate memory for the module name. */ - nso_ctx->module_name = calloc(nso_ctx->nso_header.module_name_size, sizeof(char)); + nso_ctx->module_name = calloc(nso_ctx->nso_header.module_name_size + 1, sizeof(char)); if (!nso_ctx->module_name) { LOG_MSG_ERROR("Failed to allocate memory for NSO \"%s\" module name!", nso_ctx->nso_filename); @@ -197,7 +218,7 @@ static bool nsoGetModuleName(NsoContext *nso_ctx) } /* Read module name string. */ - if (!pfsReadEntryData(nso_ctx->pfs_ctx, nso_ctx->pfs_entry, nso_ctx->module_name, module_name.name_length, nso_ctx->nso_header.module_name_offset + 1)) + if (!pfsReadEntryData(nso_ctx->pfs_ctx, nso_ctx->pfs_entry, nso_ctx->module_name, nso_ctx->nso_header.module_name_size, nso_ctx->nso_header.module_name_offset)) { LOG_MSG_ERROR("Failed to read NSO \"%s\" module name string!", nso_ctx->nso_filename); return false; @@ -206,75 +227,150 @@ static bool nsoGetModuleName(NsoContext *nso_ctx) return true; } -static u8 *nsoGetRodataSegment(NsoContext *nso_ctx) +static bool nsoGetSegment(NsoContext *nso_ctx, NsoSegment *out, u8 type) { + if (!nso_ctx || !out || type >= NsoSegmentType_Count) + { + LOG_MSG_ERROR("Invalid parameters!"); + return false; + } + + const char *segment_name = g_nsoSegmentTypeNames[type]; + + const NsoSegmentInfo *segment_info = (type == NsoSegmentType_Text ? &(nso_ctx->nso_header.text_segment_info) : \ + (type == NsoSegmentType_Rodata ? &(nso_ctx->nso_header.rodata_segment_info) : &(nso_ctx->nso_header.data_segment_info))); + + u32 segment_file_size = (type == NsoSegmentType_Text ? nso_ctx->nso_header.text_file_size : \ + (type == NsoSegmentType_Rodata ? nso_ctx->nso_header.rodata_file_size : nso_ctx->nso_header.data_file_size)); + + const u8 *segment_hash = (type == NsoSegmentType_Text ? nso_ctx->nso_header.text_segment_hash : \ + (type == NsoSegmentType_Rodata ? 
nso_ctx->nso_header.rodata_segment_hash : nso_ctx->nso_header.data_segment_hash)); + int lz4_res = 0; - bool compressed = (nso_ctx->nso_header.flags & NsoFlags_RoCompress), verify = (nso_ctx->nso_header.flags & NsoFlags_RoHash); + bool compressed = (nso_ctx->nso_header.flags & BIT(type)), verify = (nso_ctx->nso_header.flags & BIT(type + 3)); - u8 *rodata_buf = NULL; - u64 rodata_buf_size = (compressed ? LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(nso_ctx->nso_header.rodata_segment_info.size) : nso_ctx->nso_header.rodata_segment_info.size); + u8 *buf = NULL; + u64 buf_size = (compressed ? LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(segment_info->size) : segment_info->size); - u8 *rodata_read_ptr = NULL; - u64 rodata_read_size = (compressed ? nso_ctx->nso_header.rodata_file_size : nso_ctx->nso_header.rodata_segment_info.size); + u8 *read_ptr = NULL; + u64 read_size = (compressed ? segment_file_size : segment_info->size); - u8 rodata_hash[SHA256_HASH_SIZE] = {0}; + u8 hash[SHA256_HASH_SIZE] = {0}; bool success = false; - /* Allocate memory for the .rodata buffer. */ - if (!(rodata_buf = calloc(rodata_buf_size, sizeof(u8)))) + /* Clear output struct. */ + nsoFreeSegment(out); + + /* Allocate memory for the segment buffer. */ + if (!(buf = calloc(buf_size, sizeof(u8)))) { - LOG_MSG_ERROR("Failed to allocate 0x%lX bytes for the .rodata segment in NSO \"%s\"!", rodata_buf_size, nso_ctx->nso_filename); + LOG_MSG_ERROR("Failed to allocate 0x%lX bytes for the %s segment in NSO \"%s\"!", buf_size, segment_name, nso_ctx->nso_filename); - return NULL; + return false; } - rodata_read_ptr = (compressed ? (rodata_buf + (rodata_buf_size - nso_ctx->nso_header.rodata_file_size)) : rodata_buf); + read_ptr = (compressed ? (buf + (buf_size - segment_file_size)) : buf); - /* Read .rodata segment data. */ - if (!pfsReadEntryData(nso_ctx->pfs_ctx, nso_ctx->pfs_entry, rodata_read_ptr, rodata_read_size, nso_ctx->nso_header.rodata_segment_info.file_offset)) + /* Read segment data. */ + if (!pfsReadEntryData(nso_ctx->pfs_ctx, nso_ctx->pfs_entry, read_ptr, read_size, segment_info->file_offset)) { - LOG_MSG_ERROR("Failed to read .rodata segment in NSO \"%s\"!", nso_ctx->nso_filename); + LOG_MSG_ERROR("Failed to read %s segment in NSO \"%s\"!", segment_name, nso_ctx->nso_filename); goto end; } - if (compressed) + /* Decompress segment data in-place. */ + if (compressed && (lz4_res = LZ4_decompress_safe((char*)read_ptr, (char*)buf, (int)segment_file_size, (int)buf_size)) != (int)segment_info->size) { - /* Decompress .rodata segment in-place. */ - if ((lz4_res = LZ4_decompress_safe((char*)rodata_read_ptr, (char*)rodata_buf, (int)nso_ctx->nso_header.rodata_file_size, (int)rodata_buf_size)) != \ - (int)nso_ctx->nso_header.rodata_segment_info.size) - { - LOG_MSG_ERROR("LZ4 decompression failed for NSO \"%s\"! (%d).", nso_ctx->nso_filename, lz4_res); - goto end; - } + LOG_MSG_ERROR("LZ4 decompression failed for %s segment in NSO \"%s\"! (%d).", segment_name, nso_ctx->nso_filename, lz4_res); + goto end; } if (verify) { - /* Verify .rodata segment hash. */ + /* Verify segment data hash. 
*/ + sha256CalculateHash(hash, buf, segment_info->size); + if (memcmp(hash, segment_hash, SHA256_HASH_SIZE) != 0) { - LOG_MSG_ERROR(".rodata segment checksum mismatch for NSO \"%s\"!", nso_ctx->nso_filename); + LOG_MSG_ERROR("%s segment checksum mismatch for NSO \"%s\"!", segment_name, nso_ctx->nso_filename); goto end; } } + /* Fill output struct. */ + out->type = type; + out->name = segment_name; + memcpy(&(out->info), segment_info, sizeof(NsoSegmentInfo)); + out->data = buf; + success = true; end: - if (!success && rodata_buf) - { - free(rodata_buf); - rodata_buf = NULL; - } + if (!success && buf) free(buf); - return rodata_buf; + return success; } -static bool nsoGetModuleInfoName(NsoContext *nso_ctx, u8 *rodata_buf) +NX_INLINE void nsoFreeSegment(NsoSegment *segment) { - NsoModuleInfo *module_info = (NsoModuleInfo*)(rodata_buf + 0x4); - if (!module_info->name_length) return true; + if (!segment) return; + if (segment->data) free(segment->data); + memset(segment, 0, sizeof(NsoSegment)); +} + +NX_INLINE bool nsoIsNnSdkVersionWithinSegment(const NsoModStart *mod_start, const NsoSegment *segment) +{ + return (mod_start && segment && mod_start->mod_offset >= (s32)segment->info.memory_offset && \ + (mod_start->mod_offset + (s32)sizeof(NsoModHeader) + (s32)sizeof(NsoNnSdkVersion)) <= (s32)(segment->info.memory_offset + segment->info.size)); +} + +static bool nsoGetNnSdkVersion(NsoContext *nso_ctx, const NsoModStart *mod_start, const NsoSegment *segment) +{ + if (!nso_ctx || !mod_start || !segment || !segment->data) + { + LOG_MSG_ERROR("Invalid parameters!"); + return false; + } + + /* Return immediately if the nnSdk version struct has already been retrieved. */ + if (nso_ctx->nnsdk_version) return true; + + /* Calculate virtual offset for the nnSdk version struct and check if it is within range. */ + u32 nnsdk_ver_virt_offset = (u32)(mod_start->mod_offset + (s32)sizeof(NsoModHeader)); + if (mod_start->mod_offset < (s32)segment->info.memory_offset || (nnsdk_ver_virt_offset + sizeof(NsoNnSdkVersion)) > (segment->info.memory_offset + segment->info.size)) + { + LOG_MSG_ERROR("nnSdk version struct isn't located within %s segment in NSO \"%s\"! ([0x%X, 0x%X] not within [0x%X, 0x%X]).", segment->name, nso_ctx->nso_filename, \ + mod_start->mod_offset, nnsdk_ver_virt_offset + (u32)sizeof(NsoNnSdkVersion), segment->info.memory_offset, segment->info.memory_offset + segment->info.size); + return false; + } + + /* Allocate memory for the nnSdk version struct. */ + nso_ctx->nnsdk_version = malloc(sizeof(NsoNnSdkVersion)); + if (!nso_ctx->nnsdk_version) + { + LOG_MSG_ERROR("Failed to allocate memory for NSO \"%s\" nnSdk version struct!", nso_ctx->nso_filename); + return false; + } + + /* Calculate segment-relative offset for the nnSdk version struct and copy its data. 
*/ + u32 nnsdk_ver_phys_offset = (nnsdk_ver_virt_offset - segment->info.memory_offset); + memcpy(nso_ctx->nnsdk_version, segment->data + nnsdk_ver_phys_offset, sizeof(NsoNnSdkVersion)); + + LOG_MSG_DEBUG("nnSdk version (NSO \"%s\", %s segment, virtual offset 0x%X, physical offset 0x%X): %u.%u.%u.", nso_ctx->nso_filename, segment->name, \ + nnsdk_ver_virt_offset, nnsdk_ver_phys_offset, nso_ctx->nnsdk_version->major, nso_ctx->nnsdk_version->minor, nso_ctx->nnsdk_version->micro); + + return true; +} + +static bool nsoGetModuleInfoName(NsoContext *nso_ctx, const NsoSegment *segment) +{ + if (!nso_ctx || !segment || segment->type != NsoSegmentType_Rodata || !segment->data) + { + LOG_MSG_ERROR("Invalid parameters!"); + return false; + } + + const NsoModuleInfo *module_info = (const NsoModuleInfo*)(segment->data + 0x4); + if (!module_info->name_length || !module_info->name[0]) return true; /* Allocate memory for the module info name. */ nso_ctx->module_info_name = calloc(module_info->name_length + 1, sizeof(char)); @@ -286,23 +382,31 @@ static bool nsoGetModuleInfoName(NsoContext *nso_ctx, u8 *rodata_buf) /* Copy module info name. */ sprintf(nso_ctx->module_info_name, "%.*s", (int)module_info->name_length, module_info->name); + LOG_MSG_DEBUG("Module info name (NSO \"%s\"): \"%s\".", nso_ctx->nso_filename, nso_ctx->module_info_name); return true; } -static bool nsoGetSectionFromRodataSegment(NsoContext *nso_ctx, u8 *rodata_buf, u8 **section_ptr, u64 section_offset, u64 section_size) +static bool nsoGetSectionFromRodataSegment(NsoContext *nso_ctx, const NsoSectionInfo *section_info, const NsoSegment *segment, u8 **out_ptr) { - if (!section_size || (section_offset + section_size) > nso_ctx->nso_header.rodata_segment_info.size) return true; + if (!nso_ctx || !section_info || !segment || segment->type != NsoSegmentType_Rodata || !segment->data || !out_ptr) + { + LOG_MSG_ERROR("Invalid parameters!"); + return false; + } + + /* Return immediately if the desired section is not within the .rodata segment. */ + if (!section_info->size || (section_info->offset + section_info->size) > segment->info.size) return true; /* Allocate memory for the desired .rodata section. */ - if (!(*section_ptr = malloc(section_size))) + if (!(*out_ptr = malloc(section_info->size))) { - LOG_MSG_ERROR("Failed to allocate 0x%lX bytes for section at .rodata offset 0x%lX in NSO \"%s\"!", section_size, section_offset, nso_ctx->nso_filename); + LOG_MSG_ERROR("Failed to allocate 0x%X bytes for section at .rodata segment offset 0x%X in NSO \"%s\"!", section_info->size, section_info->offset, nso_ctx->nso_filename); return false; } /* Copy .rodata section data. */ - memcpy(*section_ptr, rodata_buf + section_offset, section_size); + memcpy(*out_ptr, segment->data + section_info->offset, section_info->size); return true; } diff --git a/source/tasks/gamecard_image_dump_task.cpp b/source/tasks/gamecard_image_dump_task.cpp index e08520e..bee9347 100644 --- a/source/tasks/gamecard_image_dump_task.cpp +++ b/source/tasks/gamecard_image_dump_task.cpp @@ -97,6 +97,9 @@ namespace nxdt::tasks /* Push progress onto the class. */ progress.xfer_size += sizeof(GameCardKeyArea); this->PublishProgress(progress); + + /* Update gamecard image size. */ + gc_img_size -= sizeof(GameCardKeyArea); } /* Allocate memory buffer for the dump process. */
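
Editor's note on the in-place LZ4 decompression pattern introduced by nsoGetSegment(): the compressed segment is read into the tail end of a single buffer sized with LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(), then decompressed towards the front of that same buffer, so only one allocation is needed per segment. The following is a minimal standalone sketch of that pattern, not part of the patch; it assumes the in-place macros are available (upstream lz4.h exposes them behind LZ4_STATIC_LINKING_ONLY) and uses a hypothetical read_payload() helper in place of pfsReadEntryData().

#define LZ4_STATIC_LINKING_ONLY    /* exposes LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE() */
#include <lz4.h>

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical reader standing in for pfsReadEntryData(). */
extern bool read_payload(void *dst, size_t size);

static bool decompress_segment_inplace(size_t comp_size, size_t decomp_size, unsigned char **out)
{
    /* Single allocation: decompressed size plus the in-place safety margin. */
    size_t buf_size = LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decomp_size);
    unsigned char *buf = calloc(buf_size, sizeof(unsigned char));
    if (!buf) return false;

    /* Read the compressed payload into the tail end of the buffer. */
    unsigned char *read_ptr = buf + (buf_size - comp_size);
    if (!read_payload(read_ptr, comp_size)) goto fail;

    /* Decompress towards the front of the same buffer; the margin guarantees
     * the write pointer never catches up with the read pointer. */
    int lz4_res = LZ4_decompress_safe((const char*)read_ptr, (char*)buf, (int)comp_size, (int)buf_size);
    if (lz4_res != (int)decomp_size) goto fail;

    *out = buf; /* caller owns buf, which now holds decomp_size usable bytes */
    return true;

fail:
    free(buf);
    return false;
}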