bktr: refactor bktrRead*Storage() functions.
Now using loops instead of recursion. Fixes a nasty stack exhaustion issue while dumping Them's Fightin' Herds raw RomFS section (Compressed+Sparse base with Compressed patch).
commit 961459fe01 (parent: ecdce35e8d)
2 changed files with 243 additions and 195 deletions
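
The core of the change, as a minimal standalone sketch (hypothetical get_extents()/read_chunk() helpers; this is not the actual nxdumptool API): the old bktrRead*Storage() functions handled a read that crossed a storage entry boundary by splitting it in two and recursing on each half, so a request spanning many entries could consume one stack frame per entry and exhaust the stack. The refactored functions instead service the request in a single loop, accumulating how many bytes have been read so far:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for the bucket tree entry lookup and the substorage read. */
    bool get_extents(uint64_t virt_offset, uint64_t *out_phys_offset, uint64_t *out_next_entry_offset);
    bool read_chunk(void *out, uint64_t size, uint64_t phys_offset);

    /* Iterative pattern used by the refactor: constant stack usage no matter how many entries a request crosses. */
    static bool read_storage(void *out, uint64_t read_size, uint64_t offset)
    {
        uint64_t accum = 0;

        while(accum < read_size)
        {
            uint64_t phys_offset = 0, next_entry_offset = 0;

            /* Look up the entry covering the current virtual offset. */
            if (!get_extents(offset + accum, &phys_offset, &next_entry_offset)) return false;

            /* Clamp the chunk to the end of the current entry and to what's left of the request. */
            uint64_t block_size = (next_entry_offset - (offset + accum));
            uint64_t chunk_size = ((read_size - accum) > block_size ? block_size : (read_size - accum));

            if (!read_chunk((uint8_t*)out + accum, chunk_size, phys_offset)) return false;

            /* Advance instead of recursing on the remainder. */
            accum += chunk_size;
        }

        return true;
    }

In the actual commit, the entry lookup is factored out into the new bktrGet*StorageEntryExtents() helpers shown below, so each loop iteration can fetch the current entry and the next entry's start offset without recursion.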
@@ -2966,7 +2966,7 @@ static bool saveNintendoContentArchive(void *userdata)
     SharedThreadData *shared_thread_data = &(nca_thread_data.shared_thread_data);
 
     u64 free_space = 0;
-    char *filename = NULL;
+    char *filename = NULL, subdir[0x20] = {0};
     u32 dev_idx = g_storageMenuElementOption.selected;
 
     bool success = false;
@@ -2991,7 +2991,8 @@ static bool saveNintendoContentArchive(void *userdata)
     consolePrint("nca size: 0x%lX\n", shared_thread_data->total_size);
 
     snprintf(path, MAX_ELEMENTS(path), "/%s.%s", nca_thread_data.nca_ctx->content_id_str, content_info->content_type == NcmContentType_Meta ? "cnmt.nca" : "nca");
-    filename = generateOutputTitleFileName(title_info, "NCA/User", path);
+    snprintf(subdir, MAX_ELEMENTS(subdir), "NCA/%s", nca_thread_data.nca_ctx->storage_id == NcmStorageId_BuiltInSystem ? "System" : "User");
+    filename = generateOutputTitleFileName(title_info, subdir, path);
     if (!filename) goto end;
 
     if (dev_idx == 1)
@@ -79,11 +79,14 @@ static const char *bktrGetStorageTypeName(u8 storage_type);
 #endif
 
 static bool bktrInitializeIndirectStorageContext(BucketTreeContext *out, NcaFsSectionContext *nca_fs_ctx, bool is_sparse);
+static bool bktrGetIndirectStorageEntryExtents(BucketTreeVisitor *visitor, u64 offset, BucketTreeIndirectStorageEntry *out_cur_entry, u64 *out_next_entry_offset);
 static bool bktrReadIndirectStorage(BucketTreeVisitor *visitor, void *out, u64 read_size, u64 offset);
 
 static bool bktrInitializeAesCtrExStorageContext(BucketTreeContext *out, NcaFsSectionContext *nca_fs_ctx);
+static bool bktrGetAesCtrExStorageEntryExtents(BucketTreeVisitor *visitor, u64 offset, BucketTreeAesCtrExStorageEntry *out_cur_entry, u64 *out_next_entry_offset);
 static bool bktrReadAesCtrExStorage(BucketTreeVisitor *visitor, void *out, u64 read_size, u64 offset);
 
+static bool bktrGetCompressedStorageEntryExtents(BucketTreeVisitor *visitor, u64 offset, BucketTreeCompressedStorageEntry *out_cur_entry, u64 *out_next_entry_offset);
 static bool bktrReadCompressedStorage(BucketTreeVisitor *visitor, void *out, u64 read_size, u64 offset);
 
 static bool bktrReadSubStorage(BucketTreeSubStorage *substorage, BucketTreeSubStorageReadParams *params);
@@ -130,10 +133,7 @@ NX_INLINE const u64 bktrStorageNodeOffsetGetEntryVirtualOffset(const BucketTreeN
 
 NX_INLINE bool bktrVisitorIsValid(BucketTreeVisitor *visitor);
 NX_INLINE bool bktrVisitorCanMoveNext(BucketTreeVisitor *visitor);
-NX_INLINE bool bktrVisitorCanMovePrevious(BucketTreeVisitor *visitor);
 
 static bool bktrVisitorMoveNext(BucketTreeVisitor *visitor);
-static bool bktrVisitorMovePrevious(BucketTreeVisitor *visitor);
 
 bool bktrInitializeContext(BucketTreeContext *out, NcaFsSectionContext *nca_fs_ctx, u8 storage_type)
 {
@@ -608,33 +608,30 @@ end:
     return success;
 }
 
-static bool bktrReadIndirectStorage(BucketTreeVisitor *visitor, void *out, u64 read_size, u64 offset)
+static bool bktrGetIndirectStorageEntryExtents(BucketTreeVisitor *visitor, u64 offset, BucketTreeIndirectStorageEntry *out_cur_entry, u64 *out_next_entry_offset)
 {
-    BucketTreeContext *ctx = visitor->bktr_ctx;
-    bool is_sparse = (ctx->storage_type == BucketTreeStorageType_Sparse);
-    bool missing_original_storage = !bktrIsValidSubStorage(&(ctx->substorages[0]));
-    if (!out || (is_sparse && (missing_original_storage || ctx->substorages[0].type != BucketTreeSubStorageType_Regular)) || \
-        (!is_sparse && (!bktrIsValidSubStorage(&(ctx->substorages[1])) || ctx->substorages[1].type != BucketTreeSubStorageType_AesCtrEx || \
-        (!missing_original_storage && (ctx->substorages[0].type == BucketTreeSubStorageType_Indirect || ctx->substorages[0].type == BucketTreeSubStorageType_AesCtrEx || \
-        ctx->substorages[0].type >= BucketTreeSubStorageType_Count)))) || (offset + read_size) > ctx->end_offset)
+    if (!visitor || !out_cur_entry || !out_next_entry_offset)
     {
         LOG_MSG_ERROR("Invalid parameters!");
         return false;
     }
 
-    /* Validate Indirect Storage entry. */
+    BucketTreeContext *ctx = visitor->bktr_ctx;
     BucketTreeIndirectStorageEntry cur_entry = {0};
+    u64 cur_entry_offset = 0, next_entry_offset = 0;
+    bool success = false;
 
+    /* Copy current Indirect Storage entry -- we'll move onto the next one, so we'll lose track of it. */
     memcpy(&cur_entry, visitor->entry, sizeof(BucketTreeIndirectStorageEntry));
 
+    /* Validate Indirect Storage entry. */
     if (!bktrIsOffsetWithinStorageRange(ctx, cur_entry.virtual_offset) || cur_entry.virtual_offset > offset || cur_entry.storage_index > BucketTreeIndirectStorageIndex_Patch)
     {
         LOG_MSG_ERROR("Invalid Indirect Storage entry! (0x%lX) (#1).", cur_entry.virtual_offset);
-        return false;
+        goto end;
     }
 
-    u64 cur_entry_offset = cur_entry.virtual_offset, next_entry_offset = 0;
-    bool moved = false, success = false;
+    cur_entry_offset = cur_entry.virtual_offset;
 
     /* Check if we can retrieve the next entry. */
     if (bktrVisitorCanMoveNext(visitor))
@@ -656,9 +653,6 @@ static bool bktrReadIndirectStorage(BucketTreeVisitor *visitor, void *out, u64 r
 
         /* Store next entry's virtual offset. */
         next_entry_offset = next_entry->virtual_offset;
-
-        /* Update variable. */
-        moved = true;
     } else {
         /* Set the next entry offset to the storage's end. */
         next_entry_offset = ctx->end_offset;
@@ -671,13 +665,61 @@ static bool bktrReadIndirectStorage(BucketTreeVisitor *visitor, void *out, u64 r
         goto end;
     }
 
-    /* Perform read operation. */
-    if ((offset + read_size) <= next_entry_offset)
-    {
-        /* Read only within the current indirect storage entry. */
-        BucketTreeSubStorageReadParams params = {0};
-        const u64 data_offset = (offset - cur_entry_offset + cur_entry.physical_offset);
-        bktrInitializeSubStorageReadParams(&params, out, data_offset, read_size, offset, 0, false, ctx->storage_type);
+    /* Update variables. */
+    memcpy(out_cur_entry, &cur_entry, sizeof(BucketTreeIndirectStorageEntry));
+    *out_next_entry_offset = next_entry_offset;
+    success = true;
+
+end:
+    return success;
+}
+
+static bool bktrReadIndirectStorage(BucketTreeVisitor *visitor, void *out, u64 read_size, u64 offset)
+{
+    BucketTreeContext *ctx = visitor->bktr_ctx;
+    bool is_sparse = (ctx->storage_type == BucketTreeStorageType_Sparse);
+    bool missing_original_storage = !bktrIsValidSubStorage(&(ctx->substorages[0]));
+
+    BucketTreeIndirectStorageEntry cur_entry = {0};
+    BucketTreeSubStorageReadParams params = {0};
+    u64 cur_entry_offset = 0, next_entry_offset = 0, accum = 0;
+
+    bool success = false;
+
+    if (!out || (is_sparse && (missing_original_storage || ctx->substorages[0].type != BucketTreeSubStorageType_Regular)) || \
+        (!is_sparse && (!bktrIsValidSubStorage(&(ctx->substorages[1])) || ctx->substorages[1].type != BucketTreeSubStorageType_AesCtrEx || \
+        (!missing_original_storage && (ctx->substorages[0].type == BucketTreeSubStorageType_Indirect || ctx->substorages[0].type == BucketTreeSubStorageType_AesCtrEx || \
+        ctx->substorages[0].type >= BucketTreeSubStorageType_Count)))) || (offset + read_size) > ctx->end_offset)
+    {
+        LOG_MSG_ERROR("Invalid parameters!");
+        return false;
+    }
+
+    /* Perform Indirect Storage reads until we reach the requested size. */
+    while(accum < read_size)
+    {
+        u8 *out_ptr = ((u8*)out + accum);
+        const u64 indirect_block_offset = (offset + accum);
+        u64 indirect_block_size = 0, indirect_block_read_size = 0, indirect_block_read_offset = 0, read_size_diff = 0;
+
+        /* Get current Indirect Storage entry and the start offset for the next one. */
+        if (!bktrGetIndirectStorageEntryExtents(visitor, indirect_block_offset, &cur_entry, &next_entry_offset))
+        {
+            LOG_MSG_ERROR("Failed to get Indirect Storage entry extents for offset 0x%lX!", indirect_block_offset);
+            goto end;
+        }
+
+        /* Calculate Indirect Storage block size. */
+        cur_entry_offset = cur_entry.virtual_offset;
+        indirect_block_size = (!accum ? (next_entry_offset - offset) : (next_entry_offset - cur_entry_offset));
+
+        /* Calculate Indirect Storage block read size and offset. */
+        read_size_diff = (read_size - accum);
+        indirect_block_read_size = (read_size_diff > indirect_block_size ? indirect_block_size : read_size_diff);
+        indirect_block_read_offset = (indirect_block_offset - cur_entry_offset + cur_entry.physical_offset);
+
+        /* Perform read operation within the current Indirect Storage entry. */
+        bktrInitializeSubStorageReadParams(&params, out_ptr, indirect_block_read_offset, indirect_block_read_size, indirect_block_offset, 0, false, ctx->storage_type);
 
         if (cur_entry.storage_index == BucketTreeIndirectStorageIndex_Original)
         {
@@ -685,35 +727,37 @@ static bool bktrReadIndirectStorage(BucketTreeVisitor *visitor, void *out, u64 r
             {
                 /* Retrieve data from the original data storage. */
                 /* This must either be a Regular/Sparse/Compressed storage from the base NCA (Indirect) or a Regular storage from this very same NCA (Sparse). */
-                success = bktrReadSubStorage(&(ctx->substorages[0]), &params);
-                if (!success) LOG_MSG_ERROR("Failed to read 0x%lX-byte long chunk from offset 0x%lX in original data storage!", read_size, data_offset);
+                if (!bktrReadSubStorage(&(ctx->substorages[0]), &params))
+                {
+                    LOG_MSG_ERROR("Failed to read 0x%lX-byte long chunk from offset 0x%lX in original data storage!", indirect_block_read_size, indirect_block_read_offset);
+                    goto end;
+                }
             } else {
-                LOG_MSG_ERROR("Error: attempting to read 0x%lX-byte long chunk from missing original data storage at offset 0x%lX!", read_size, data_offset);
+                LOG_MSG_ERROR("Error: attempting to read 0x%lX-byte long chunk from missing original data storage at offset 0x%lX!", indirect_block_read_size, indirect_block_read_offset);
+                goto end;
             }
         } else {
             if (!is_sparse)
             {
-                /* Retrieve data from the indirect data storage. */
+                /* Retrieve data from the Indirect data storage. */
                 /* This must always be the AesCtrEx storage within this very same NCA (Indirect). */
-                success = bktrReadSubStorage(&(ctx->substorages[1]), &params);
-                if (!success) LOG_MSG_ERROR("Failed to read 0x%lX-byte long chunk from offset 0x%lX in AesCtrEx storage!", read_size, data_offset);
+                if (!bktrReadSubStorage(&(ctx->substorages[1]), &params))
+                {
+                    LOG_MSG_ERROR("Failed to read 0x%lX-byte long chunk from offset 0x%lX in AesCtrEx storage!", indirect_block_read_size, indirect_block_read_offset);
+                    goto end;
+                }
             } else {
                 /* Fill output buffer with zeroes (SparseStorage's ZeroStorage). */
-                memset(out, 0, read_size);
-                success = true;
-            }
-        }
-    } else {
-        /* Handle reads that span multiple indirect storage entries. */
-        if (moved) bktrVisitorMovePrevious(visitor);
-
-        const u64 indirect_block_size = (next_entry_offset - offset);
-
-        success = (bktrReadIndirectStorage(visitor, out, indirect_block_size, offset) && \
-                   bktrReadIndirectStorage(visitor, (u8*)out + indirect_block_size, read_size - indirect_block_size, offset + indirect_block_size));
-
-        if (!success) LOG_MSG_ERROR("Failed to read 0x%lX bytes block from multiple Indirect Storage entries at offset 0x%lX!", read_size, offset);
-    }
+                memset(out_ptr, 0, indirect_block_read_size);
+            }
+        }
+
+        /* Update accumulator. */
+        accum += indirect_block_read_size;
+    }
+
+    /* Update flag. */
+    success = true;
 
 end:
     return success;
@@ -795,28 +839,30 @@ end:
     return success;
 }
 
-static bool bktrReadAesCtrExStorage(BucketTreeVisitor *visitor, void *out, u64 read_size, u64 offset)
+static bool bktrGetAesCtrExStorageEntryExtents(BucketTreeVisitor *visitor, u64 offset, BucketTreeAesCtrExStorageEntry *out_cur_entry, u64 *out_next_entry_offset)
 {
-    BucketTreeContext *ctx = visitor->bktr_ctx;
-    if (!out || !bktrIsValidSubStorage(&(ctx->substorages[0])) || ctx->substorages[0].type != BucketTreeSubStorageType_Regular || (offset + read_size) > ctx->end_offset)
+    if (!visitor || !out_cur_entry || !out_next_entry_offset)
     {
         LOG_MSG_ERROR("Invalid parameters!");
         return false;
     }
 
-    /* Validate AesCtrEx Storage entry. */
+    BucketTreeContext *ctx = visitor->bktr_ctx;
     BucketTreeAesCtrExStorageEntry cur_entry = {0};
+    u64 cur_entry_offset = 0, next_entry_offset = 0;
+    bool success = false;
 
+    /* Copy current AesCtrEx Storage entry -- we'll move onto the next one, so we'll lose track of it. */
     memcpy(&cur_entry, visitor->entry, sizeof(BucketTreeAesCtrExStorageEntry));
 
+    /* Validate AesCtrEx Storage entry. */
     if (!bktrIsOffsetWithinStorageRange(ctx, cur_entry.offset) || cur_entry.offset > offset || !IS_ALIGNED(cur_entry.offset, AES_BLOCK_SIZE))
     {
         LOG_MSG_ERROR("Invalid AesCtrEx Storage entry! (0x%lX) (#1).", cur_entry.offset);
-        return false;
+        goto end;
     }
 
-    u64 cur_entry_offset = cur_entry.offset, next_entry_offset = 0;
-    bool moved = false, success = false;
+    cur_entry_offset = cur_entry.offset;
 
     /* Check if we can retrieve the next entry. */
     if (bktrVisitorCanMoveNext(visitor))
@@ -838,9 +884,6 @@ static bool bktrReadAesCtrExStorage(BucketTreeVisitor *visitor, void *out, u64 r
 
         /* Store next entry's virtual offset. */
         next_entry_offset = next_entry->offset;
-
-        /* Update variable. */
-        moved = true;
     } else {
         /* Set the next entry offset to the storage's end. */
         next_entry_offset = ctx->end_offset;
@@ -853,48 +896,91 @@ static bool bktrReadAesCtrExStorage(BucketTreeVisitor *visitor, void *out, u64 r
         goto end;
     }
 
-    /* Perform read operation. */
-    if ((offset + read_size) <= next_entry_offset)
-    {
-        /* Read only within the current AesCtrEx storage entry. */
-        BucketTreeSubStorageReadParams params = {0};
-        bktrInitializeSubStorageReadParams(&params, out, offset, read_size, 0, cur_entry.generation, cur_entry.encryption == BucketTreeAesCtrExStorageEncryption_Enabled, ctx->storage_type);
-
-        success = bktrReadSubStorage(&(ctx->substorages[0]), &params);
-        if (!success) LOG_MSG_ERROR("Failed to read 0x%lX-byte long chunk at offset 0x%lX from AesCtrEx storage!", read_size, offset);
-    } else {
-        /* Handle reads that span multiple AesCtrEx storage entries. */
-        if (moved) bktrVisitorMovePrevious(visitor);
-
-        const u64 aes_ctr_ex_block_size = (next_entry_offset - offset);
-
-        success = (bktrReadAesCtrExStorage(visitor, out, aes_ctr_ex_block_size, offset) && \
-                   bktrReadAesCtrExStorage(visitor, (u8*)out + aes_ctr_ex_block_size, read_size - aes_ctr_ex_block_size, offset + aes_ctr_ex_block_size));
-
-        if (!success) LOG_MSG_ERROR("Failed to read 0x%lX bytes block from multiple AesCtrEx Storage entries at offset 0x%lX!", read_size, offset);
-    }
+    /* Update variables. */
+    memcpy(out_cur_entry, &cur_entry, sizeof(BucketTreeAesCtrExStorageEntry));
+    *out_next_entry_offset = next_entry_offset;
+    success = true;
 
 end:
     return success;
 }
 
-static bool bktrReadCompressedStorage(BucketTreeVisitor *visitor, void *out, u64 read_size, u64 offset)
+static bool bktrReadAesCtrExStorage(BucketTreeVisitor *visitor, void *out, u64 read_size, u64 offset)
 {
     BucketTreeContext *ctx = visitor->bktr_ctx;
-    NcaFsSectionContext *nca_fs_ctx = ctx->nca_fs_ctx;
-    u64 compressed_storage_base_offset = nca_fs_ctx->hash_region.size;
 
-    if (!out || !bktrIsValidSubStorage(&(ctx->substorages[0])) || ctx->substorages[0].type == BucketTreeSubStorageType_AesCtrEx || \
-        ctx->substorages[0].type == BucketTreeSubStorageType_Compressed || ctx->substorages[0].type >= BucketTreeSubStorageType_Count || (offset + read_size) > ctx->end_offset)
+    BucketTreeAesCtrExStorageEntry cur_entry = {0};
+    BucketTreeSubStorageReadParams params = {0};
+    u64 cur_entry_offset = 0, next_entry_offset = 0, accum = 0;
+
+    bool success = false;
+
+    if (!out || !bktrIsValidSubStorage(&(ctx->substorages[0])) || ctx->substorages[0].type != BucketTreeSubStorageType_Regular || (offset + read_size) > ctx->end_offset)
     {
         LOG_MSG_ERROR("Invalid parameters!");
         return false;
     }
 
-    /* Validate Compressed Storage entry. */
+    /* Perform AesCtrEx Storage reads until we reach the requested size. */
+    while(accum < read_size)
+    {
+        u8 *out_ptr = ((u8*)out + accum);
+        const u64 aes_ctr_ex_block_offset = (offset + accum);
+        u64 aes_ctr_ex_block_size = 0, aes_ctr_ex_block_read_size = 0, read_size_diff = 0;
+
+        /* Get current AesCtrEx Storage entry and the start offset for the next one. */
+        if (!bktrGetAesCtrExStorageEntryExtents(visitor, aes_ctr_ex_block_offset, &cur_entry, &next_entry_offset))
+        {
+            LOG_MSG_ERROR("Failed to get AesCtrEx Storage entry extents for offset 0x%lX!", aes_ctr_ex_block_offset);
+            goto end;
+        }
+
+        /* Calculate AesCtrEx Storage block size. */
+        cur_entry_offset = cur_entry.offset;
+        aes_ctr_ex_block_size = (!accum ? (next_entry_offset - offset) : (next_entry_offset - cur_entry_offset));
+
+        /* Calculate AesCtrEx Storage block read size and offset. */
+        read_size_diff = (read_size - accum);
+        aes_ctr_ex_block_read_size = (read_size_diff > aes_ctr_ex_block_size ? aes_ctr_ex_block_size : read_size_diff);
+
+        /* Perform read operation within the current AesCtrEx Storage entry. */
+        bool aes_ctr_ex_crypt = (cur_entry.encryption == BucketTreeAesCtrExStorageEncryption_Enabled);
+        bktrInitializeSubStorageReadParams(&params, out_ptr, aes_ctr_ex_block_offset, aes_ctr_ex_block_read_size, 0, cur_entry.generation, aes_ctr_ex_crypt, ctx->storage_type);
+
+        if (!bktrReadSubStorage(&(ctx->substorages[0]), &params))
+        {
+            LOG_MSG_ERROR("Failed to read 0x%lX-byte long chunk at offset 0x%lX from AesCtrEx storage!", aes_ctr_ex_block_read_size, aes_ctr_ex_block_offset);
+            goto end;
+        }
+
+        /* Update accumulator. */
+        accum += aes_ctr_ex_block_read_size;
+    }
+
+    /* Update flag. */
+    success = true;
+
+end:
+    return success;
+}
+
+static bool bktrGetCompressedStorageEntryExtents(BucketTreeVisitor *visitor, u64 offset, BucketTreeCompressedStorageEntry *out_cur_entry, u64 *out_next_entry_offset)
+{
+    if (!visitor || !out_cur_entry || !out_next_entry_offset)
+    {
+        LOG_MSG_ERROR("Invalid parameters!");
+        return false;
+    }
+
+    BucketTreeContext *ctx = visitor->bktr_ctx;
     BucketTreeCompressedStorageEntry cur_entry = {0};
+    u64 cur_entry_offset = 0, next_entry_offset = 0;
+    bool success = false;
 
+    /* Copy current Compressed Storage entry -- we'll move onto the next one, so we'll lose track of it. */
     memcpy(&cur_entry, visitor->entry, sizeof(BucketTreeCompressedStorageEntry));
 
+    /* Validate Compressed Storage entry. */
     if (!bktrIsOffsetWithinStorageRange(ctx, (u64)cur_entry.virtual_offset) || (u64)cur_entry.virtual_offset > offset || cur_entry.compression_type == BucketTreeCompressedStorageCompressionType_2 || \
         cur_entry.compression_type > BucketTreeCompressedStorageCompressionType_LZ4 || (cur_entry.compression_type != BucketTreeCompressedStorageCompressionType_LZ4 && \
         cur_entry.compression_level != 0) || (cur_entry.compression_type == BucketTreeCompressedStorageCompressionType_None && cur_entry.physical_size != BKTR_COMPRESSION_INVALID_PHYS_SIZE) || \
@@ -903,11 +989,10 @@ static bool bktrReadCompressedStorage(BucketTreeVisitor *visitor, void *out, u64
         cur_entry.compression_level > BKTR_COMPRESSION_LEVEL_MAX || !IS_ALIGNED(cur_entry.physical_offset, BKTR_COMPRESSION_PHYS_ALIGNMENT))))
     {
         LOG_DATA_ERROR(&cur_entry, sizeof(BucketTreeCompressedStorageEntry), "Invalid Compressed Storage entry! (#1). Entry dump:");
-        return false;
+        goto end;
     }
 
-    u64 cur_entry_offset = (u64)cur_entry.virtual_offset, next_entry_offset = 0;
-    bool moved = false, success = false;
+    cur_entry_offset = (u64)cur_entry.virtual_offset;
 
     /* Check if we can retrieve the next entry. */
    if (bktrVisitorCanMoveNext(visitor))
@@ -935,9 +1020,6 @@ static bool bktrReadCompressedStorage(BucketTreeVisitor *visitor, void *out, u64
 
         /* Store next entry's virtual offset. */
         next_entry_offset = (u64)next_entry->virtual_offset;
-
-        /* Update variable. */
-        moved = true;
     } else {
         /* Set the next entry offset to the storage's end. */
         next_entry_offset = ctx->end_offset;
@@ -950,60 +1032,109 @@ static bool bktrReadCompressedStorage(BucketTreeVisitor *visitor, void *out, u64
         goto end;
     }
 
-    /* Perform read operation. */
-    if ((offset + read_size) <= next_entry_offset)
-    {
-        /* Read only within the current compressed storage entry. */
-        BucketTreeSubStorageReadParams params = {0};
+    /* Update variables. */
+    memcpy(out_cur_entry, &cur_entry, sizeof(BucketTreeCompressedStorageEntry));
+    *out_next_entry_offset = next_entry_offset;
+    success = true;
 
+end:
+    return success;
+}
+
+static bool bktrReadCompressedStorage(BucketTreeVisitor *visitor, void *out, u64 read_size, u64 offset)
+{
+    BucketTreeContext *ctx = visitor->bktr_ctx;
+    NcaFsSectionContext *nca_fs_ctx = ctx->nca_fs_ctx;
+    u64 compressed_storage_base_offset = nca_fs_ctx->hash_region.size;
+
+    BucketTreeCompressedStorageEntry cur_entry = {0};
+    BucketTreeSubStorageReadParams params = {0};
+    u64 cur_entry_offset = 0, next_entry_offset = 0, accum = 0;
+
+    bool success = false;
+
+    if (!out || !bktrIsValidSubStorage(&(ctx->substorages[0])) || ctx->substorages[0].type == BucketTreeSubStorageType_AesCtrEx || \
+        ctx->substorages[0].type == BucketTreeSubStorageType_Compressed || ctx->substorages[0].type >= BucketTreeSubStorageType_Count || (offset + read_size) > ctx->end_offset)
+    {
+        LOG_MSG_ERROR("Invalid parameters!");
+        return false;
+    }
+
+    /* Perform Compressed Storage reads until we reach the requested size. */
+    while(accum < read_size)
+    {
+        u8 *out_ptr = ((u8*)out + accum);
+        const u64 compressed_block_offset = (offset + accum);
+        u64 compressed_block_size = 0, compressed_block_read_size = 0, compressed_block_read_offset = 0, read_size_diff = 0;
+
+        /* Get current Compressed Storage entry and the start offset for the next one. */
+        if (!bktrGetCompressedStorageEntryExtents(visitor, compressed_block_offset, &cur_entry, &next_entry_offset))
+        {
+            LOG_MSG_ERROR("Failed to get Compressed Storage entry extents for offset 0x%lX!", compressed_block_offset);
+            goto end;
+        }
+
+        /* Calculate Compressed Storage block size. */
+        cur_entry_offset = cur_entry.virtual_offset;
+        compressed_block_size = (!accum ? (next_entry_offset - offset) : (next_entry_offset - cur_entry_offset));
+
+        /* Calculate Compressed Storage block read size. */
+        read_size_diff = (read_size - accum);
+        compressed_block_read_size = (read_size_diff > compressed_block_size ? compressed_block_size : read_size_diff);
+
+        /* Perform read operation within the current Compressed Storage entry. */
         switch(cur_entry.compression_type)
         {
             case BucketTreeCompressedStorageCompressionType_None:
             {
                 /* We can randomly access data that's not compressed. */
                 /* Let's just read what we need. */
-                const u64 data_offset = (compressed_storage_base_offset + (offset - cur_entry_offset + (u64)cur_entry.physical_offset));
-                bktrInitializeSubStorageReadParams(&params, out, data_offset, read_size, 0, 0, false, ctx->storage_type);
+                compressed_block_read_offset = (compressed_storage_base_offset + (compressed_block_offset - cur_entry_offset + (u64)cur_entry.physical_offset));
+                bktrInitializeSubStorageReadParams(&params, out_ptr, compressed_block_read_offset, compressed_block_read_size, 0, 0, false, ctx->storage_type);
 
-                success = bktrReadSubStorage(&(ctx->substorages[0]), &params);
-                if (!success) LOG_MSG_ERROR("Failed to read 0x%lX-byte long chunk from offset 0x%lX in non-compressed entry!", read_size, data_offset);
+                if (!bktrReadSubStorage(&(ctx->substorages[0]), &params))
+                {
+                    LOG_MSG_ERROR("Failed to read 0x%lX-byte long chunk from offset 0x%lX in non-compressed entry!", compressed_block_read_size, compressed_block_read_offset);
+                    goto end;
+                }
 
                 break;
             }
             case BucketTreeCompressedStorageCompressionType_Zero:
             {
                 /* Fill output buffer with zeroes. */
-                memset(out, 0, read_size);
-                success = true;
+                memset(out_ptr, 0, compressed_block_read_size);
                 break;
             }
             case BucketTreeCompressedStorageCompressionType_LZ4:
             {
                 /* We can't randomly access data that's compressed. */
                 /* Let's be lazy and allocate memory for the full entry, read it and then decompress it. */
-                const u64 data_offset = (compressed_storage_base_offset + (u64)cur_entry.physical_offset);
+                compressed_block_read_offset = (compressed_storage_base_offset + (u64)cur_entry.physical_offset);
 
                 const u64 compressed_data_size = (u64)cur_entry.physical_size;
                 const u64 decompressed_data_size = (next_entry_offset - cur_entry_offset);
                 const u64 buffer_size = LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressed_data_size);
 
                 u8 *buffer = NULL, *read_ptr = NULL;
 
                 buffer = calloc(1, buffer_size);
                 if (!buffer)
                 {
                     LOG_MSG_ERROR("Failed to allocate 0x%lX-byte long buffer for data decompression! (0x%lX).", buffer_size, decompressed_data_size);
-                    break;
+                    goto end;
                 }
 
                 /* Adjust read pointer. This will let us use the same buffer for storing read data and decompressing it. */
                 read_ptr = (buffer + (buffer_size - compressed_data_size));
-                bktrInitializeSubStorageReadParams(&params, read_ptr, data_offset, compressed_data_size, 0, 0, false, ctx->storage_type);
+                bktrInitializeSubStorageReadParams(&params, read_ptr, compressed_block_read_offset, compressed_data_size, 0, 0, false, ctx->storage_type);
 
                 /* Read compressed LZ4 block. */
                 if (!bktrReadSubStorage(&(ctx->substorages[0]), &params))
                 {
-                    LOG_MSG_ERROR("Failed to read 0x%lX-byte long compressed block from offset 0x%lX!", compressed_data_size, data_offset);
+                    LOG_MSG_ERROR("Failed to read 0x%lX-byte long compressed block from offset 0x%lX!", compressed_data_size, compressed_block_read_offset);
                     free(buffer);
-                    break;
+                    goto end;
                 }
 
                 /* Decompress LZ4 block. */
@@ -1012,33 +1143,28 @@ static bool bktrReadCompressedStorage(BucketTreeVisitor *visitor, void *out, u64
                 {
                     LOG_MSG_ERROR("Failed to decompress 0x%lX-byte long compressed block! (%d).", compressed_data_size, lz4_res);
                     free(buffer);
-                    break;
+                    goto end;
                 }
 
                 /* Copy the data we need. */
-                memcpy(out, buffer + (offset - cur_entry_offset), read_size);
+                memcpy(out_ptr, buffer + (compressed_block_offset - cur_entry_offset), compressed_block_read_size);
 
-                /* Free allocated buffer and update return value. */
+                /* Free allocated buffer. */
                 free(buffer);
-                success = true;
 
                 break;
             }
             default:
                 break;
         }
-    } else {
-        /* Handle reads that span multiple compressed storage entries. */
-        if (moved) bktrVisitorMovePrevious(visitor);
 
-        const u64 compressed_block_size = (next_entry_offset - offset);
-
-        success = (bktrReadCompressedStorage(visitor, out, compressed_block_size, offset) && \
-                   bktrReadCompressedStorage(visitor, (u8*)out + compressed_block_size, read_size - compressed_block_size, offset + compressed_block_size));
-
-        if (!success) LOG_MSG_ERROR("Failed to read 0x%lX bytes block from multiple Compressed Storage entries at offset 0x%lX!", read_size, offset);
+        /* Update accumulator. */
+        accum += compressed_block_read_size;
     }
 
+    /* Update flag. */
+    success = true;
+
 end:
     return success;
 }
@@ -1544,11 +1670,6 @@ NX_INLINE bool bktrVisitorCanMoveNext(BucketTreeVisitor *visitor)
     return (bktrVisitorIsValid(visitor) && ((visitor->entry_index + 1) < visitor->entry_set.header.count || (visitor->entry_set.header.index + 1) < visitor->bktr_ctx->entry_set_count));
 }
 
-NX_INLINE bool bktrVisitorCanMovePrevious(BucketTreeVisitor *visitor)
-{
-    return (bktrVisitorIsValid(visitor) && (visitor->entry_index > 0 || visitor->entry_set.header.index > 0));
-}
-
 static bool bktrVisitorMoveNext(BucketTreeVisitor *visitor)
 {
     if (!bktrVisitorIsValid(visitor))
@@ -1620,77 +1741,3 @@ static bool bktrVisitorMoveNext(BucketTreeVisitor *visitor)
 end:
     return success;
 }
-
-static bool bktrVisitorMovePrevious(BucketTreeVisitor *visitor)
-{
-    if (!bktrVisitorIsValid(visitor))
-    {
-        LOG_MSG_ERROR("Invalid parameters!");
-        return false;
-    }
-
-    BucketTreeContext *ctx = visitor->bktr_ctx;
-    BucketTreeEntrySetHeader *entry_set = &(visitor->entry_set);
-    u32 entry_index = visitor->entry_index;
-    bool success = false;
-
-    /* Invalidate index. */
-    visitor->entry_index = UINT32_MAX;
-
-    if (entry_index == 0)
-    {
-        /* We have reached the start of this entry node. Let's try to retrieve the last entry from the previous one. */
-        if (!entry_set->header.index)
-        {
-            LOG_MSG_ERROR("Error: attempting to move visitor into non-existing Bucket Tree Entry Node!");
-            goto end;
-        }
-
-        /* Read previous entry set header. */
-        const u64 start_offset = entry_set->start;
-        const u64 entry_set_size = ctx->node_size;
-        const u32 entry_set_index = (entry_set->header.index - 1);
-        const u64 entry_set_offset = (ctx->node_storage_size + (entry_set_index * entry_set_size));
-
-        if ((entry_set_offset + sizeof(BucketTreeEntrySetHeader)) > (ctx->node_storage_size + ctx->entry_storage_size))
-        {
-            LOG_MSG_ERROR("Invalid Bucket Tree Entry Node offset!");
-            goto end;
-        }
-
-        memcpy(entry_set, (u8*)ctx->storage_table + entry_set_offset, sizeof(BucketTreeEntrySetHeader));
-
-        /* Validate next entry set header. */
-        if (!bktrVerifyNodeHeader(&(entry_set->header), entry_set_index, entry_set_size, ctx->entry_size) || entry_set->header.offset != start_offset || \
-            entry_set->start >= entry_set->header.offset)
-        {
-            LOG_MSG_ERROR("Bucket Tree Entry Node header verification failed!");
-            goto end;
-        }
-
-        /* Update entry index. */
-        entry_index = entry_set->header.count;
-    }
-
-    entry_index--;
-
-    /* Get the new entry. */
-    const u64 entry_size = ctx->entry_size;
-    const u64 entry_offset = (ctx->node_storage_size + bktrGetEntryNodeEntryOffsetByIndex(entry_set->header.index, ctx->node_size, entry_size, entry_index));
-
-    if ((entry_offset + entry_size) > (ctx->node_storage_size + ctx->entry_storage_size))
-    {
-        LOG_MSG_ERROR("Invalid Bucket Tree Entry Node entry offset!");
-        goto end;
-    }
-
-    /* Update visitor. */
-    visitor->entry_index = entry_index;
-    visitor->entry = ((u8*)ctx->storage_table + entry_offset);
-
-    /* Update return value. */
-    success = true;
-
-end:
-    return success;
-}