
strat: sha256 s/linux/libnx/g

Michael Scire 2019-04-04 12:01:37 -07:00
parent f44a730a90
commit a0f3183c79
9 changed files with 3 additions and 642 deletions
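
The substance of the change: every call site that previously drove the bundled, linux-derived streaming interface (sha256_init / sha256_update / sha256_finalize / sha256_finish) now makes a single call into libnx. A minimal before/after sketch of the substitution, using sha256CalculateHash with the (dst, src, size) argument order the diffs below show; this is an illustration, not code from the commit:

#include <switch.h>

static void hash_region(u8 *digest /* 0x20 bytes */, const void *data, size_t size) {
    /* Before: caller-owned state and four calls.
     *   struct sha256_state sha_ctx;
     *   sha256_init(&sha_ctx);
     *   sha256_update(&sha_ctx, data, size);
     *   sha256_finalize(&sha_ctx);
     *   sha256_finish(&sha_ctx, digest);
     */
    /* After: one shot, no state to manage. */
    sha256CalculateHash(digest, data, size);
}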

View file

@@ -1,113 +0,0 @@
/* Based on linux source code */
/*
* sha256_base.h - core logic for SHA-256 implementations
*
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <string.h>
#include "sha256.h"
#define unlikely(x) __builtin_expect(!!(x), 0)
void sha256_block_data_order (uint32_t *ctx, const void *in, size_t num);
int sha256_init(struct sha256_state *sctx)
{
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
sctx->state[2] = SHA256_H2;
sctx->state[3] = SHA256_H3;
sctx->state[4] = SHA256_H4;
sctx->state[5] = SHA256_H5;
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
sctx->count = 0;
return 0;
}
int sha256_update(struct sha256_state *sctx,
const void *data,
size_t len)
{
const u8 *data8 = (const u8 *)data;
unsigned int len32 = (unsigned int)len;
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx->count += len32;
if (unlikely((partial + len32) >= SHA256_BLOCK_SIZE)) {
int blocks;
if (partial) {
int p = SHA256_BLOCK_SIZE - partial;
memcpy(sctx->buf + partial, data8, p);
data8 += p;
len32 -= p;
sha256_block_data_order(sctx->state, sctx->buf, 1);
}
blocks = len32 / SHA256_BLOCK_SIZE;
len32 %= SHA256_BLOCK_SIZE;
if (blocks) {
sha256_block_data_order(sctx->state, data8, blocks);
data8 += blocks * SHA256_BLOCK_SIZE;
}
partial = 0;
}
if (len32)
memcpy(sctx->buf + partial, data8, len32);
return 0;
}
int sha256_finalize(struct sha256_state *sctx)
{
const int bit_offset = SHA256_BLOCK_SIZE - sizeof(u64);
u64 *bits = (u64 *)(sctx->buf + bit_offset);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx->buf[partial++] = 0x80;
if (partial > bit_offset) {
memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
partial = 0;
sha256_block_data_order(sctx->state, sctx->buf, 1);
}
memset(sctx->buf + partial, 0x0, bit_offset - partial);
*bits = __builtin_bswap64(sctx->count << 3);
sha256_block_data_order(sctx->state, sctx->buf, 1);
return 0;
}
int sha256_finish(struct sha256_state *sctx, void *out)
{
unsigned int digest_size = 32;
u32 *digest = (u32 *)out;
int i;
// Switch: misalignment shouldn't be a problem here...
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(u32))
*digest++ = __builtin_bswap32(sctx->state[i]);
*sctx = (struct sha256_state){};
return 0;
}
#ifdef __cplusplus
}
#endif
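
For reference, a minimal driver for the interface deleted above (types and constants come from the sha256.h in the next file); splitting the input across two updates exercises the partial-block buffering path in sha256_update. Expected output is the standard SHA-256 test vector for "abc".

#include <stdio.h>
#include <string.h>
#include "sha256.h"

int main(void) {
    struct sha256_state st;
    u8 digest[SHA256_DIGEST_SIZE];
    const char *msg = "abc";
    sha256_init(&st);
    sha256_update(&st, msg, 1);                     /* buffered as a partial block */
    sha256_update(&st, msg + 1, strlen(msg) - 1);   /* still < one block, stays buffered */
    sha256_finalize(&st);                           /* pads and compresses the final block */
    sha256_finish(&st, digest);                     /* byte-swaps the state words out */
    for (size_t i = 0; i < SHA256_DIGEST_SIZE; i++)
        printf("%02x", digest[i]);
    printf("\n");   /* ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad */
    return 0;
}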

View file

@@ -1,36 +0,0 @@
#pragma once
/* Based on linux source code */
#ifdef __cplusplus
extern "C" {
#endif
#include <switch/types.h>
#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64
#define SHA256_H0 0x6a09e667UL
#define SHA256_H1 0xbb67ae85UL
#define SHA256_H2 0x3c6ef372UL
#define SHA256_H3 0xa54ff53aUL
#define SHA256_H4 0x510e527fUL
#define SHA256_H5 0x9b05688cUL
#define SHA256_H6 0x1f83d9abUL
#define SHA256_H7 0x5be0cd19UL
struct sha256_state {
u32 state[SHA256_DIGEST_SIZE / 4];
u64 count;
u8 buf[SHA256_BLOCK_SIZE];
};
int sha256_init(struct sha256_state *sctx);
int sha256_update(struct sha256_state *sctx, const void *data, size_t len);
int sha256_finalize(struct sha256_state *sctx);
int sha256_finish(struct sha256_state *sctx, void *out);
#ifdef __cplusplus
}
#endif

View file

@@ -1,163 +0,0 @@
.section .text.sha256_armv8, "ax", %progbits
.align 5
.arch armv8-a+crypto
# SHA256 assembly implementation for ARMv8 AArch64 (based on linux source code)
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
.Lsha256prolog:
stp x29, x30, [sp,#-64]!
mov x29, sp
adr x3, .LKConstant256
str q8, [sp, #16]
ld1 {v16.4s-v19.4s}, [x3], #64
ld1 {v0.4s}, [x0], #16
ld1 {v20.4s-v23.4s}, [x3], #64
add x2, x1, x2, lsl #6
ld1 {v1.4s}, [x0]
ld1 {v24.4s-v27.4s}, [x3], #64
sub x0, x0, #16
str q9, [sp, #32]
str q10, [sp, #48]
ld1 {v28.4s-v31.4s}, [x3], #64
.Lsha256loop:
ld1 {v5.16b-v8.16b}, [x1], #64
mov v2.16b, v0.16b
mov v3.16b, v1.16b
rev32 v5.16b, v5.16b
rev32 v6.16b, v6.16b
add v9.4s, v5.4s, v16.4s
rev32 v7.16b, v7.16b
add v10.4s, v6.4s, v17.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v5.4s, v6.4s
rev32 v8.16b, v8.16b
add v9.4s, v7.4s, v18.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v6.4s, v7.4s
sha256su1 v5.4s, v7.4s, v8.4s
add v10.4s, v8.4s, v19.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v7.4s, v8.4s
sha256su1 v6.4s, v8.4s, v5.4s
add v9.4s, v5.4s, v20.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v8.4s, v5.4s
sha256su1 v7.4s, v5.4s, v6.4s
add v10.4s, v6.4s, v21.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v5.4s, v6.4s
sha256su1 v8.4s, v6.4s, v7.4s
add v9.4s, v7.4s, v22.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v6.4s, v7.4s
sha256su1 v5.4s, v7.4s, v8.4s
add v10.4s, v8.4s, v23.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v7.4s, v8.4s
sha256su1 v6.4s, v8.4s, v5.4s
add v9.4s, v5.4s, v24.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v8.4s, v5.4s
sha256su1 v7.4s, v5.4s, v6.4s
add v10.4s, v6.4s, v25.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v5.4s, v6.4s
sha256su1 v8.4s, v6.4s, v7.4s
add v9.4s, v7.4s, v26.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v6.4s, v7.4s
sha256su1 v5.4s, v7.4s, v8.4s
add v10.4s, v8.4s, v27.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v7.4s, v8.4s
sha256su1 v6.4s, v8.4s, v5.4s
add v9.4s, v5.4s, v28.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v8.4s, v5.4s
sha256su1 v7.4s, v5.4s, v6.4s
add v10.4s, v6.4s, v29.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su1 v8.4s, v6.4s, v7.4s
add v9.4s, v7.4s, v30.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
add v10.4s, v8.4s, v31.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
cmp x1, x2
add v1.4s, v1.4s, v3.4s
add v0.4s, v0.4s, v2.4s
b.ne .Lsha256loop
.Lsha256epilog:
st1 {v0.4s,v1.4s}, [x0]
ldr q10, [sp, #48]
ldr q9, [sp, #32]
ldr q8, [sp, #16]
ldr x29, [sp], #64
ret
.align 5
.LKConstant256:
.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.size sha256_block_data_order,.-sha256_block_data_order
.align 2
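
A reading aid for the loop above: x0 holds the eight state words, x1 the input, and x2 the block count ("add x2, x1, x2, lsl #6" turns the count into an end pointer, 64 bytes per block; q8-q10 are saved because v8-v15 are callee-saved). A portable C sketch of the same compression function, for understanding the crypto-extension code rather than as part of the commit; the K table matches .LKConstant256:

#include <stdint.h>
#include <stddef.h>

static uint32_t ror32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

void sha256_block_data_order_ref(uint32_t *state, const void *in, size_t num) {
    static const uint32_t K[64] = {
        0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
        0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
        0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
        0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
        0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
        0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
        0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
        0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
    };
    const uint8_t *p = (const uint8_t *)in;
    while (num--) {
        uint32_t w[64];
        for (int i = 0; i < 16; i++)    /* big-endian load == the rev32 above */
            w[i] = ((uint32_t)p[4*i] << 24) | ((uint32_t)p[4*i+1] << 16) |
                   ((uint32_t)p[4*i+2] << 8) | p[4*i+3];
        for (int i = 16; i < 64; i++) { /* message schedule == sha256su0/sha256su1 */
            uint32_t s0 = ror32(w[i-15], 7) ^ ror32(w[i-15], 18) ^ (w[i-15] >> 3);
            uint32_t s1 = ror32(w[i-2], 17) ^ ror32(w[i-2], 19) ^ (w[i-2] >> 10);
            w[i] = w[i-16] + s0 + w[i-7] + s1;
        }
        uint32_t a = state[0], b = state[1], c = state[2], d = state[3];
        uint32_t e = state[4], f = state[5], g = state[6], h = state[7];
        for (int i = 0; i < 64; i++) {  /* 64 rounds == the sha256h/sha256h2 pairs */
            uint32_t t1 = h + (ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25)) +
                          ((e & f) ^ (~e & g)) + K[i] + w[i];
            uint32_t t2 = (ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22)) +
                          ((a & b) ^ (a & c) ^ (b & c));
            h = g; g = f; f = e; e = d + t1;
            d = c; c = b; b = a; a = t1 + t2;
        }
        state[0] += a; state[1] += b; state[2] += c; state[3] += d;  /* feed-forward == */
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;  /* the final adds  */
        p += 64;
    }
}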

View file

@@ -23,7 +23,6 @@
 #include "debug.hpp"
 #include "utils.hpp"
 #include "ini.h"
-#include "sha256.h"
 #include "set_mitm/setsys_settings_items.hpp"
@@ -124,12 +123,8 @@ void Utils::InitializeThreadFunc(void *args) {
     u32 cal0_size = ((u32 *)g_cal0_backup)[2];
     is_cal0_valid &= cal0_size + 0x40 <= ProdinfoSize;
     if (is_cal0_valid) {
-        struct sha256_state sha_ctx;
         u8 calc_hash[0x20];
-        sha256_init(&sha_ctx);
-        sha256_update(&sha_ctx, g_cal0_backup + 0x40, cal0_size);
-        sha256_finalize(&sha_ctx);
-        sha256_finish(&sha_ctx, calc_hash);
+        sha256CalculateHash(calc_hash, g_cal0_backup + 0x40, cal0_size);
         is_cal0_valid &= memcmp(calc_hash, g_cal0_backup + 0x20, sizeof(calc_hash)) == 0;
     }
     has_auto_backup = is_cal0_valid;
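
What the rewritten check verifies, pulled out of the diff above as a self-contained sketch (offsets are from the code shown; the helper name is hypothetical): a CAL0 backup carries a SHA-256 of its body at offset 0x20, with the body starting at 0x40 and the body size in the third u32 of the header.

#include <stdbool.h>
#include <string.h>
#include <switch.h>

/* Hypothetical helper mirroring the diff's logic. */
static bool cal0_backup_is_valid(const u8 *cal0, size_t prodinfo_size) {
    u32 body_size = ((const u32 *)cal0)[2];
    if ((u64)body_size + 0x40 > prodinfo_size)
        return false;                   /* declared body must fit inside the backup */
    u8 calc_hash[0x20];
    sha256CalculateHash(calc_hash, cal0 + 0x40, body_size);
    return memcmp(calc_hash, cal0 + 0x20, sizeof(calc_hash)) == 0;
}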

View file

@@ -20,7 +20,6 @@
 #include <cstdio>
 #include <functional>
 #include <cstring>
-#include "sha256.h"
 #include "ldr_nro.hpp"
 #include "ldr_registration.hpp"
 #include "ldr_map.hpp"
@@ -53,7 +52,6 @@ Result NroUtils::LoadNro(Registration::Process *target_proc, Handle process_h, u
     unsigned int i;
     Result rc = ResultSuccess;
     u8 nro_hash[0x20];
-    struct sha256_state sha_ctx;
     /* Perform cleanup on failure. */
     ON_SCOPE_EXIT {
@@ -108,10 +106,7 @@ Result NroUtils::LoadNro(Registration::Process *target_proc, Handle process_h, u
         return rc;
     }
-    sha256_init(&sha_ctx);
-    sha256_update(&sha_ctx, (u8 *)mcm_nro.mapped_address, nro_hdr.nro_size);
-    sha256_finalize(&sha_ctx);
-    sha256_finish(&sha_ctx, nro_hash);
+    sha256CalculateHash(nro_hash, mcm_nro.mapped_address, nro_hdr.nro_size);
 }
 /* Unmap the NRO. */
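
One-shot hashing suits these call sites because the whole NRO is already mapped. If a caller ever needs incremental hashing again, libnx also exposes a context-based interface; a sketch assuming libnx's Sha256Context API (sha256ContextCreate / sha256ContextUpdate / sha256ContextGetHash — current libnx names, an assumption for the revision this commit targets):

#include <switch.h>

/* Chunked hashing, equivalent to one sha256CalculateHash over the region. */
static void hash_in_chunks(u8 *digest, const u8 *data, size_t size) {
    Sha256Context ctx;
    sha256ContextCreate(&ctx);
    while (size) {
        size_t n = size < 0x1000 ? size : 0x1000;   /* arbitrary chunk size */
        sha256ContextUpdate(&ctx, data, n);
        data += n;
        size -= n;
    }
    sha256ContextGetHash(&ctx, digest);
}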

View file

@@ -18,7 +18,6 @@
 #include <algorithm>
 #include <cstdio>
 #include <cstring>
-#include "sha256.h"
 #include "lz4.h"
 #include "ldr_nso.hpp"
 #include "ldr_map.hpp"
@@ -275,11 +274,7 @@ Result NsoUtils::LoadNsoSegment(u64 title_id, unsigned int index, unsigned int s
     if (check_hash) {
         u8 hash[0x20] = {0};
-        struct sha256_state sha_ctx;
-        sha256_init(&sha_ctx);
-        sha256_update(&sha_ctx, dst_addr, out_size);
-        sha256_finalize(&sha_ctx);
-        sha256_finish(&sha_ctx, hash);
+        sha256CalculateHash(hash, dst_addr, out_size);
         if (std::memcmp(g_nso_headers[index].section_hashes[segment], hash, sizeof(hash))) {
             return ResultLoaderInvalidNso;

View file

@@ -1,113 +0,0 @@
/* Based on linux source code */
/*
* sha256_base.h - core logic for SHA-256 implementations
*
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <string.h>
#include "sha256.h"
#define unlikely(x) __builtin_expect(!!(x), 0)
void sha256_block_data_order (uint32_t *ctx, const void *in, size_t num);
int sha256_init(struct sha256_state *sctx)
{
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
sctx->state[2] = SHA256_H2;
sctx->state[3] = SHA256_H3;
sctx->state[4] = SHA256_H4;
sctx->state[5] = SHA256_H5;
sctx->state[6] = SHA256_H6;
sctx->state[7] = SHA256_H7;
sctx->count = 0;
return 0;
}
int sha256_update(struct sha256_state *sctx,
const void *data,
size_t len)
{
const u8 *data8 = (const u8 *)data;
unsigned int len32 = (unsigned int)len;
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx->count += len32;
if (unlikely((partial + len32) >= SHA256_BLOCK_SIZE)) {
int blocks;
if (partial) {
int p = SHA256_BLOCK_SIZE - partial;
memcpy(sctx->buf + partial, data8, p);
data8 += p;
len32 -= p;
sha256_block_data_order(sctx->state, sctx->buf, 1);
}
blocks = len32 / SHA256_BLOCK_SIZE;
len32 %= SHA256_BLOCK_SIZE;
if (blocks) {
sha256_block_data_order(sctx->state, data8, blocks);
data8 += blocks * SHA256_BLOCK_SIZE;
}
partial = 0;
}
if (len32)
memcpy(sctx->buf + partial, data8, len32);
return 0;
}
int sha256_finalize(struct sha256_state *sctx)
{
const int bit_offset = SHA256_BLOCK_SIZE - sizeof(u64);
u64 *bits = (u64 *)(sctx->buf + bit_offset);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx->buf[partial++] = 0x80;
if (partial > bit_offset) {
memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
partial = 0;
sha256_block_data_order(sctx->state, sctx->buf, 1);
}
memset(sctx->buf + partial, 0x0, bit_offset - partial);
*bits = __builtin_bswap64(sctx->count << 3);
sha256_block_data_order(sctx->state, sctx->buf, 1);
return 0;
}
int sha256_finish(struct sha256_state *sctx, void *out)
{
unsigned int digest_size = 32;
u32 *digest = (u32 *)out;
int i;
// Switch: misalignment shouldn't be a problem here...
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(u32))
*digest++ = __builtin_bswap32(sctx->state[i]);
*sctx = (struct sha256_state){};
return 0;
}
#ifdef __cplusplus
}
#endif

View file

@@ -1,36 +0,0 @@
#pragma once
/* Based on linux source code */
#ifdef __cplusplus
extern "C" {
#endif
#include <switch/types.h>
#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64
#define SHA256_H0 0x6a09e667UL
#define SHA256_H1 0xbb67ae85UL
#define SHA256_H2 0x3c6ef372UL
#define SHA256_H3 0xa54ff53aUL
#define SHA256_H4 0x510e527fUL
#define SHA256_H5 0x9b05688cUL
#define SHA256_H6 0x1f83d9abUL
#define SHA256_H7 0x5be0cd19UL
struct sha256_state {
u32 state[SHA256_DIGEST_SIZE / 4];
u64 count;
u8 buf[SHA256_BLOCK_SIZE];
};
int sha256_init(struct sha256_state *sctx);
int sha256_update(struct sha256_state *sctx, const void *data, size_t len);
int sha256_finalize(struct sha256_state *sctx);
int sha256_finish(struct sha256_state *sctx, void *out);
#ifdef __cplusplus
}
#endif

View file

@@ -1,163 +0,0 @@
.section .text.sha256_armv8, "ax", %progbits
.align 5
.arch armv8-a+crypto
# SHA256 assembly implementation for ARMv8 AArch64 (based on linux source code)
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
.Lsha256prolog:
stp x29, x30, [sp,#-64]!
mov x29, sp
adr x3, .LKConstant256
str q8, [sp, #16]
ld1 {v16.4s-v19.4s}, [x3], #64
ld1 {v0.4s}, [x0], #16
ld1 {v20.4s-v23.4s}, [x3], #64
add x2, x1, x2, lsl #6
ld1 {v1.4s}, [x0]
ld1 {v24.4s-v27.4s}, [x3], #64
sub x0, x0, #16
str q9, [sp, #32]
str q10, [sp, #48]
ld1 {v28.4s-v31.4s}, [x3], #64
.Lsha256loop:
ld1 {v5.16b-v8.16b}, [x1], #64
mov v2.16b, v0.16b
mov v3.16b, v1.16b
rev32 v5.16b, v5.16b
rev32 v6.16b, v6.16b
add v9.4s, v5.4s, v16.4s
rev32 v7.16b, v7.16b
add v10.4s, v6.4s, v17.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v5.4s, v6.4s
rev32 v8.16b, v8.16b
add v9.4s, v7.4s, v18.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v6.4s, v7.4s
sha256su1 v5.4s, v7.4s, v8.4s
add v10.4s, v8.4s, v19.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v7.4s, v8.4s
sha256su1 v6.4s, v8.4s, v5.4s
add v9.4s, v5.4s, v20.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v8.4s, v5.4s
sha256su1 v7.4s, v5.4s, v6.4s
add v10.4s, v6.4s, v21.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v5.4s, v6.4s
sha256su1 v8.4s, v6.4s, v7.4s
add v9.4s, v7.4s, v22.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v6.4s, v7.4s
sha256su1 v5.4s, v7.4s, v8.4s
add v10.4s, v8.4s, v23.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v7.4s, v8.4s
sha256su1 v6.4s, v8.4s, v5.4s
add v9.4s, v5.4s, v24.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v8.4s, v5.4s
sha256su1 v7.4s, v5.4s, v6.4s
add v10.4s, v6.4s, v25.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v5.4s, v6.4s
sha256su1 v8.4s, v6.4s, v7.4s
add v9.4s, v7.4s, v26.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v6.4s, v7.4s
sha256su1 v5.4s, v7.4s, v8.4s
add v10.4s, v8.4s, v27.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v7.4s, v8.4s
sha256su1 v6.4s, v8.4s, v5.4s
add v9.4s, v5.4s, v28.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v8.4s, v5.4s
sha256su1 v7.4s, v5.4s, v6.4s
add v10.4s, v6.4s, v29.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su1 v8.4s, v6.4s, v7.4s
add v9.4s, v7.4s, v30.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
add v10.4s, v8.4s, v31.4s
mov v4.16b, v2.16b
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
cmp x1, x2
add v1.4s, v1.4s, v3.4s
add v0.4s, v0.4s, v2.4s
b.ne .Lsha256loop
.Lsha256epilog:
st1 {v0.4s,v1.4s}, [x0]
ldr q10, [sp, #48]
ldr q9, [sp, #32]
ldr q8, [sp, #16]
ldr x29, [sp], #64
ret
.align 5
.LKConstant256:
.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
.size sha256_block_data_order,.-sha256_block_data_order
.align 2