Added --with-armv8-crypto to configure.ac

Added ARMv8 SHA1 and SHA256 intrinsics
Ed Tubbs 2021-10-14 00:17:32 -05:00
parent 84c5b57e48
commit bffc7a8cb4
3 changed files with 389 additions and 2 deletions


@@ -183,6 +183,12 @@ AC_ARG_WITH([intel-avx2],
[intel_avx2=$withval],
[intel_avx2=no])
AC_ARG_WITH([armv8-crypto],
[AS_HELP_STRING([--with-armv8-crypto],
[Build with armv8 crypto (default is no)])],
[armv8_crypto=$withval],
[armv8_crypto=no])
AC_ARG_WITH([protoc-bindir],[AS_HELP_STRING([--with-protoc-bindir=BIN_DIR],[specify protoc bin path])], [protoc_bin_path=$withval], [])
AC_ARG_ENABLE(man,
@@ -803,6 +809,13 @@ if test x$intel_avx2 = xyes; then
esac
fi
if test x$armv8_crypto = xyes; then
AC_MSG_CHECKING([whether to build with armv8 crypto])
AC_MSG_RESULT(yes)
AC_DEFINE(USE_ARMV8, 1, [Define this symbol if armv8 crypto works])
CXXFLAGS="$CXXFLAGS -march=armv8-a+crypto"
fi
if test x$use_pkgconfig = xyes; then
: dnl
m4_ifdef(
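
Note: the new switch is a compile-time opt-in only. Passing --with-armv8-crypto to configure defines USE_ARMV8 and appends -march=armv8-a+crypto to CXXFLAGS, so the resulting binaries assume the Crypto Extensions are present and will trap on cores that lack them. A minimal sketch of a runtime guard (not part of this commit; assumes a Linux/aarch64 build where getauxval and the HWCAP_SHA1/HWCAP_SHA2 bits are available):

#include <sys/auxv.h>   /* getauxval, AT_HWCAP */
#include <asm/hwcap.h>  /* HWCAP_SHA1, HWCAP_SHA2 on aarch64 */

/* Hypothetical helper, not part of this commit: callers could fall back to
   the portable Transform() when the kernel does not report the SHA-1/SHA-2
   instructions. */
static bool HaveArmShaInstructions()
{
    unsigned long hwcap = getauxval(AT_HWCAP);
    return (hwcap & HWCAP_SHA1) && (hwcap & HWCAP_SHA2);
}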


@@ -14,6 +14,21 @@
#include <intel-ipsec-mb.h>
#endif
#if defined(__arm__) || defined(__aarch32__) || defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM)
# if defined(__GNUC__)
# include <stdint.h>
# endif
# if defined(__ARM_NEON) || defined(_MSC_VER) || defined(__GNUC__)
# include <arm_neon.h>
# endif
/* GCC and LLVM Clang, but not Apple Clang */
# if defined(__GNUC__) && !defined(__apple_build_version__)
# if defined(__ARM_ACLE) || defined(__ARM_FEATURE_CRYPTO)
# include <arm_acle.h>
# endif
# endif
#endif /* ARM Headers */
// Internal implementation code.
namespace
{
@@ -54,7 +69,176 @@ const uint32_t k4 = 0xCA62C1D6ul;
/** Perform a SHA-1 transformation, processing a 64-byte chunk. */
void Transform(uint32_t* s, const unsigned char* chunk)
{
#ifdef USE_AVX2
#ifdef USE_ARMV8
uint32x4_t ABCD, ABCD_SAVED;
uint32x4_t TMP0, TMP1;
uint32x4_t MSG0, MSG1, MSG2, MSG3;
uint32_t E0, E0_SAVED, E1;
/* Load state */
ABCD = vld1q_u32(&s[0]);
E0 = s[4];
/* Save state */
ABCD_SAVED = ABCD;
E0_SAVED = E0;
/* Load message */
MSG0 = vld1q_u32((const uint32_t*)(chunk));
MSG1 = vld1q_u32((const uint32_t*)(chunk + 16));
MSG2 = vld1q_u32((const uint32_t*)(chunk + 32));
MSG3 = vld1q_u32((const uint32_t*)(chunk + 48));
/* Reverse for little endian */
MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x5A827999));
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x5A827999));
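/* Each 4-round block below follows the same pattern: vsha1h_u32 rotates the
   current 'a' lane (lane 0 of ABCD) to form the E input of the next block,
   vsha1cq_u32 / vsha1pq_u32 / vsha1mq_u32 run four rounds using the Ch
   (rounds 0-19), Parity (20-39 and 60-79) and Maj (40-59) functions, and
   vsha1su0q_u32 / vsha1su1q_u32 extend the message schedule while TMP0/TMP1
   pre-add the round constant for a later block. */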
/* Rounds 0-3 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x5A827999));
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 4-7 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0x5A827999));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
/* Rounds 8-11 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x5A827999));
MSG1 = vsha1su1q_u32(MSG1, MSG0);
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
/* Rounds 12-15 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x6ED9EBA1));
MSG2 = vsha1su1q_u32(MSG2, MSG1);
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
/* Rounds 16-19 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1cq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x6ED9EBA1));
MSG3 = vsha1su1q_u32(MSG3, MSG2);
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 20-23 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0x6ED9EBA1));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
/* Rounds 24-27 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x6ED9EBA1));
MSG1 = vsha1su1q_u32(MSG1, MSG0);
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
/* Rounds 28-31 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x6ED9EBA1));
MSG2 = vsha1su1q_u32(MSG2, MSG1);
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
/* Rounds 32-35 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x8F1BBCDC));
MSG3 = vsha1su1q_u32(MSG3, MSG2);
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 36-39 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0x8F1BBCDC));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
/* Rounds 40-43 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0x8F1BBCDC));
MSG1 = vsha1su1q_u32(MSG1, MSG0);
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
/* Rounds 44-47 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0x8F1BBCDC));
MSG2 = vsha1su1q_u32(MSG2, MSG1);
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
/* Rounds 48-51 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0x8F1BBCDC));
MSG3 = vsha1su1q_u32(MSG3, MSG2);
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 52-55 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0xCA62C1D6));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);
/* Rounds 56-59 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1mq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG0, vdupq_n_u32(0xCA62C1D6));
MSG1 = vsha1su1q_u32(MSG1, MSG0);
MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);
/* Rounds 60-63 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG1, vdupq_n_u32(0xCA62C1D6));
MSG2 = vsha1su1q_u32(MSG2, MSG1);
MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);
/* Rounds 64-67 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
TMP0 = vaddq_u32(MSG2, vdupq_n_u32(0xCA62C1D6));
MSG3 = vsha1su1q_u32(MSG3, MSG2);
MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);
/* Rounds 68-71 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
TMP1 = vaddq_u32(MSG3, vdupq_n_u32(0xCA62C1D6));
MSG0 = vsha1su1q_u32(MSG0, MSG3);
/* Rounds 72-75 */
E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E0, TMP0);
/* Rounds 76-79 */
E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
ABCD = vsha1pq_u32(ABCD, E1, TMP1);
/* Combine state */
E0 += E0_SAVED;
ABCD = vaddq_u32(ABCD_SAVED, ABCD);
/* Save state */
vst1q_u32(&s[0], ABCD);
s[4] = E0;
#elif USE_AVX2
// Perform SHA1 one block (Intel AVX2)
sha1_one_block_avx2(chunk, s);
#else
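
A quick way to sanity-check the new ARMv8 path is the FIPS 180 test vector for SHA-1. The sketch below assumes the surrounding CSHA1 wrapper (Write/Finalize) and the crypto/sha1.h header path from the existing crypto module are left unchanged by this commit:

#include "crypto/sha1.h"  // assumed header for the CSHA1 wrapper
#include <cassert>
#include <cstring>

// FIPS 180 test vector: SHA-1("abc") = a9993e364706816aba3e25717850c26c9cd0d89d
void SanityCheckSha1()
{
    const unsigned char msg[3] = {'a', 'b', 'c'};
    const unsigned char expected[20] = {
        0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e,
        0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d};
    unsigned char out[20];
    CSHA1().Write(msg, sizeof(msg)).Finalize(out);
    assert(std::memcmp(out, expected, sizeof(out)) == 0);
}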


@@ -14,6 +14,41 @@
#include <intel-ipsec-mb.h>
#endif
#if defined(__arm__) || defined(__aarch32__) || defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM)
# if defined(__GNUC__)
# include <stdint.h>
# endif
# if defined(__ARM_NEON) || defined(_MSC_VER) || defined(__GNUC__)
# include <arm_neon.h>
# endif
/* GCC and LLVM Clang, but not Apple Clang */
# if defined(__GNUC__) && !defined(__apple_build_version__)
# if defined(__ARM_ACLE) || defined(__ARM_FEATURE_CRYPTO)
# include <arm_acle.h>
# endif
# endif
#endif /* ARM Headers */
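/* SHA-256 round constants (FIPS 180-4): the first 32 bits of the fractional
   parts of the cube roots of the first 64 primes, loaded four at a time with
   vld1q_u32(&K[..]) in Transform below. */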
static const uint32_t K[] =
{
0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
};
// Internal implementation code.
namespace
{
@@ -54,7 +89,162 @@ void inline Initialize(uint32_t* s)
/** Perform one SHA-256 transformation, processing a 64-byte chunk. */
void Transform(uint32_t* s, const unsigned char* chunk)
{
#ifdef USE_AVX2
#ifdef USE_ARMV8
uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE;
uint32x4_t MSG0, MSG1, MSG2, MSG3;
uint32x4_t TMP0, TMP1, TMP2;
/* Load state */
STATE0 = vld1q_u32(&s[0]);
STATE1 = vld1q_u32(&s[4]);
/* Save state */
ABEF_SAVE = STATE0;
CDGH_SAVE = STATE1;
/* Load message */
MSG0 = vld1q_u32((const uint32_t *)(chunk + 0));
MSG1 = vld1q_u32((const uint32_t *)(chunk + 16));
MSG2 = vld1q_u32((const uint32_t *)(chunk + 32));
MSG3 = vld1q_u32((const uint32_t *)(chunk + 48));
/* Reverse for little endian */
MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0x00]));
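/* Each 4-round block below follows the same pattern: the next four round
   constants are pre-added into TMP0/TMP1 via vld1q_u32(&K[..]),
   vsha256hq_u32 advances STATE0 while vsha256h2q_u32 advances STATE1 using
   the previous STATE0 saved in TMP2, and vsha256su0q_u32 / vsha256su1q_u32
   extend the 16-word message schedule. */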
/* Rounds 0-3 */
MSG0 = vsha256su0q_u32(MSG0, MSG1);
TMP2 = STATE0;
TMP1 = vaddq_u32(MSG1, vld1q_u32(&K[0x04]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
/* Rounds 4-7 */
MSG1 = vsha256su0q_u32(MSG1, MSG2);
TMP2 = STATE0;
TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[0x08]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
/* Rounds 8-11 */
MSG2 = vsha256su0q_u32(MSG2, MSG3);
TMP2 = STATE0;
TMP1 = vaddq_u32(MSG3, vld1q_u32(&K[0x0c]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
/* Rounds 12-15 */
MSG3 = vsha256su0q_u32(MSG3, MSG0);
TMP2 = STATE0;
TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0x10]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
/* Rounds 16-19 */
MSG0 = vsha256su0q_u32(MSG0, MSG1);
TMP2 = STATE0;
TMP1 = vaddq_u32(MSG1, vld1q_u32(&K[0x14]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
/* Rounds 20-23 */
MSG1 = vsha256su0q_u32(MSG1, MSG2);
TMP2 = STATE0;
TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[0x18]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
/* Rounds 24-27 */
MSG2 = vsha256su0q_u32(MSG2, MSG3);
TMP2 = STATE0;
TMP1 = vaddq_u32(MSG3, vld1q_u32(&K[0x1c]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
/* Rounds 28-31 */
MSG3 = vsha256su0q_u32(MSG3, MSG0);
TMP2 = STATE0;
TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0x20]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
/* Rounds 32-35 */
MSG0 = vsha256su0q_u32(MSG0, MSG1);
TMP2 = STATE0;
TMP1 = vaddq_u32(MSG1, vld1q_u32(&K[0x24]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);
/* Rounds 36-39 */
MSG1 = vsha256su0q_u32(MSG1, MSG2);
TMP2 = STATE0;
TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[0x28]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);
/* Rounds 40-43 */
MSG2 = vsha256su0q_u32(MSG2, MSG3);
TMP2 = STATE0;
TMP1 = vaddq_u32(MSG3, vld1q_u32(&K[0x2c]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);
/* Rounds 44-47 */
MSG3 = vsha256su0q_u32(MSG3, MSG0);
TMP2 = STATE0;
TMP0 = vaddq_u32(MSG0, vld1q_u32(&K[0x30]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);
/* Rounds 48-51 */
TMP2 = STATE0;
TMP1 = vaddq_u32(MSG1, vld1q_u32(&K[0x34]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
/* Rounds 52-55 */
TMP2 = STATE0;
TMP0 = vaddq_u32(MSG2, vld1q_u32(&K[0x38]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
/* Rounds 56-59 */
TMP2 = STATE0;
TMP1 = vaddq_u32(MSG3, vld1q_u32(&K[0x3c]));
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
/* Rounds 60-63 */
TMP2 = STATE0;
STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
/* Combine state */
STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
STATE1 = vaddq_u32(STATE1, CDGH_SAVE);
/* Save state */
vst1q_u32(&s[0], STATE0);
vst1q_u32(&s[4], STATE1);
#elif USE_AVX2
// Perform SHA256 one block (Intel AVX2)
sha256_one_block_avx2(chunk, s);
#else
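
As with SHA-1, the SHA-256 path can be checked against the FIPS 180 "abc" test vector. The sketch below assumes the existing CSHA256 wrapper (Write/Finalize) and the crypto/sha256.h header path:

#include "crypto/sha256.h"  // assumed header for the CSHA256 wrapper
#include <cassert>
#include <cstring>

// FIPS 180 test vector:
// SHA-256("abc") = ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad
void SanityCheckSha256()
{
    const unsigned char msg[3] = {'a', 'b', 'c'};
    const unsigned char expected[32] = {
        0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde,
        0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
        0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad};
    unsigned char out[32];
    CSHA256().Write(msg, sizeof(msg)).Finalize(out);
    assert(std::memcmp(out, expected, sizeof(out)) == 0);
}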