diff --git a/.cirrus.yml b/.cirrus.yml
index 506a860336..c53b221c20 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -16,6 +16,7 @@ env:
   BENCH: yes
   ITERS: 2
   MAKEFLAGS: -j2
+  CHECK_SIDE_EFFECT_FREE: yes

 cat_logs_snippet: &CAT_LOGS
   always:
@@ -62,7 +63,7 @@ task:
     - env: { STATICPRECOMPUTATION: no}
     - env: {BUILD: distcheck, WITH_VALGRIND: no, CTIMETEST: no, BENCH: no}
     - env: {CPPFLAGS: -DDETERMINISTIC}
-    - env: {CFLAGS: -O0, CTIMETEST: no}
+    - env: {CFLAGS: -O0, CTIMETEST: no, CHECK_SIDE_EFFECT_FREE: no}
    - env:
        CFLAGS: "-fsanitize=undefined -fno-omit-frame-pointer"
        LDFLAGS: "-fsanitize=undefined -fno-omit-frame-pointer"
@@ -73,6 +74,7 @@ task:
        EXPERIMENTAL: yes
        SCHNORRSIG: yes
        CTIMETEST: no
+       CHECK_SIDE_EFFECT_FREE: auto
    - env: { ECMULTGENPRECISION: 2 }
    - env: { ECMULTGENPRECISION: 8 }
    - env:
diff --git a/build-aux/m4/bitcoin_secp.m4 b/build-aux/m4/bitcoin_secp.m4
index e57888ca18..94c946e7bf 100644
--- a/build-aux/m4/bitcoin_secp.m4
+++ b/build-aux/m4/bitcoin_secp.m4
@@ -9,6 +9,37 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
 AC_MSG_RESULT([$has_64bit_asm])
 ])

+AC_DEFUN([SECP_CHECK_SIDE_EFFECT_FREE],[
+AC_MSG_CHECKING(whether compiler detects side effect free statements)
+AC_LINK_IFELSE([AC_LANG_SOURCE([[
+  #include <stddef.h>
+  static int my_memcmp(const void *s1, const void *s2, size_t n) {
+    const unsigned char *p1 = s1, *p2 = s2;
+    size_t i;
+    for (i = 0; i < n; i++) {
+      int diff = p1[i] - p2[i];
+      if (diff != 0) {
+        return diff;
+      }
+    }
+    return 0;
+  }
+
+  static int side_effect_free(int val, const int* ptr) {
+    static const unsigned char foo[32] = {1,2,3};
+    if (!my_memcmp(ptr, foo, val)) return 0;
+    return (((val + 13) * ptr[0] ^ 7) * ptr[ptr[1]] / 11) * (int)ptr == 0;
+  }
+
+  extern int should_not_survive;
+  int main(int argc, char** argv) {
+    (void)(should_not_survive || side_effect_free(argc, (const int*)argv));
+    return 0;
+  }
+]])],[has_check_side_effect_free=yes],[has_check_side_effect_free=no])
+AC_MSG_RESULT([$has_check_side_effect_free])
+])
+
 dnl
 AC_DEFUN([SECP_OPENSSL_CHECK],[
 has_libcrypto=no
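The probe above exploits a linker-visible optimization effect: the extern variable should_not_survive is never defined anywhere, so the program only links if the compiler can delete the whole (void)(x || cond) statement, which it may do only when cond is free of side effects. Reduced to a standalone sketch (illustrative file and symbol names, not part of the patch):

    /* demo.c -- minimal sketch of the probe's linker trick. The extern
     * variable is deliberately never defined. If pure() is side-effect free,
     * the optimizer can prove the (void)(x || pure(...)) expression has no
     * observable effect, deletes it, and the reference to undefined_flag
     * disappears, so the program links. If the condition had a side effect
     * (or optimization is disabled), the reference survives and linking
     * fails with an undefined-symbol error. */
    extern int undefined_flag;

    static int pure(int x) {
        return (x * 2 + 1) > 0; /* no stores, no I/O: freely removable */
    }

    int main(int argc, char **argv) {
        (void)argv;
        (void)(undefined_flag || pure(argc));
        return 0;
    }

    /* cc -O1 demo.c  -> links (dead reference optimized away)
     * cc -O0 demo.c  -> "undefined reference to `undefined_flag'"
     * The behavior is compiler- and flag-dependent, which is why the CI
     * matrix above pins CHECK_SIDE_EFFECT_FREE=no for the -O0 build and why
     * the configure switch below accepts auto. */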
diff --git a/ci/cirrus.sh b/ci/cirrus.sh
index f26ca98d1d..b241af3f5b 100755
--- a/ci/cirrus.sh
+++ b/ci/cirrus.sh
@@ -19,6 +19,7 @@ valgrind --version || true
     --enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \
     --enable-module-schnorrsig="$SCHNORRSIG" \
     --with-valgrind="$WITH_VALGRIND" \
+    --enable-side-effect-free-check="$CHECK_SIDE_EFFECT_FREE" \
     --host="$HOST" $EXTRAFLAGS

 # We have set "-j" in MAKEFLAGS.
diff --git a/configure.ac b/configure.ac
index e84005edf4..a48032e100 100644
--- a/configure.ac
+++ b/configure.ac
@@ -155,6 +155,11 @@ AC_ARG_ENABLE(external_default_callbacks,
     [use_external_default_callbacks=$enableval],
     [use_external_default_callbacks=no])

+AC_ARG_ENABLE(side_effect_free_check,
+    AS_HELP_STRING([--enable-side-effect-free-check],[enable checking assertions are side-effect free (yes/no/auto) [default=no]]),
+    [use_side_effect_free_check=$enableval],
+    [use_side_effect_free_check=no])
+
 # Test-only override of the (autodetected by the C code) "widemul" setting.
 # Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default).
 AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto])
@@ -210,6 +215,18 @@ else
   CFLAGS="-O2 $CFLAGS"
 fi

+set_check_side_effect_free=no
+if test x"$use_side_effect_free_check" != x"no"; then
+  SECP_CHECK_SIDE_EFFECT_FREE
+  if test x"$has_check_side_effect_free" = x"yes"; then
+    set_check_side_effect_free=yes
+  else
+    if test x"$use_side_effect_free_check" != x"auto"; then
+      AC_MSG_ERROR([Side effect free checking requested but not compatible with compiler])
+    fi
+  fi
+fi
+
 if test x"$req_asm" = x"auto"; then
   SECP_64BIT_ASM_CHECK
   if test x"$has_64bit_asm" = x"yes"; then
@@ -258,6 +275,16 @@ if test x"$use_external_asm" = x"yes"; then
   AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used])
 fi

+case $set_check_side_effect_free in
+yes)
+  AC_DEFINE(CHECK_SIDE_EFFECT_FREE, 1, [Define this symbol to enable checks for side-effect free statements])
+  ;;
+no)
+  ;;
+*)
+  AC_MSG_ERROR([invalid selection for check_side_effect_free])
+  ;;
+esac

 # Select wide multiplication implementation
 case $set_widemul in
diff --git a/src/field_10x26_impl.h b/src/field_10x26_impl.h
index c2802514dc..a75b4586c6 100644
--- a/src/field_10x26_impl.h
+++ b/src/field_10x26_impl.h
@@ -275,10 +275,9 @@ SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
 SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
     const uint32_t *t = a->n;
-#ifdef VERIFY
-    VERIFY_CHECK(a->normalized);
-    secp256k1_fe_verify(a);
-#endif
+    /* No secp256k1_fe_verify is needed here, because if all limbs are 0,
+     * the representation is always valid. This also allows using this function
+     * inside VERIFY_CHECK conditions themselves without side effects. */
     return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
 }
@@ -455,11 +454,7 @@ void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a);

 #else

-#ifdef VERIFY
 #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
-#else
-#define VERIFY_BITS(x, n) do { } while(0)
-#endif

 SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
     uint64_t c, d;
@@ -1135,9 +1130,7 @@ static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r,
 }

 static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
-#ifdef VERIFY
-    VERIFY_CHECK(a->normalized);
-#endif
+    VERIFY_CHECK_ONLY(a->normalized);
     r->n[0] = a->n[0] | a->n[1] << 26;
     r->n[1] = a->n[1] >> 6 | a->n[2] << 20;
     r->n[2] = a->n[2] >> 12 | a->n[3] << 14;
@@ -1206,9 +1199,7 @@ static void secp256k1_fe_to_signed30(secp256k1_modinv32_signed30 *r, const secp2
     const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4],
                    a5 = a->n[5], a6 = a->n[6], a7 = a->n[7], a8 = a->n[8], a9 = a->n[9];

-#ifdef VERIFY
-    VERIFY_CHECK(a->normalized);
-#endif
+    VERIFY_CHECK_ONLY(a->normalized);
     r->v[0] = (a0 | a1 << 26) & M30;
     r->v[1] = (a1 >> 4 | a2 << 22) & M30;
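The comment added to secp256k1_fe_is_zero above captures the precondition for this whole scheme: only side-effect-free functions may appear inside checked VERIFY_CHECK conditions. A toy contrast, using a hypothetical miniature type rather than the library's real one:

    #include <stdlib.h>

    /* A hypothetical 2-limb field element standing in for secp256k1_fe. */
    typedef struct { unsigned n[2]; int normalized; } fe_t;

    /* A checking variant: abort() on a bad input is a side effect, so a call
     * to this could never be optimized away, and any VERIFY_CHECK condition
     * containing it would fail the side-effect-free link test. */
    static int fe_is_zero_checked(const fe_t *a) {
        if (!a->normalized) abort();
        return (a->n[0] | a->n[1]) == 0;
    }

    /* The patch's approach: an all-zero limb vector is valid in every
     * representation, so there is no precondition to check and the function
     * is a pure read -- safe inside VERIFY_CHECK conditions. */
    static int fe_is_zero_pure(const fe_t *a) {
        return (a->n[0] | a->n[1]) == 0;
    }

    int main(void) {
        fe_t x = {{0, 0}, 1};
        return fe_is_zero_checked(&x) == fe_is_zero_pure(&x) ? 0 : 1;
    }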
diff --git a/src/field_5x52_impl.h b/src/field_5x52_impl.h
index b73cfea206..167f5497d2 100644
--- a/src/field_5x52_impl.h
+++ b/src/field_5x52_impl.h
@@ -238,10 +238,9 @@ SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
 SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
     const uint64_t *t = a->n;
-#ifdef VERIFY
-    VERIFY_CHECK(a->normalized);
-    secp256k1_fe_verify(a);
-#endif
+    /* No secp256k1_fe_verify is needed here, because if all limbs are 0,
+     * the representation is always valid. This also allows using this function
+     * inside VERIFY_CHECK conditions themselves without side effects. */
     return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
 }
@@ -478,9 +477,7 @@ static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r,
 }

 static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
-#ifdef VERIFY
-    VERIFY_CHECK(a->normalized);
-#endif
+    VERIFY_CHECK_ONLY(a->normalized);
     r->n[0] = a->n[0] | a->n[1] << 52;
     r->n[1] = a->n[1] >> 12 | a->n[2] << 40;
     r->n[2] = a->n[2] >> 24 | a->n[3] << 28;
@@ -529,9 +526,7 @@ static void secp256k1_fe_to_signed62(secp256k1_modinv64_signed62 *r, const secp2
     const uint64_t M62 = UINT64_MAX >> 2;
     const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4];

-#ifdef VERIFY
-    VERIFY_CHECK(a->normalized);
-#endif
+    VERIFY_CHECK_ONLY(a->normalized);
     r->v[0] = (a0 | a1 << 52) & M62;
     r->v[1] = (a1 >> 10 | a2 << 42) & M62;
@@ -555,9 +550,7 @@ static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *x) {
     secp256k1_modinv64(&s, &secp256k1_const_modinfo_fe);
     secp256k1_fe_from_signed62(r, &s);

-#ifdef VERIFY
     VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == secp256k1_fe_normalizes_to_zero(&tmp));
-#endif
 }

 static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *x) {
@@ -570,9 +563,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *x) {
     secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_fe);
     secp256k1_fe_from_signed62(r, &s);

-#ifdef VERIFY
     VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == secp256k1_fe_normalizes_to_zero(&tmp));
-#endif
 }

 #endif /* SECP256K1_FIELD_REPR_IMPL_H */
diff --git a/src/field_5x52_int128_impl.h b/src/field_5x52_int128_impl.h
index 314002ee39..15a1cf0d9f 100644
--- a/src/field_5x52_int128_impl.h
+++ b/src/field_5x52_int128_impl.h
@@ -9,11 +9,7 @@

 #include <stdint.h>

-#ifdef VERIFY
 #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
-#else
-#define VERIFY_BITS(x, n) do { } while(0)
-#endif

 SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
     uint128_t c, d;
diff --git a/src/modinv32_impl.h b/src/modinv32_impl.h
index aa7988c4bb..3ef44fe136 100644
--- a/src/modinv32_impl.h
+++ b/src/modinv32_impl.h
@@ -73,8 +73,8 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
         VERIFY_CHECK(r->v[i] >= -M30);
         VERIFY_CHECK(r->v[i] <= M30);
     }
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
 #endif

     /* In a first step, add the modulus if the input is negative, and then negate if requested.
@@ -144,7 +144,6 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
     r->v[7] = r7;
     r->v[8] = r8;

-#ifdef VERIFY
     VERIFY_CHECK(r0 >> 30 == 0);
     VERIFY_CHECK(r1 >> 30 == 0);
     VERIFY_CHECK(r2 >> 30 == 0);
@@ -154,9 +153,8 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
     VERIFY_CHECK(r6 >> 30 == 0);
     VERIFY_CHECK(r7 >> 30 == 0);
     VERIFY_CHECK(r8 >> 30 == 0);
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
 }

 /* Data type for transition matrices (see section 3 of explanation).
@@ -330,16 +328,14 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
     int32_t di, ei, md, me, sd, se;
     int64_t cd, ce;
     int i;
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
     VERIFY_CHECK((labs(u) + labs(v)) >= 0); /* |u|+|v| doesn't overflow */
     VERIFY_CHECK((labs(q) + labs(r)) >= 0); /* |q|+|r| doesn't overflow */
     VERIFY_CHECK((labs(u) + labs(v)) <= M30 + 1); /* |u|+|v| <= 2^30 */
     VERIFY_CHECK((labs(q) + labs(r)) <= M30 + 1); /* |q|+|r| <= 2^30 */
-#endif
     /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
     sd = d->v[8] >> 31;
     se = e->v[8] >> 31;
@@ -374,12 +370,10 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
     /* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. */
     d->v[8] = (int32_t)cd;
     e->v[8] = (int32_t)ce;
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
 }

 /* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps.
@@ -469,35 +463,29 @@ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_m
         /* Update d,e using that transition matrix. */
         secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
         secp256k1_modinv32_update_fg_30(&f, &g, &t);
-#ifdef VERIFY
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
     }

     /* At this point sufficient iterations have been performed that g must have reached 0
      * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
      * values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
     /* g == 0 */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0);
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0);
     /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
-                 secp256k1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
-                 (secp256k1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
-                  secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
-                  (secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 ||
-                   secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0)));
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
+                      secp256k1_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
+                      (secp256k1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
+                       secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
+                       (secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 ||
+                        secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0)));

     /* Optionally negate d, normalize to [0,modulus), and return it. */
     secp256k1_modinv32_normalize_30(&d, f.v[8], modinfo);

@@ -526,12 +514,10 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
         /* Update d,e using that transition matrix. */
         secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
         secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t);
         /* If the bottom limb of g is 0, there is a chance g=0. */
         if (g.v[0] == 0) {
@@ -556,28 +542,24 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
             g.v[len - 2] |= (uint32_t)gn << 30;
             --len;
         }
-#ifdef VERIFY
-        VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
-        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+        VERIFY_CHECK_ONLY(++i < 25); /* We should never need more than 25*30 = 750 divsteps */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
     }

     /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
      * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
     /* g == 0 */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0);
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0);
     /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
-    VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
-                 secp256k1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
-                 (secp256k1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
-                  secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
-                  (secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 ||
-                   secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0)));
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 ||
+                      secp256k1_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 ||
+                      (secp256k1_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
+                       secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
+                       (secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 ||
+                        secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0)));

     /* Optionally negate d, normalize to [0,modulus), and return it. */
     secp256k1_modinv32_normalize_30(&d, f.v[len - 1], modinfo);
diff --git a/src/modinv64_impl.h b/src/modinv64_impl.h
index 78505fa183..a784520f71 100644
--- a/src/modinv64_impl.h
+++ b/src/modinv64_impl.h
@@ -78,8 +78,8 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
         VERIFY_CHECK(r->v[i] >= -M62);
         VERIFY_CHECK(r->v[i] <= M62);
     }
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
 #endif

     /* In a first step, add the modulus if the input is negative, and then negate if requested.
@@ -125,15 +125,13 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
     r->v[3] = r3;
     r->v[4] = r4;

-#ifdef VERIFY
     VERIFY_CHECK(r0 >> 62 == 0);
     VERIFY_CHECK(r1 >> 62 == 0);
     VERIFY_CHECK(r2 >> 62 == 0);
     VERIFY_CHECK(r3 >> 62 == 0);
     VERIFY_CHECK(r4 >> 62 == 0);
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
 }

 /* Data type for transition matrices (see section 3 of explanation).
@@ -304,16 +302,14 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
     const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
     int64_t md, me, sd, se;
     int128_t cd, ce;
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
-    VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */
-    VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */
-    VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */
-    VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
+    VERIFY_CHECK_ONLY((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */
+    VERIFY_CHECK_ONLY((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */
+    VERIFY_CHECK_ONLY((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */
+    VERIFY_CHECK_ONLY((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */
     /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
     sd = d4 >> 63;
     se = e4 >> 63;
@@ -368,12 +364,11 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
     /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
     d->v[4] = (int64_t)cd;
     e->v[4] = (int64_t)ce;
-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
-#endif
+
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
 }

 /* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps.
@@ -471,35 +466,29 @@ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_m
         /* Update d,e using that transition matrix. */
         secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
         secp256k1_modinv64_update_fg_62(&f, &g, &t);
-#ifdef VERIFY
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
     }

     /* At this point sufficient iterations have been performed that g must have reached 0
      * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
      * values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
     /* g == 0 */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
     /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
-                 secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
-                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
-                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
-                  (secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
-                   secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
+                      secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
+                      (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
+                       secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
+                       (secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
+                        secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));

     /* Optionally negate d, normalize to [0,modulus), and return it. */
     secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo);
@@ -528,12 +517,10 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
         /* Update d,e using that transition matrix. */
         secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
         secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
         /* If the bottom limb of g is zero, there is a chance that g=0. */
         if (g.v[0] == 0) {
@@ -558,28 +545,24 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
             g.v[len - 2] |= (uint64_t)gn << 62;
             --len;
         }
-#ifdef VERIFY
-        VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
-        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+        VERIFY_CHECK_ONLY(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
+        VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
     }

     /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
      * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
     /* g == 0 */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
     /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
-    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
-                 secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
-                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
-                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
-                  (secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
-                   secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
+                      secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
+                      (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
+                       secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
+                       (secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
+                        secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));

     /* Optionally negate d, normalize to [0,modulus), and return it. */
     secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h
index a1def26fca..b3e0d7c911 100644
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -110,10 +110,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
     r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
     t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
     r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
-#ifdef VERIFY
     VERIFY_CHECK((t >> 64) == 0);
     VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
 }

 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@@ -808,18 +806,14 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
     r->d[2] = a2 >> 4 | a3 << 58;
     r->d[3] = a3 >> 6 | a4 << 56;

-#ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
 }

 static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
     const uint64_t M62 = UINT64_MAX >> 2;
     const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];

-#ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0);
-#endif

     r->v[0] = a0 & M62;
     r->v[1] = (a0 >> 62 | a1 << 2) & M62;
@@ -842,9 +836,7 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
     secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);

-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_scalar_is_zero(r) == zero_in);
 }

 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@@ -856,9 +848,7 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
     secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);

-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_scalar_is_zero(r) == zero_in);
 }

 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h
index 62c7ae7156..7ce3b18309 100644
--- a/src/scalar_8x32_impl.h
+++ b/src/scalar_8x32_impl.h
@@ -158,10 +158,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
     r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
     t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
     r->d[7] = t & 0xFFFFFFFFULL;
-#ifdef VERIFY
     VERIFY_CHECK((t >> 32) == 0);
     VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
 }

 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@@ -670,9 +668,7 @@ static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_
     r->d[6] = a6 >> 12 | a7 << 18;
     r->d[7] = a7 >> 14 | a8 << 16;

-#ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
 }

 static void secp256k1_scalar_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_scalar *a) {
@@ -680,9 +676,7 @@ static void secp256k1_scalar_to_signed30(secp256k1_modinv32_signed30 *r, const s
     const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3],
                    a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7];

-#ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0);
-#endif

     r->v[0] = a0 & M30;
     r->v[1] = (a0 >> 30 | a1 << 2) & M30;
@@ -709,9 +703,7 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
     secp256k1_modinv32(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed30(r, &s);

-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_scalar_is_zero(r) == zero_in);
 }

 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@@ -723,9 +715,7 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
     secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed30(r, &s);

-#ifdef VERIFY
-    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
+    VERIFY_CHECK_ONLY(secp256k1_scalar_is_zero(r) == zero_in);
 }

 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
diff --git a/src/scalar_low_impl.h b/src/scalar_low_impl.h
index 7176f0b2ca..8558e5bfb4 100644
--- a/src/scalar_low_impl.h
+++ b/src/scalar_low_impl.h
@@ -39,12 +39,10 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
     if (flag && bit < 32)
         *r += ((uint32_t)1 << bit);
-#ifdef VERIFY
     VERIFY_CHECK(bit < 32);
     /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
     VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
     VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
 }

 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
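The overflow condition asserted in secp256k1_scalar_cadd_bit above can be checked by direct arithmetic: (1 << bit) - 1 <= UINT32_MAX - ORDER is equivalent to (ORDER - 1) + (1 << bit) <= UINT32_MAX, so even the largest in-range scalar cannot wrap. A small self-checking sketch (ORDER = 13 is an arbitrary stand-in for EXHAUSTIVE_TEST_ORDER, not a value from the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        const uint32_t ORDER = 13; /* hypothetical exhaustive-test order */
        uint32_t bit;
        for (bit = 0; bit < 32; bit++) {
            if (((uint32_t)1 << bit) - 1 <= UINT32_MAX - ORDER) {
                /* the worst case r = ORDER - 1 cannot wrap uint32_t */
                assert((uint64_t)(ORDER - 1) + ((uint32_t)1 << bit) <= UINT32_MAX);
            }
        }
        return 0;
    }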
diff --git a/src/util.h b/src/util.h
index f78846836c..f381bb963a 100644
--- a/src/util.h
+++ b/src/util.h
@@ -57,16 +57,38 @@ static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback *
 } while(0)
 #endif

-/* Like assert(), but when VERIFY is defined, and side-effect safe. */
+#ifdef CHECK_SIDE_EFFECT_FREE
+/* When our compiler is capable of optimizing away references to variables
+ * that control execution of a side-effect free statement, use this to
+ * check that VERIFY_CHECK conditions are safe.
+ *
+ * Credit to: https://stackoverflow.com/a/35294344 */
+extern int secp256k1_not_supposed_to_survive;
+#define ASSERT_NO_SIDE_EFFECT(cond) do { (void)(secp256k1_not_supposed_to_survive || (cond)); } while(0)
+#else
+/* If we can't use that, just run it in an unexecuted branch, to make sure it is still
+ * syntactically valid. */
+#define ASSERT_NO_SIDE_EFFECT(cond) do { if (0) { (void)(cond); } } while(0)
+#endif
+
+/* VERIFY_CHECK is like assert(), but gated by -DVERIFY, and verified for
+ * side-effect-freeness under -DCHECK_SIDE_EFFECT_FREE.
+ *
+ * VERIFY_CHECK_ONLY is similar, but less safe: it has no effect outside
+ * of -DVERIFY, and thus can be used with expensive conditions, or even
+ * conditions that wouldn't compile outside -DVERIFY. */
 #if defined(COVERAGE)
-#define VERIFY_CHECK(check)
-#define VERIFY_SETUP(stmt)
+# define VERIFY_CHECK(check)
+# define VERIFY_CHECK_ONLY(check)
+# define VERIFY_SETUP(stmt)
 #elif defined(VERIFY)
-#define VERIFY_CHECK CHECK
-#define VERIFY_SETUP(stmt) do { stmt; } while(0)
+# define VERIFY_CHECK(cond) do { ASSERT_NO_SIDE_EFFECT(cond); CHECK(cond); } while (0)
+# define VERIFY_CHECK_ONLY CHECK
+# define VERIFY_SETUP(stmt) do { stmt; } while(0)
 #else
-#define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
-#define VERIFY_SETUP(stmt)
+# define VERIFY_CHECK(cond) do { ASSERT_NO_SIDE_EFFECT(cond); } while (0)
+# define VERIFY_CHECK_ONLY(cond)
+# define VERIFY_SETUP(stmt)
 #endif

 /* Define `VG_UNDEF` and `VG_CHECK` when VALGRIND is defined */
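Putting the pieces together, the new macros behave as follows. This condensed stand-in for the util.h logic (hypothetical names, CHECK reduced to abort-on-failure) can be compiled with and without -DVERIFY to observe the difference:

    #include <stdlib.h>

    extern int not_supposed_to_survive; /* deliberately never defined */
    #define CHECK(cond) do { if (!(cond)) abort(); } while(0)
    #define ASSERT_NO_SIDE_EFFECT(cond) \
        do { (void)(not_supposed_to_survive || (cond)); } while(0)

    #ifdef VERIFY
    /* Checked and executed: the condition is both verified to be
     * side-effect free and actually asserted. */
    # define VERIFY_CHECK(cond) do { ASSERT_NO_SIDE_EFFECT(cond); CHECK(cond); } while(0)
    # define VERIFY_CHECK_ONLY CHECK
    #else
    /* Still side-effect-checked, but never executed. _ONLY vanishes. */
    # define VERIFY_CHECK(cond) do { ASSERT_NO_SIDE_EFFECT(cond); } while(0)
    # define VERIFY_CHECK_ONLY(cond)
    #endif

    static int pure_is_small(int x) { return x < 16; }

    int main(void) {
        int i = 0;
        VERIFY_CHECK(pure_is_small(i)); /* fine: condition is a pure read */
        VERIFY_CHECK_ONLY(++i < 16);    /* ++i is a side effect, so it must use
                                         * _ONLY; note i stays 0 when VERIFY is
                                         * not defined */
        return 0;
    }

    /* Build at -O1 or higher so the dead reference to not_supposed_to_survive
     * can be optimized away; had the VERIFY_CHECK condition contained a side
     * effect, the reference would survive and linking would fail -- exactly
     * the failure mode the configure probe detects. */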