diff --git a/src/field_10x26_impl.h b/src/field_10x26_impl.h index 651500ee8e..9841e2ba4b 100644 --- a/src/field_10x26_impl.h +++ b/src/field_10x26_impl.h @@ -1164,4 +1164,513 @@ static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const se #endif } +static void secp256k1_fe_normalize_30(int32_t *r, int32_t cond_negate) { + /* P == 2^256 - 2^32 - C30 */ + const int32_t C30 = 0x3D1L; + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + int32_t r0 = r[0], r1 = r[1], r2 = r[2], r3 = r[3], r4 = r[4], + r5 = r[5], r6 = r[6], r7 = r[7], r8 = r[8]; + int32_t c, cond_add; + + cond_add = r8 >> 31; + + c = r0 - (C30 & cond_add); + r0 = c & M30; c >>= 30; + c += r1 - (4 & cond_add);; + r1 = c & M30; c >>= 30; + c += r2; + r2 = c & M30; c >>= 30; + c += r3; + r3 = c & M30; c >>= 30; + c += r4; + r4 = c & M30; c >>= 30; + c += r5; + r5 = c & M30; c >>= 30; + c += r6; + r6 = c & M30; c >>= 30; + c += r7; + r7 = c & M30; c >>= 30; + c += r8 + (65536 & cond_add); + r8 = c; + + cond_add = (c >> 31) ^ cond_negate; + + c = (r0 ^ cond_negate) - cond_negate - (C30 & cond_add); + r[0] = c & M30; c >>= 30; + c += (r1 ^ cond_negate) - cond_negate - (4 & cond_add); + r[1] = c & M30; c >>= 30; + c += (r2 ^ cond_negate) - cond_negate; + r[2] = c & M30; c >>= 30; + c += (r3 ^ cond_negate) - cond_negate; + r[3] = c & M30; c >>= 30; + c += (r4 ^ cond_negate) - cond_negate; + r[4] = c & M30; c >>= 30; + c += (r5 ^ cond_negate) - cond_negate; + r[5] = c & M30; c >>= 30; + c += (r6 ^ cond_negate) - cond_negate; + r[6] = c & M30; c >>= 30; + c += (r7 ^ cond_negate) - cond_negate; + r[7] = c & M30; c >>= 30; + c += (r8 ^ cond_negate) - cond_negate + (65536 & cond_add); + r[8] = c; + + VERIFY_CHECK(c >> 16 == 0); +} + +static void secp256k1_fe_decode_30(secp256k1_fe *r, const int32_t *a) { + + const uint32_t M26 = UINT32_MAX >> 6; + const uint32_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4], + a5 = a[5], a6 = a[6], a7 = a[7], a8 = a[8]; + + VERIFY_CHECK(a0 >> 30 == 0); + VERIFY_CHECK(a1 >> 30 == 0); + VERIFY_CHECK(a2 >> 30 == 0); + VERIFY_CHECK(a3 >> 30 == 0); + VERIFY_CHECK(a4 >> 30 == 0); + VERIFY_CHECK(a5 >> 30 == 0); + VERIFY_CHECK(a6 >> 30 == 0); + VERIFY_CHECK(a7 >> 30 == 0); + VERIFY_CHECK(a8 >> 16 == 0); + + r->n[0] = a0 & M26; + r->n[1] = (a0 >> 26 | a1 << 4) & M26; + r->n[2] = (a1 >> 22 | a2 << 8) & M26; + r->n[3] = (a2 >> 18 | a3 << 12) & M26; + r->n[4] = (a3 >> 14 | a4 << 16) & M26; + r->n[5] = (a4 >> 10 | a5 << 20) & M26; + r->n[6] = (a5 >> 6 | a6 << 24) & M26; + r->n[7] = (a6 >> 2 ) & M26; + r->n[8] = (a6 >> 28 | a7 << 2) & M26; + r->n[9] = (a7 >> 24 | a8 << 6); + +#ifdef VERIFY + r->magnitude = 1; + r->normalized = 1; + secp256k1_fe_verify(r); +#endif +} + +static void secp256k1_fe_encode_30(int32_t *r, const secp256k1_fe *a) { + + const uint32_t M30 = UINT32_MAX >> 2; + const uint32_t *n = &a->n[0]; + const uint64_t a0 = n[0], a1 = n[1], a2 = n[2], a3 = n[3], a4 = n[4], + a5 = n[5], a6 = n[6], a7 = n[7], a8 = n[8], a9 = n[9]; + +#ifdef VERIFY + VERIFY_CHECK(a->normalized); +#endif + + r[0] = (a0 | a1 << 26) & M30; + r[1] = (a1 >> 4 | a2 << 22) & M30; + r[2] = (a2 >> 8 | a3 << 18) & M30; + r[3] = (a3 >> 12 | a4 << 14) & M30; + r[4] = (a4 >> 16 | a5 << 10) & M30; + r[5] = (a5 >> 20 | a6 << 6) & M30; + r[6] = (a6 >> 24 | a7 << 2 + | a8 << 28) & M30; + r[7] = (a8 >> 2 | a9 << 24) & M30; + r[8] = a9 >> 6; +} + +static uint32_t secp256k1_fe_divsteps_30(uint32_t eta, uint32_t f0, uint32_t g0, int32_t *t) { + + uint32_t u = 1, v = 0, q = 0, r = 1; + uint32_t c1, c2, f = f0, g = g0, x, y, 
z; + int i; + + for (i = 0; i < 30; ++i) { + + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << i); + VERIFY_CHECK((q * f0 + r * g0) == g << i); + + c1 = (int32_t)eta >> 31; + c2 = -(g & 1); + + x = (f ^ c1) - c1; + y = (u ^ c1) - c1; + z = (v ^ c1) - c1; + + g += x & c2; + q += y & c2; + r += z & c2; + + c1 &= c2; + eta = (eta ^ c1) - (c1 + 1); + + f += g & c1; + u += q & c1; + v += r & c1; + + g >>= 1; + u <<= 1; + v <<= 1; + } + + t[0] = (int32_t)u; + t[1] = (int32_t)v; + t[2] = (int32_t)q; + t[3] = (int32_t)r; + + return eta; +} + +static uint32_t secp256k1_fe_divsteps_30_var(uint32_t eta, uint32_t f0, uint32_t g0, int32_t *t) { + +#if 1 + static const uint8_t debruijn[32] = { + 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, + 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, + 0x1E, 0x11, 0x08, 0x0E, 0x1D, 0x0D, 0x1C, 0x1B }; +#endif + + static const uint8_t inv256[128] = { + 0xFF, 0x55, 0x33, 0x49, 0xC7, 0x5D, 0x3B, 0x11, 0x0F, 0xE5, 0xC3, 0x59, + 0xD7, 0xED, 0xCB, 0x21, 0x1F, 0x75, 0x53, 0x69, 0xE7, 0x7D, 0x5B, 0x31, + 0x2F, 0x05, 0xE3, 0x79, 0xF7, 0x0D, 0xEB, 0x41, 0x3F, 0x95, 0x73, 0x89, + 0x07, 0x9D, 0x7B, 0x51, 0x4F, 0x25, 0x03, 0x99, 0x17, 0x2D, 0x0B, 0x61, + 0x5F, 0xB5, 0x93, 0xA9, 0x27, 0xBD, 0x9B, 0x71, 0x6F, 0x45, 0x23, 0xB9, + 0x37, 0x4D, 0x2B, 0x81, 0x7F, 0xD5, 0xB3, 0xC9, 0x47, 0xDD, 0xBB, 0x91, + 0x8F, 0x65, 0x43, 0xD9, 0x57, 0x6D, 0x4B, 0xA1, 0x9F, 0xF5, 0xD3, 0xE9, + 0x67, 0xFD, 0xDB, 0xB1, 0xAF, 0x85, 0x63, 0xF9, 0x77, 0x8D, 0x6B, 0xC1, + 0xBF, 0x15, 0xF3, 0x09, 0x87, 0x1D, 0xFB, 0xD1, 0xCF, 0xA5, 0x83, 0x19, + 0x97, 0xAD, 0x8B, 0xE1, 0xDF, 0x35, 0x13, 0x29, 0xA7, 0x3D, 0x1B, 0xF1, + 0xEF, 0xC5, 0xA3, 0x39, 0xB7, 0xCD, 0xAB, 0x01 + }; + + uint32_t u = 1, v = 0, q = 0, r = 1; + uint32_t f = f0, g = g0, m, w, x, y, z; + int i = 30, limit, zeros; + + for (;;) { + + /* Use a sentinel bit to count zeros only up to i. */ + x = g | (UINT32_MAX << i); + +#if 0 + zeros = __builtin_ctzl(x); +#else + zeros = debruijn[((x & -x) * 0x04D7651F) >> 27]; +#endif + + g >>= zeros; + u <<= zeros; + v <<= zeros; + eta -= zeros; + i -= zeros; + + if (i <= 0) { + break; + } + + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((g & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << (30 - i)); + VERIFY_CHECK((q * f0 + r * g0) == g << (30 - i)); + + if ((int32_t)eta < 0) { + eta = -eta; + x = f; f = g; g = -x; + y = u; u = q; q = -y; + z = v; v = r; r = -z; + } + + /* Handle up to 8 divsteps at once, subject to eta and i. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + m = (UINT32_MAX >> (32 - limit)) & 255U; + + w = (g * inv256[(f >> 1) & 127]) & m; + + g += f * w; + q += u * w; + r += v * w; + + VERIFY_CHECK((g & m) == 0); + } + + t[0] = (int32_t)u; + t[1] = (int32_t)v; + t[2] = (int32_t)q; + t[3] = (int32_t)r; + + return eta; +} + +static void secp256k1_fe_update_de_30(int32_t *d, int32_t *e, const int32_t *t) { + + /* P == 2^256 - 2^32 - C30 */ + const int32_t C30 = 0x3D1L; + /* I30 == P^-1 mod 2^30 */ + const int32_t I30 = 0x2DDACACFL; + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t[0], v = t[1], q = t[2], r = t[3]; + int32_t di, ei, md, me, sd, se; + int64_t cd, ce; + int i; + + /* + * On input, d/e must be in the range (-2.P, P). For initially negative d (resp. e), we add + * u and/or v (resp. q and/or r) multiples of the modulus to the corresponding output (prior + * to division by 2^30). This has the same effect as if we added the modulus to the input(s). 
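+     *
+     * In matrix terms, with t == [u, v; q, r], this function computes
+     * (d, e) <- t * (d, e) / 2^30, where the division by 2^30 is taken modulo P
+     * (an exact integer division is arranged below by first adding a suitable
+     * multiple of the modulus).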
+ */ + + sd = d[8] >> 31; + se = e[8] >> 31; + + md = (u & sd) + (v & se); + me = (q & sd) + (r & se); + + di = d[0]; + ei = e[0]; + + cd = (int64_t)u * di + (int64_t)v * ei; + ce = (int64_t)q * di + (int64_t)r * ei; + + /* + * Subtract from md/me an extra term in the range [0, 2^30) such that the low 30 bits of each + * sum of products will be 0. This allows clean division by 2^30. On output, d/e are thus in + * the range (-2.P, P), consistent with the input constraint. + */ + + md -= (I30 * (int32_t)cd + md) & M30; + me -= (I30 * (int32_t)ce + me) & M30; + + cd -= (int64_t)C30 * md; + ce -= (int64_t)C30 * me; + + VERIFY_CHECK(((int32_t)cd & M30) == 0); cd >>= 30; + VERIFY_CHECK(((int32_t)ce & M30) == 0); ce >>= 30; + + cd -= (int64_t)4 * md; + ce -= (int64_t)4 * me; + + for (i = 1; i < 8; ++i) { + + di = d[i]; + ei = e[i]; + + cd += (int64_t)u * di + (int64_t)v * ei; + ce += (int64_t)q * di + (int64_t)r * ei; + + d[i - 1] = (int32_t)cd & M30; cd >>= 30; + e[i - 1] = (int32_t)ce & M30; ce >>= 30; + } + + { + di = d[8]; + ei = e[8]; + + cd += (int64_t)u * di + (int64_t)v * ei; + ce += (int64_t)q * di + (int64_t)r * ei; + + cd += (int64_t)65536 * md; + ce += (int64_t)65536 * me; + + d[7] = (int32_t)cd & M30; cd >>= 30; + e[7] = (int32_t)ce & M30; ce >>= 30; + } + + d[8] = (int32_t)cd; + e[8] = (int32_t)ce; +} + +static void secp256k1_fe_update_fg_30(int32_t *f, int32_t *g, const int32_t *t) { + + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t[0], v = t[1], q = t[2], r = t[3]; + int32_t fi, gi; + int64_t cf, cg; + int i; + + fi = f[0]; + gi = g[0]; + + cf = (int64_t)u * fi + (int64_t)v * gi; + cg = (int64_t)q * fi + (int64_t)r * gi; + + VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30; + VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30; + + for (i = 1; i < 9; ++i) { + + fi = f[i]; + gi = g[i]; + + cf += (int64_t)u * fi + (int64_t)v * gi; + cg += (int64_t)q * fi + (int64_t)r * gi; + + f[i - 1] = (int32_t)cf & M30; cf >>= 30; + g[i - 1] = (int32_t)cg & M30; cg >>= 30; + } + + f[8] = (int32_t)cf; + g[8] = (int32_t)cg; +} + +static void secp256k1_fe_update_fg_30_var(int len, int32_t *f, int32_t *g, const int32_t *t) { + + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t[0], v = t[1], q = t[2], r = t[3]; + int32_t fi, gi; + int64_t cf, cg; + int i; + + VERIFY_CHECK(len > 0); + + fi = f[0]; + gi = g[0]; + + cf = (int64_t)u * fi + (int64_t)v * gi; + cg = (int64_t)q * fi + (int64_t)r * gi; + + VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30; + VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30; + + for (i = 1; i < len; ++i) { + + fi = f[i]; + gi = g[i]; + + cf += (int64_t)u * fi + (int64_t)v * gi; + cg += (int64_t)q * fi + (int64_t)r * gi; + + f[i - 1] = (int32_t)cf & M30; cf >>= 30; + g[i - 1] = (int32_t)cg & M30; cg >>= 30; + } + + f[len - 1] = (int32_t)cf; + g[len - 1] = (int32_t)cg; +} + +static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) { + + /* Modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. 
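+     *
+     * A sketch of why this works (see the paper for the formal treatment): the loop below
+     * keeps the invariants a*d == f (mod P) and a*e == g (mod P), since every iteration
+     * applies the same transition matrix t to (f, g) and to (d, e), dividing by 2^30
+     * exactly for f/g and modulo P for d/e. Once g == 0 and f == +/-1 (their gcd),
+     * d holds +/- the inverse of a modulo P.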
*/ + + int32_t t[4]; + int32_t d[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + int32_t e[9] = { 1, 0, 0, 0, 0, 0, 0, 0, 0 }; + int32_t f[9] = { 0x3FFFFC2F, 0x3FFFFFFB, 0x3FFFFFFF, 0x3FFFFFFF, 0x3FFFFFFF, + 0x3FFFFFFF, 0x3FFFFFFF, 0x3FFFFFFF, 0xFFFF }; + int32_t g[9]; + secp256k1_fe b0; + int i; + uint32_t eta; +#ifdef VERIFY + int zero_in; +#endif + + b0 = *a; + secp256k1_fe_normalize(&b0); + secp256k1_fe_encode_30(g, &b0); + +#ifdef VERIFY + zero_in = secp256k1_fe_is_zero(&b0); +#endif + + /* The paper uses 'delta'; eta == -delta (a performance tweak). + * + * If the maximum bitlength of g is known to be less than 256, then eta can be set + * initially to -(1 + (256 - maxlen(g))), and only (741 - (256 - maxlen(g))) total + * divsteps are needed. */ + eta = -(uint32_t)1; + + for (i = 0; i < 25; ++i) { + eta = secp256k1_fe_divsteps_30(eta, f[0], g[0], t); + secp256k1_fe_update_de_30(d, e, t); + secp256k1_fe_update_fg_30(f, g, t); + } + + /* At this point sufficient iterations have been performed that g must have reached 0 + * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g + * values i.e. +/- 1, and d now contains +/- the modular inverse. */ + + VERIFY_CHECK((g[0] | g[1] | g[2] | g[3] | g[4] | g[5] | g[6] | g[7] | g[8]) == 0); + + secp256k1_fe_normalize_30(d, f[8] >> 31); + secp256k1_fe_decode_30(r, d); + +#ifdef VERIFY + VERIFY_CHECK(!secp256k1_fe_is_zero(r) == !zero_in); +#endif +} + +static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) { + + /* Modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. */ + + int32_t t[4]; + int32_t d[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + int32_t e[9] = { 1, 0, 0, 0, 0, 0, 0, 0, 0 }; + int32_t f[9] = { 0x3FFFFC2F, 0x3FFFFFFB, 0x3FFFFFFF, 0x3FFFFFFF, 0x3FFFFFFF, + 0x3FFFFFFF, 0x3FFFFFFF, 0x3FFFFFFF, 0xFFFF }; + int32_t g[9]; + secp256k1_fe b; + int i, j, len = 9; + uint32_t eta; + int32_t cond, fn, gn; +#ifdef VERIFY + int zero_in; +#endif + + b = *a; + secp256k1_fe_normalize(&b); + secp256k1_fe_encode_30(g, &b); + +#ifdef VERIFY + zero_in = secp256k1_fe_is_zero(&b); +#endif + + /* The paper uses 'delta'; eta == -delta (a performance tweak). + * + * If g has leading zeros (w.r.t 256 bits), then eta can be set initially to + * -(1 + clz(g)), and the worst-case divstep count would be only (741 - clz(g)). */ + eta = -(uint32_t)1; + + for (i = 0; i < 25; ++i) { + + eta = secp256k1_fe_divsteps_30_var(eta, f[0], g[0], t); + secp256k1_fe_update_de_30(d, e, t); + secp256k1_fe_update_fg_30_var(len, f, g, t); + + if (g[0] == 0) { + cond = 0; + for (j = 1; j < len; ++j) { + cond |= g[j]; + } + if (cond == 0) { + break; + } + } + + fn = f[len - 1]; + gn = g[len - 1]; + + cond = ((int32_t)len - 2) >> 31; + cond |= fn ^ (fn >> 31); + cond |= gn ^ (gn >> 31); + + if (cond == 0) { + f[len - 2] |= (uint32_t)fn << 30; + g[len - 2] |= (uint32_t)gn << 30; + --len; + } + } + + VERIFY_CHECK(i < 25); + + /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of + * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. 
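+     *
+     * The sign mask of f (f[len - 1] >> 31) is passed to secp256k1_fe_normalize_30 below
+     * as cond_negate, so if f ended at -1 the stored -(1/a) is negated back to 1/a before
+     * decoding.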
*/ + + secp256k1_fe_normalize_30(d, f[len - 1] >> 31); + secp256k1_fe_decode_30(r, d); + +#ifdef VERIFY + VERIFY_CHECK(!secp256k1_fe_is_zero(r) == !zero_in); +#endif +} + #endif /* SECP256K1_FIELD_REPR_IMPL_H */ diff --git a/src/field_5x52_impl.h b/src/field_5x52_impl.h index 71a38f915b..1af2c15fce 100644 --- a/src/field_5x52_impl.h +++ b/src/field_5x52_impl.h @@ -498,4 +498,480 @@ static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const se #endif } +static void secp256k1_fe_normalize_62(int64_t *r, int64_t cond_negate) { + /* P == 2^256 - C62 */ + const int64_t C62 = 0x1000003D1LL; + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + int64_t r0 = r[0], r1 = r[1], r2 = r[2], r3 = r[3], r4 = r[4]; + int64_t c, cond_add; + + cond_add = r4 >> 63; + + c = r0 - (C62 & cond_add); + r0 = c & M62; c >>= 62; + c += r1; + r1 = c & M62; c >>= 62; + c += r2; + r2 = c & M62; c >>= 62; + c += r3; + r3 = c & M62; c >>= 62; + c += r4 + (256 & cond_add); + r4 = c; + + cond_add = (c >> 63) ^ cond_negate; + + c = (r0 ^ cond_negate) - cond_negate - (C62 & cond_add); + r[0] = c & M62; c >>= 62; + c += (r1 ^ cond_negate) - cond_negate; + r[1] = c & M62; c >>= 62; + c += (r2 ^ cond_negate) - cond_negate; + r[2] = c & M62; c >>= 62; + c += (r3 ^ cond_negate) - cond_negate; + r[3] = c & M62; c >>= 62; + c += (r4 ^ cond_negate) - cond_negate + (256 & cond_add); + r[4] = c; + + VERIFY_CHECK(c >> 8 == 0); +} + +static void secp256k1_fe_decode_62(secp256k1_fe *r, const int64_t *a) { + + const uint64_t M52 = UINT64_MAX >> 12; + const uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; + + VERIFY_CHECK(a0 >> 62 == 0); + VERIFY_CHECK(a1 >> 62 == 0); + VERIFY_CHECK(a2 >> 62 == 0); + VERIFY_CHECK(a3 >> 62 == 0); + VERIFY_CHECK(a4 >> 8 == 0); + + r->n[0] = a0 & M52; + r->n[1] = (a0 >> 52 | a1 << 10) & M52; + r->n[2] = (a1 >> 42 | a2 << 20) & M52; + r->n[3] = (a2 >> 32 | a3 << 30) & M52; + r->n[4] = (a3 >> 22 | a4 << 40); + +#ifdef VERIFY + r->magnitude = 1; + r->normalized = 1; + secp256k1_fe_verify(r); +#endif +} + +static void secp256k1_fe_encode_62(int64_t *r, const secp256k1_fe *a) { + + const uint64_t M62 = UINT64_MAX >> 2; + const uint64_t *n = &a->n[0]; + const uint64_t a0 = n[0], a1 = n[1], a2 = n[2], a3 = n[3], a4 = n[4]; + +#ifdef VERIFY + VERIFY_CHECK(a->normalized); +#endif + + r[0] = (a0 | a1 << 52) & M62; + r[1] = (a1 >> 10 | a2 << 42) & M62; + r[2] = (a2 >> 20 | a3 << 32) & M62; + r[3] = (a3 >> 30 | a4 << 22) & M62; + r[4] = a4 >> 40; +} + +static uint64_t secp256k1_fe_divsteps_62(uint64_t eta, uint64_t f0, uint64_t g0, int64_t *t) { + + uint64_t u = 1, v = 0, q = 0, r = 1; + uint64_t c1, c2, f = f0, g = g0, x, y, z; + int i; + + for (i = 0; i < 62; ++i) { + + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << i); + VERIFY_CHECK((q * f0 + r * g0) == g << i); + + c1 = (int64_t)eta >> 63; + c2 = -(g & 1); + + x = (f ^ c1) - c1; + y = (u ^ c1) - c1; + z = (v ^ c1) - c1; + + g += x & c2; + q += y & c2; + r += z & c2; + + c1 &= c2; + eta = (eta ^ c1) - (c1 + 1); + + f += g & c1; + u += q & c1; + v += r & c1; + + g >>= 1; + u <<= 1; + v <<= 1; + } + + t[0] = (int64_t)u; + t[1] = (int64_t)v; + t[2] = (int64_t)q; + t[3] = (int64_t)r; + + return eta; +} + +static uint64_t secp256k1_fe_divsteps_62_var(uint64_t eta, uint64_t f0, uint64_t g0, int64_t *t) { + +#if 1 + static const uint8_t debruijn[64] = { + 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, + 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, + 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 
21, 23, 58, 17, 10, + 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12 + }; +#endif + + uint64_t u = 1, v = 0, q = 0, r = 1; + uint64_t f = f0, g = g0, m, w, x, y, z; + int i = 62, limit, zeros; + + for (;;) { + + x = g | (UINT64_MAX << i); + + /* Use a sentinel bit to count zeros only up to i. */ +#if 0 + zeros = __builtin_ctzll(x); +#else + zeros = debruijn[((x & -x) * 0x022FDD63CC95386D) >> 58]; +#endif + + g >>= zeros; + u <<= zeros; + v <<= zeros; + eta -= zeros; + i -= zeros; + + if (i <= 0) { + break; + } + + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((g & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i)); + VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i)); + + if ((int64_t)eta < 0) { + eta = -eta; + x = f; f = g; g = -x; + y = u; u = q; q = -y; + z = v; v = r; r = -z; + + /* Handle up to 6 divsteps at once, subject to eta and i. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + m = (UINT64_MAX >> (64 - limit)) & 63U; + + w = (f * g * (f * f - 2)) & m; + } else { + /* Handle up to 4 divsteps at once, subject to eta and i. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + m = (UINT64_MAX >> (64 - limit)) & 15U; + + w = f + (((f + 1) & 4) << 1); + w = (-w * g) & m; + } + + g += f * w; + q += u * w; + r += v * w; + + VERIFY_CHECK((g & m) == 0); + } + + t[0] = (int64_t)u; + t[1] = (int64_t)v; + t[2] = (int64_t)q; + t[3] = (int64_t)r; + + return eta; +} + +static void secp256k1_fe_update_de_62(int64_t *d, int64_t *e, const int64_t *t) { + + /* P == 2^256 - C62 */ + const int64_t C62 = 0x1000003D1LL; + /* I62 == P^-1 mod 2^62 */ + const int64_t I62 = 0x27C7F6E22DDACACFLL; + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t d0 = d[0], d1 = d[1], d2 = d[2], d3 = d[3], d4 = d[4]; + const int64_t e0 = e[0], e1 = e[1], e2 = e[2], e3 = e[3], e4 = e[4]; + const int64_t u = t[0], v = t[1], q = t[2], r = t[3]; + int64_t md, me, sd, se; + int128_t cd, ce; + + /* + * On input, d/e must be in the range (-2.P, P). For initially negative d (resp. e), we add + * u and/or v (resp. q and/or r) multiples of the modulus to the corresponding output (prior + * to division by 2^62). This has the same effect as if we added the modulus to the input(s). + */ + + sd = d4 >> 63; + se = e4 >> 63; + + md = (u & sd) + (v & se); + me = (q & sd) + (r & se); + + cd = (int128_t)u * d0 + (int128_t)v * e0; + ce = (int128_t)q * d0 + (int128_t)r * e0; + + /* + * Subtract from md/me an extra term in the range [0, 2^62) such that the low 62 bits of each + * sum of products will be 0. This allows clean division by 2^62. On output, d/e are thus in + * the range (-2.P, P), consistent with the input constraint. 
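+     *
+     * (This relies on I62 being the inverse of P modulo 2^62: after the adjustment,
+     * C62 * md == cd (mod 2^62), i.e. the low 62 bits of cd + md * P are zero, and
+     * similarly for ce/me.)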
+ */ + + md -= (I62 * (int64_t)cd + md) & M62; + me -= (I62 * (int64_t)ce + me) & M62; + + cd -= (int128_t)C62 * md; + ce -= (int128_t)C62 * me; + + VERIFY_CHECK(((int64_t)cd & M62) == 0); cd >>= 62; + VERIFY_CHECK(((int64_t)ce & M62) == 0); ce >>= 62; + + cd += (int128_t)u * d1 + (int128_t)v * e1; + ce += (int128_t)q * d1 + (int128_t)r * e1; + + d[0] = (int64_t)cd & M62; cd >>= 62; + e[0] = (int64_t)ce & M62; ce >>= 62; + + cd += (int128_t)u * d2 + (int128_t)v * e2; + ce += (int128_t)q * d2 + (int128_t)r * e2; + + d[1] = (int64_t)cd & M62; cd >>= 62; + e[1] = (int64_t)ce & M62; ce >>= 62; + + cd += (int128_t)u * d3 + (int128_t)v * e3; + ce += (int128_t)q * d3 + (int128_t)r * e3; + + d[2] = (int64_t)cd & M62; cd >>= 62; + e[2] = (int64_t)ce & M62; ce >>= 62; + + cd += (int128_t)u * d4 + (int128_t)v * e4; + ce += (int128_t)q * d4 + (int128_t)r * e4; + + cd += (int128_t)256 * md; + ce += (int128_t)256 * me; + + d[3] = (int64_t)cd & M62; cd >>= 62; + e[3] = (int64_t)ce & M62; ce >>= 62; + + d[4] = (int64_t)cd; + e[4] = (int64_t)ce; +} + +static void secp256k1_fe_update_fg_62(int64_t *f, int64_t *g, const int64_t *t) { + + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t f0 = f[0], f1 = f[1], f2 = f[2], f3 = f[3], f4 = f[4]; + const int64_t g0 = g[0], g1 = g[1], g2 = g[2], g3 = g[3], g4 = g[4]; + const int64_t u = t[0], v = t[1], q = t[2], r = t[3]; + int128_t cf, cg; + + cf = (int128_t)u * f0 + (int128_t)v * g0; + cg = (int128_t)q * f0 + (int128_t)r * g0; + + VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; + VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; + + cf += (int128_t)u * f1 + (int128_t)v * g1; + cg += (int128_t)q * f1 + (int128_t)r * g1; + + f[0] = (int64_t)cf & M62; cf >>= 62; + g[0] = (int64_t)cg & M62; cg >>= 62; + + cf += (int128_t)u * f2 + (int128_t)v * g2; + cg += (int128_t)q * f2 + (int128_t)r * g2; + + f[1] = (int64_t)cf & M62; cf >>= 62; + g[1] = (int64_t)cg & M62; cg >>= 62; + + cf += (int128_t)u * f3 + (int128_t)v * g3; + cg += (int128_t)q * f3 + (int128_t)r * g3; + + f[2] = (int64_t)cf & M62; cf >>= 62; + g[2] = (int64_t)cg & M62; cg >>= 62; + + cf += (int128_t)u * f4 + (int128_t)v * g4; + cg += (int128_t)q * f4 + (int128_t)r * g4; + + f[3] = (int64_t)cf & M62; cf >>= 62; + g[3] = (int64_t)cg & M62; cg >>= 62; + + f[4] = (int64_t)cf; + g[4] = (int64_t)cg; +} + +static void secp256k1_fe_update_fg_62_var(int len, int64_t *f, int64_t *g, const int64_t *t) { + + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t u = t[0], v = t[1], q = t[2], r = t[3]; + int64_t fi, gi; + int128_t cf, cg; + int i; + + VERIFY_CHECK(len > 0); + + fi = f[0]; + gi = g[0]; + + cf = (int128_t)u * fi + (int128_t)v * gi; + cg = (int128_t)q * fi + (int128_t)r * gi; + + VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; + VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; + + for (i = 1; i < len; ++i) { + + fi = f[i]; + gi = g[i]; + + cf += (int128_t)u * fi + (int128_t)v * gi; + cg += (int128_t)q * fi + (int128_t)r * gi; + + f[i - 1] = (int64_t)cf & M62; cf >>= 62; + g[i - 1] = (int64_t)cg & M62; cg >>= 62; + } + + f[len - 1] = (int64_t)cf; + g[len - 1] = (int64_t)cg; +} + +static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) { + + /* Modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. 
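+     *
+     * The constant-time loop below performs 12 iterations of 62 divsteps each, i.e. 744
+     * divsteps in total, which meets the worst-case bound of 741 referred to below.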
*/ + + int64_t t[4]; + int64_t d[5] = { 0, 0, 0, 0, 0 }; + int64_t e[5] = { 1, 0, 0, 0, 0 }; + int64_t f[5] = { 0x3FFFFFFEFFFFFC2FLL, 0x3FFFFFFFFFFFFFFFLL, 0x3FFFFFFFFFFFFFFFLL, + 0x3FFFFFFFFFFFFFFFLL, 0xFFLL }; + int64_t g[5]; + secp256k1_fe b0; + int i; + uint64_t eta; +#ifdef VERIFY + int zero_in; +#endif + + b0 = *a; + secp256k1_fe_normalize(&b0); + secp256k1_fe_encode_62(g, &b0); + +#ifdef VERIFY + zero_in = secp256k1_fe_is_zero(&b0); +#endif + + /* The paper uses 'delta'; eta == -delta (a performance tweak). + * + * If the maximum bitlength of g is known to be less than 256, then eta can be set + * initially to -(1 + (256 - maxlen(g))), and only (741 - (256 - maxlen(g))) total + * divsteps are needed. */ + eta = -(uint64_t)1; + + for (i = 0; i < 12; ++i) { + eta = secp256k1_fe_divsteps_62(eta, f[0], g[0], t); + secp256k1_fe_update_de_62(d, e, t); + secp256k1_fe_update_fg_62(f, g, t); + } + + /* At this point sufficient iterations have been performed that g must have reached 0 + * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g + * values i.e. +/- 1, and d now contains +/- the modular inverse. */ + + VERIFY_CHECK((g[0] | g[1] | g[2] | g[3] | g[4]) == 0); + + secp256k1_fe_normalize_62(d, f[4] >> 63); + secp256k1_fe_decode_62(r, d); + +#ifdef VERIFY + VERIFY_CHECK(!secp256k1_fe_is_zero(r) == !zero_in); +#endif +} + +static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) { + + /* Modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. */ + + int64_t t[4]; + int64_t d[5] = { 0, 0, 0, 0, 0 }; + int64_t e[5] = { 1, 0, 0, 0, 0 }; + int64_t f[5] = { 0x3FFFFFFEFFFFFC2FLL, 0x3FFFFFFFFFFFFFFFLL, 0x3FFFFFFFFFFFFFFFLL, + 0x3FFFFFFFFFFFFFFFLL, 0xFFLL }; + int64_t g[5]; + secp256k1_fe b; + int i, j, len = 5; + uint64_t eta; + int64_t cond, fn, gn; +#ifdef VERIFY + int zero_in; +#endif + + b = *a; + secp256k1_fe_normalize(&b); + secp256k1_fe_encode_62(g, &b); + +#ifdef VERIFY + zero_in = secp256k1_fe_is_zero(&b); +#endif + + /* The paper uses 'delta'; eta == -delta (a performance tweak). + * + * If g has leading zeros (w.r.t 256 bits), then eta can be set initially to + * -(1 + clz(g)), and the worst-case divstep count would be only (741 - clz(g)). */ + eta = -(uint64_t)1; + + for (i = 0; i < 12; ++i) { + + eta = secp256k1_fe_divsteps_62_var(eta, f[0], g[0], t); + secp256k1_fe_update_de_62(d, e, t); + secp256k1_fe_update_fg_62_var(len, f, g, t); + + if (g[0] == 0) { + cond = 0; + for (j = 1; j < len; ++j) { + cond |= g[j]; + } + if (cond == 0) { + break; + } + } + + fn = f[len - 1]; + gn = g[len - 1]; + + cond = ((int64_t)len - 2) >> 63; + cond |= fn ^ (fn >> 63); + cond |= gn ^ (gn >> 63); + + if (cond == 0) { + f[len - 2] |= fn << 62; + g[len - 2] |= gn << 62; + --len; + } + } + + VERIFY_CHECK(i < 12); + + /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of + * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. 
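+     *
+     * (The variable-time loop above is expected to exit early through its g == 0 check;
+     * the VERIFY_CHECK(i < 12) above asserts that it did not exhaust all 12 iterations.)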
*/ + + secp256k1_fe_normalize_62(d, f[len - 1] >> 63); + secp256k1_fe_decode_62(r, d); + +#ifdef VERIFY + VERIFY_CHECK(!secp256k1_fe_is_zero(r) == !zero_in); +#endif +} + #endif /* SECP256K1_FIELD_REPR_IMPL_H */ diff --git a/src/field_impl.h b/src/field_impl.h index 485921a60e..ef15a0fc85 100644 --- a/src/field_impl.h +++ b/src/field_impl.h @@ -136,6 +136,7 @@ static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) { return secp256k1_fe_equal(&t1, a); } +#if defined(SECP256K1_FE_INV_DEFAULT) static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) { secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; int j; @@ -225,7 +226,9 @@ static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) { } secp256k1_fe_mul(r, a, &t1); } +#endif +#if defined(SECP256K1_FE_INV_VAR_DEFAULT) static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) { #if defined(USE_FIELD_INV_BUILTIN) secp256k1_fe_inv(r, a); @@ -262,6 +265,7 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) { #error "Please select field inverse implementation" #endif } +#endif static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len) { secp256k1_fe u; diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h index 8f539c4bc6..a053aa70c7 100644 --- a/src/scalar_4x64_impl.h +++ b/src/scalar_4x64_impl.h @@ -957,4 +957,483 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1); } +static const secp256k1_scalar SECP256K1_SCALAR_NEG_TWO_POW_256 = SECP256K1_SCALAR_CONST( + 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFDUL, + 0x755DB9CDUL, 0x5E914077UL, 0x7FA4BD19UL, 0xA06C8282UL +); + +static void secp256k1_scalar_normalize_62(int64_t *r, int64_t cond_negate) { + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t P[5] = { 0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256 }; + int64_t r0 = r[0], r1 = r[1], r2 = r[2], r3 = r[3], r4 = r[4]; + int64_t c, cond_add; + + cond_add = r4 >> 63; + + c = r0 + (P[0] & cond_add); + r0 = c & M62; c >>= 62; + c += r1 + (P[1] & cond_add); + r1 = c & M62; c >>= 62; + c += r2 + (P[2] & cond_add); + r2 = c & M62; c >>= 62; + c += r3; + r3 = c & M62; c >>= 62; + c += r4 + (P[4] & cond_add); + r4 = c; + + cond_add = (c >> 63) ^ cond_negate; + + c = (r0 ^ cond_negate) - cond_negate + (P[0] & cond_add); + r[0] = c & M62; c >>= 62; + c += (r1 ^ cond_negate) - cond_negate + (P[1] & cond_add); + r[1] = c & M62; c >>= 62; + c += (r2 ^ cond_negate) - cond_negate + (P[2] & cond_add); + r[2] = c & M62; c >>= 62; + c += (r3 ^ cond_negate) - cond_negate; + r[3] = c & M62; c >>= 62; + c += (r4 ^ cond_negate) - cond_negate + (P[4] & cond_add); + r[4] = c; + + VERIFY_CHECK(c >> 8 == 0); +} + +static void secp256k1_scalar_decode_62(secp256k1_scalar *r, const int64_t *a) { + + const uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; + + VERIFY_CHECK(a0 >> 62 == 0); + VERIFY_CHECK(a1 >> 62 == 0); + VERIFY_CHECK(a2 >> 62 == 0); + VERIFY_CHECK(a3 >> 62 == 0); + VERIFY_CHECK(a4 >> 8 == 0); + + r->d[0] = a0 | a1 << 62; + r->d[1] = a1 >> 2 | a2 << 60; + r->d[2] = a2 >> 4 | a3 << 58; + r->d[3] = a3 >> 6 | a4 << 56; +} + +static void secp256k1_scalar_encode_62(int64_t *r, const secp256k1_scalar *a) { + + const uint64_t M62 = UINT64_MAX >> 2; + const uint64_t *d = &a->d[0]; + const uint64_t a0 = d[0], a1 = d[1], a2 = d[2], a3 = d[3]; + +#ifdef VERIFY + VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0); 
+#endif + + r[0] = a0 & M62; + r[1] = (a0 >> 62 | a1 << 2) & M62; + r[2] = (a1 >> 60 | a2 << 4) & M62; + r[3] = (a2 >> 58 | a3 << 6) & M62; + r[4] = a3 >> 56; +} + +static uint64_t secp256k1_scalar_divsteps_62(uint64_t eta, uint64_t f0, uint64_t g0, int64_t *t) { + + uint64_t u = 1, v = 0, q = 0, r = 1; + uint64_t c1, c2, f = f0, g = g0, x, y, z; + int i; + + for (i = 0; i < 62; ++i) { + + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << i); + VERIFY_CHECK((q * f0 + r * g0) == g << i); + + c1 = (int64_t)eta >> 63; + c2 = -(g & 1); + + x = (f ^ c1) - c1; + y = (u ^ c1) - c1; + z = (v ^ c1) - c1; + + g += x & c2; + q += y & c2; + r += z & c2; + + c1 &= c2; + eta = (eta ^ c1) - (c1 + 1); + + f += g & c1; + u += q & c1; + v += r & c1; + + g >>= 1; + u <<= 1; + v <<= 1; + } + + t[0] = (int64_t)u; + t[1] = (int64_t)v; + t[2] = (int64_t)q; + t[3] = (int64_t)r; + + return eta; +} + +static uint64_t secp256k1_scalar_divsteps_62_var(uint64_t eta, uint64_t f0, uint64_t g0, int64_t *t) { + +#if 1 + static const uint8_t debruijn[64] = { + 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, + 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, + 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, + 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12 + }; +#endif + + uint64_t u = 1, v = 0, q = 0, r = 1; + uint64_t f = f0, g = g0, m, w, x, y, z; + int i = 62, limit, zeros; + + for (;;) { + + x = g | (UINT64_MAX << i); + + /* Use a sentinel bit to count zeros only up to i. */ +#if 0 + zeros = __builtin_ctzll(x); +#else + zeros = debruijn[((x & -x) * 0x022FDD63CC95386D) >> 58]; +#endif + + g >>= zeros; + u <<= zeros; + v <<= zeros; + eta -= zeros; + i -= zeros; + + if (i <= 0) { + break; + } + + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((g & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i)); + VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i)); + + if ((int64_t)eta < 0) { + eta = -eta; + x = f; f = g; g = -x; + y = u; u = q; q = -y; + z = v; v = r; r = -z; + + /* Handle up to 6 divsteps at once, subject to eta and i. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + m = (UINT64_MAX >> (64 - limit)) & 63U; + + w = (f * g * (f * f - 2)) & m; + } else { + /* Handle up to 4 divsteps at once, subject to eta and i. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + m = (UINT64_MAX >> (64 - limit)) & 15U; + + w = f + (((f + 1) & 4) << 1); + w = (-w * g) & m; + } + + g += f * w; + q += u * w; + r += v * w; + + VERIFY_CHECK((g & m) == 0); + } + + t[0] = (int64_t)u; + t[1] = (int64_t)v; + t[2] = (int64_t)q; + t[3] = (int64_t)r; + + return eta; +} + +static void secp256k1_scalar_update_de_62(int64_t *d, int64_t *e, const int64_t *t) { + + /* I62 == P^-1 mod 2^62 */ + const int64_t I62 = 0x34F20099AA774EC1LL; + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t P[5] = { 0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256 }; + const int64_t d0 = d[0], d1 = d[1], d2 = d[2], d3 = d[3], d4 = d[4]; + const int64_t e0 = e[0], e1 = e[1], e2 = e[2], e3 = e[3], e4 = e[4]; + const int64_t u = t[0], v = t[1], q = t[2], r = t[3]; + int64_t md, me, sd, se; + int128_t cd, ce; + + /* + * On input, d/e must be in the range (-2.P, P). For initially negative d (resp. e), we add + * u and/or v (resp. q and/or r) multiples of the modulus to the corresponding output (prior + * to division by 2^62). This has the same effect as if we added the modulus to the input(s). 
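+     *
+     * (P[] above holds the group order in signed 62-bit limbs; the small negative limb
+     * P[2] keeps the remaining upper limbs at just 0 and 256.)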
+ */ + + sd = d4 >> 63; + se = e4 >> 63; + + md = (u & sd) + (v & se); + me = (q & sd) + (r & se); + + cd = (int128_t)u * d0 + (int128_t)v * e0; + ce = (int128_t)q * d0 + (int128_t)r * e0; + + /* + * Subtract from md/me an extra term in the range [0, 2^62) such that the low 62 bits of each + * sum of products will be 0. This allows clean division by 2^62. On output, d/e are thus in + * the range (-2.P, P), consistent with the input constraint. + */ + + md -= (I62 * (int64_t)cd + md) & M62; + me -= (I62 * (int64_t)ce + me) & M62; + + cd += (int128_t)P[0] * md; + ce += (int128_t)P[0] * me; + + VERIFY_CHECK(((int64_t)cd & M62) == 0); cd >>= 62; + VERIFY_CHECK(((int64_t)ce & M62) == 0); ce >>= 62; + + cd += (int128_t)u * d1 + (int128_t)v * e1; + ce += (int128_t)q * d1 + (int128_t)r * e1; + + cd += (int128_t)P[1] * md; + ce += (int128_t)P[1] * me; + + d[0] = (int64_t)cd & M62; cd >>= 62; + e[0] = (int64_t)ce & M62; ce >>= 62; + + cd += (int128_t)u * d2 + (int128_t)v * e2; + ce += (int128_t)q * d2 + (int128_t)r * e2; + + cd += (int128_t)P[2] * md; + ce += (int128_t)P[2] * me; + + d[1] = (int64_t)cd & M62; cd >>= 62; + e[1] = (int64_t)ce & M62; ce >>= 62; + + cd += (int128_t)u * d3 + (int128_t)v * e3; + ce += (int128_t)q * d3 + (int128_t)r * e3; + + d[2] = (int64_t)cd & M62; cd >>= 62; + e[2] = (int64_t)ce & M62; ce >>= 62; + + cd += (int128_t)u * d4 + (int128_t)v * e4; + ce += (int128_t)q * d4 + (int128_t)r * e4; + + cd += (int128_t)P[4] * md; + ce += (int128_t)P[4] * me; + + d[3] = (int64_t)cd & M62; cd >>= 62; + e[3] = (int64_t)ce & M62; ce >>= 62; + + d[4] = (int64_t)cd; + e[4] = (int64_t)ce; +} + +static void secp256k1_scalar_update_fg_62(int64_t *f, int64_t *g, const int64_t *t) { + + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t f0 = f[0], f1 = f[1], f2 = f[2], f3 = f[3], f4 = f[4]; + const int64_t g0 = g[0], g1 = g[1], g2 = g[2], g3 = g[3], g4 = g[4]; + const int64_t u = t[0], v = t[1], q = t[2], r = t[3]; + int128_t cf, cg; + + cf = (int128_t)u * f0 + (int128_t)v * g0; + cg = (int128_t)q * f0 + (int128_t)r * g0; + + VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; + VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; + + cf += (int128_t)u * f1 + (int128_t)v * g1; + cg += (int128_t)q * f1 + (int128_t)r * g1; + + f[0] = (int64_t)cf & M62; cf >>= 62; + g[0] = (int64_t)cg & M62; cg >>= 62; + + cf += (int128_t)u * f2 + (int128_t)v * g2; + cg += (int128_t)q * f2 + (int128_t)r * g2; + + f[1] = (int64_t)cf & M62; cf >>= 62; + g[1] = (int64_t)cg & M62; cg >>= 62; + + cf += (int128_t)u * f3 + (int128_t)v * g3; + cg += (int128_t)q * f3 + (int128_t)r * g3; + + f[2] = (int64_t)cf & M62; cf >>= 62; + g[2] = (int64_t)cg & M62; cg >>= 62; + + cf += (int128_t)u * f4 + (int128_t)v * g4; + cg += (int128_t)q * f4 + (int128_t)r * g4; + + f[3] = (int64_t)cf & M62; cf >>= 62; + g[3] = (int64_t)cg & M62; cg >>= 62; + + f[4] = (int64_t)cf; + g[4] = (int64_t)cg; +} + +static void secp256k1_scalar_update_fg_62_var(int len, int64_t *f, int64_t *g, const int64_t *t) { + + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + const int64_t u = t[0], v = t[1], q = t[2], r = t[3]; + int64_t fi, gi; + int128_t cf, cg; + int i; + + VERIFY_CHECK(len > 0); + + fi = f[0]; + gi = g[0]; + + cf = (int128_t)u * fi + (int128_t)v * gi; + cg = (int128_t)q * fi + (int128_t)r * gi; + + VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62; + VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62; + + for (i = 1; i < len; ++i) { + + fi = f[i]; + gi = g[i]; + + cf += (int128_t)u * fi + (int128_t)v * gi; + cg += (int128_t)q * fi 
+ (int128_t)r * gi; + + f[i - 1] = (int64_t)cf & M62; cf >>= 62; + g[i - 1] = (int64_t)cg & M62; cg >>= 62; + } + + f[len - 1] = (int64_t)cf; + g[len - 1] = (int64_t)cg; +} + +static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { +#if defined(EXHAUSTIVE_TEST_ORDER) + int i; + *r = 0; + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) + if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) + *r = i; + /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus + * have a composite group order; fix it in exhaustive_tests.c). */ + VERIFY_CHECK(*r != 0); +} +#else + + /* Modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. */ + + int64_t t[4]; + int64_t d[5] = { 0, 0, 0, 0, 0 }; + int64_t e[5] = { 1, 0, 0, 0, 0 }; + int64_t f[5] = { 0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, 0x3FFFFFFFFFFFFFEBLL, + 0x3FFFFFFFFFFFFFFFLL, 0xFFLL }; + int64_t g[5]; + int i; + uint64_t eta; +#ifdef VERIFY + int zero_in = secp256k1_scalar_is_zero(x); +#endif + + secp256k1_scalar_encode_62(g, x); + + /* The paper uses 'delta'; eta == -delta (a performance tweak). + * + * If the maximum bitlength of g is known to be less than 256, then eta can be set + * initially to -(1 + (256 - maxlen(g))), and only (741 - (256 - maxlen(g))) total + * divsteps are needed. */ + eta = -(uint64_t)1; + + for (i = 0; i < 12; ++i) { + eta = secp256k1_scalar_divsteps_62(eta, f[0], g[0], t); + secp256k1_scalar_update_de_62(d, e, t); + secp256k1_scalar_update_fg_62(f, g, t); + } + + /* At this point sufficient iterations have been performed that g must have reached 0 + * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g + * values i.e. +/- 1, and d now contains +/- the modular inverse. */ + + VERIFY_CHECK((g[0] | g[1] | g[2] | g[3] | g[4]) == 0); + + secp256k1_scalar_normalize_62(d, f[4] >> 63); + secp256k1_scalar_decode_62(r, d); + +#ifdef VERIFY + VERIFY_CHECK(!secp256k1_scalar_is_zero(r) == !zero_in); +#endif +} + +SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { + return !(a->d[0] & 1); +} +#endif + +static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { + + /* Modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. */ + + int64_t t[4]; + int64_t d[5] = { 0, 0, 0, 0, 0 }; + int64_t e[5] = { 1, 0, 0, 0, 0 }; + int64_t f[5] = { 0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, 0x3FFFFFFFFFFFFFEBLL, + 0x3FFFFFFFFFFFFFFFLL, 0xFFLL }; + int64_t g[5]; + int i, j, len = 5; + uint64_t eta; + int64_t cond, fn, gn; +#ifdef VERIFY + int zero_in = secp256k1_scalar_is_zero(x); +#endif + + secp256k1_scalar_encode_62(g, x); + + /* The paper uses 'delta'; eta == -delta (a performance tweak). + * + * If g has leading zeros (w.r.t 256 bits), then eta can be set initially to + * -(1 + clz(g)), and the worst-case divstep count would be only (741 - clz(g)). 
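+     *
+     * (741 is the paper's worst-case divstep count for 256-bit inputs,
+     * floor((49 * 256 + 57) / 17) == 741.)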
*/ + eta = -(uint64_t)1; + + for (i = 0; i < 12; ++i) { + + eta = secp256k1_scalar_divsteps_62_var(eta, f[0], g[0], t); + secp256k1_scalar_update_de_62(d, e, t); + secp256k1_scalar_update_fg_62_var(len, f, g, t); + + if (g[0] == 0) { + cond = 0; + for (j = 1; j < len; ++j) { + cond |= g[j]; + } + if (cond == 0) { + break; + } + } + + fn = f[len - 1]; + gn = g[len - 1]; + + cond = ((int64_t)len - 2) >> 63; + cond |= fn ^ (fn >> 63); + cond |= gn ^ (gn >> 63); + + if (cond == 0) { + f[len - 2] |= fn << 62; + g[len - 2] |= gn << 62; + --len; + } + } + + VERIFY_CHECK(i < 12); + + /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of + * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */ + + secp256k1_scalar_normalize_62(d, f[len - 1] >> 63); + secp256k1_scalar_decode_62(r, d); + +#ifdef VERIFY + VERIFY_CHECK(!secp256k1_scalar_is_zero(r) == !zero_in); +#endif +} + #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h index 3c372f34fe..ba52b7d54a 100644 --- a/src/scalar_8x32_impl.h +++ b/src/scalar_8x32_impl.h @@ -733,4 +733,499 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1); } +static const secp256k1_scalar SECP256K1_SCALAR_NEG_TWO_POW_256 = SECP256K1_SCALAR_CONST( + 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFDUL, + 0x755DB9CDUL, 0x5E914077UL, 0x7FA4BD19UL, 0xA06C8282UL +); + +static void secp256k1_scalar_normalize_30(int32_t *r, int32_t cond_negate) { + const int32_t P[9] = { 0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, -0x146L, + 0, 0, 0, 65536 }; + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + int32_t r0 = r[0], r1 = r[1], r2 = r[2], r3 = r[3], r4 = r[4], + r5 = r[5], r6 = r[6], r7 = r[7], r8 = r[8]; + int32_t c, cond_add; + + cond_add = r8 >> 31; + + c = r0 + (P[0] & cond_add); + r0 = c & M30; c >>= 30; + c += r1 + (P[1] & cond_add); + r1 = c & M30; c >>= 30; + c += r2 + (P[2] & cond_add); + r2 = c & M30; c >>= 30; + c += r3 + (P[3] & cond_add); + r3 = c & M30; c >>= 30; + c += r4 + (P[4] & cond_add); + r4 = c & M30; c >>= 30; + c += r5; + r5 = c & M30; c >>= 30; + c += r6; + r6 = c & M30; c >>= 30; + c += r7; + r7 = c & M30; c >>= 30; + c += r8 + (P[8] & cond_add); + r8 = c; + + cond_add = (c >> 31) ^ cond_negate; + + c = (r0 ^ cond_negate) - cond_negate + (P[0] & cond_add); + r[0] = c & M30; c >>= 30; + c += (r1 ^ cond_negate) - cond_negate + (P[1] & cond_add); + r[1] = c & M30; c >>= 30; + c += (r2 ^ cond_negate) - cond_negate + (P[2] & cond_add); + r[2] = c & M30; c >>= 30; + c += (r3 ^ cond_negate) - cond_negate + (P[3] & cond_add); + r[3] = c & M30; c >>= 30; + c += (r4 ^ cond_negate) - cond_negate + (P[4] & cond_add); + r[4] = c & M30; c >>= 30; + c += (r5 ^ cond_negate) - cond_negate; + r[5] = c & M30; c >>= 30; + c += (r6 ^ cond_negate) - cond_negate; + r[6] = c & M30; c >>= 30; + c += (r7 ^ cond_negate) - cond_negate; + r[7] = c & M30; c >>= 30; + c += (r8 ^ cond_negate) - cond_negate + (P[8] & cond_add); + r[8] = c; + + VERIFY_CHECK(c >> 16 == 0); +} + + +static void secp256k1_scalar_decode_30(secp256k1_scalar *r, const int32_t *a) { + + const uint32_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4], + a5 = a[5], a6 = a[6], a7 = a[7], a8 = a[8]; + + VERIFY_CHECK(a0 >> 30 == 0); + VERIFY_CHECK(a1 >> 30 == 0); + VERIFY_CHECK(a2 >> 30 == 0); + VERIFY_CHECK(a3 >> 30 == 0); + VERIFY_CHECK(a4 >> 30 == 0); + VERIFY_CHECK(a5 >> 30 == 0); + VERIFY_CHECK(a6 >> 30 == 0); + 
VERIFY_CHECK(a7 >> 30 == 0); + VERIFY_CHECK(a8 >> 16 == 0); + + r->d[0] = a0 | a1 << 30; + r->d[1] = a1 >> 2 | a2 << 28; + r->d[2] = a2 >> 4 | a3 << 26; + r->d[3] = a3 >> 6 | a4 << 24; + r->d[4] = a4 >> 8 | a5 << 22; + r->d[5] = a5 >> 10 | a6 << 20; + r->d[6] = a6 >> 12 | a7 << 18; + r->d[7] = a7 >> 14 | a8 << 16; +} + +static void secp256k1_scalar_encode_30(int32_t *r, const secp256k1_scalar *a) { + + const uint32_t M30 = UINT32_MAX >> 2; + const uint32_t *d = &a->d[0]; + const uint32_t a0 = d[0], a1 = d[1], a2 = d[2], a3 = d[3], + a4 = d[4], a5 = d[5], a6 = d[6], a7 = d[7]; + +#ifdef VERIFY + VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0); +#endif + + r[0] = a0 & M30; + r[1] = (a0 >> 30 | a1 << 2) & M30; + r[2] = (a1 >> 28 | a2 << 4) & M30; + r[3] = (a2 >> 26 | a3 << 6) & M30; + r[4] = (a3 >> 24 | a4 << 8) & M30; + r[5] = (a4 >> 22 | a5 << 10) & M30; + r[6] = (a5 >> 20 | a6 << 12) & M30; + r[7] = (a6 >> 18 | a7 << 14) & M30; + r[8] = a7 >> 16; +} + +static uint32_t secp256k1_scalar_divsteps_30(uint32_t eta, uint32_t f0, uint32_t g0, int32_t *t) { + + uint32_t u = 1, v = 0, q = 0, r = 1; + uint32_t c1, c2, f = f0, g = g0, x, y, z; + int i; + + for (i = 0; i < 30; ++i) { + + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << i); + VERIFY_CHECK((q * f0 + r * g0) == g << i); + + c1 = (int32_t)eta >> 31; + c2 = -(g & 1); + + x = (f ^ c1) - c1; + y = (u ^ c1) - c1; + z = (v ^ c1) - c1; + + g += x & c2; + q += y & c2; + r += z & c2; + + c1 &= c2; + eta = (eta ^ c1) - (c1 + 1); + + f += g & c1; + u += q & c1; + v += r & c1; + + g >>= 1; + u <<= 1; + v <<= 1; + } + + t[0] = (int32_t)u; + t[1] = (int32_t)v; + t[2] = (int32_t)q; + t[3] = (int32_t)r; + + return eta; +} + +static uint32_t secp256k1_scalar_divsteps_30_var(uint32_t eta, uint32_t f0, uint32_t g0, int32_t *t) { + +#if 1 + static const uint8_t debruijn[32] = { + 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, + 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, + 0x1E, 0x11, 0x08, 0x0E, 0x1D, 0x0D, 0x1C, 0x1B }; +#endif + + static const uint8_t inv256[128] = { + 0xFF, 0x55, 0x33, 0x49, 0xC7, 0x5D, 0x3B, 0x11, 0x0F, 0xE5, 0xC3, 0x59, + 0xD7, 0xED, 0xCB, 0x21, 0x1F, 0x75, 0x53, 0x69, 0xE7, 0x7D, 0x5B, 0x31, + 0x2F, 0x05, 0xE3, 0x79, 0xF7, 0x0D, 0xEB, 0x41, 0x3F, 0x95, 0x73, 0x89, + 0x07, 0x9D, 0x7B, 0x51, 0x4F, 0x25, 0x03, 0x99, 0x17, 0x2D, 0x0B, 0x61, + 0x5F, 0xB5, 0x93, 0xA9, 0x27, 0xBD, 0x9B, 0x71, 0x6F, 0x45, 0x23, 0xB9, + 0x37, 0x4D, 0x2B, 0x81, 0x7F, 0xD5, 0xB3, 0xC9, 0x47, 0xDD, 0xBB, 0x91, + 0x8F, 0x65, 0x43, 0xD9, 0x57, 0x6D, 0x4B, 0xA1, 0x9F, 0xF5, 0xD3, 0xE9, + 0x67, 0xFD, 0xDB, 0xB1, 0xAF, 0x85, 0x63, 0xF9, 0x77, 0x8D, 0x6B, 0xC1, + 0xBF, 0x15, 0xF3, 0x09, 0x87, 0x1D, 0xFB, 0xD1, 0xCF, 0xA5, 0x83, 0x19, + 0x97, 0xAD, 0x8B, 0xE1, 0xDF, 0x35, 0x13, 0x29, 0xA7, 0x3D, 0x1B, 0xF1, + 0xEF, 0xC5, 0xA3, 0x39, 0xB7, 0xCD, 0xAB, 0x01 + }; + + uint32_t u = 1, v = 0, q = 0, r = 1; + uint32_t f = f0, g = g0, m, w, x, y, z; + int i = 30, limit, zeros; + + for (;;) { + + /* Use a sentinel bit to count zeros only up to i. 
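+         *
+         * (Below, x & -x isolates the lowest set bit of x, and the de Bruijn
+         * multiply-and-shift maps it to its bit index, giving a branch-free count of
+         * trailing zeros.)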
*/ + x = g | (UINT32_MAX << i); + +#if 0 + zeros = __builtin_ctzl(x); +#else + zeros = debruijn[((x & -x) * 0x04D7651F) >> 27]; +#endif + + g >>= zeros; + u <<= zeros; + v <<= zeros; + eta -= zeros; + i -= zeros; + + if (i <= 0) { + break; + } + + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((g & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << (30 - i)); + VERIFY_CHECK((q * f0 + r * g0) == g << (30 - i)); + + if ((int32_t)eta < 0) { + eta = -eta; + x = f; f = g; g = -x; + y = u; u = q; q = -y; + z = v; v = r; r = -z; + } + + /* Handle up to 8 divsteps at once, subject to eta and i. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + m = (UINT32_MAX >> (32 - limit)) & 255U; + + w = (g * inv256[(f >> 1) & 127]) & m; + + g += f * w; + q += u * w; + r += v * w; + + VERIFY_CHECK((g & m) == 0); + } + + t[0] = (int32_t)u; + t[1] = (int32_t)v; + t[2] = (int32_t)q; + t[3] = (int32_t)r; + + return eta; +} + +static void secp256k1_scalar_update_de_30(int32_t *d, int32_t *e, const int32_t *t) { + + /* I30 == P^-1 mod 2^30 */ + const int32_t I30 = 0x2A774EC1L; + const int32_t P[9] = { 0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, -0x146L, + 0, 0, 0, 65536 }; + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t[0], v = t[1], q = t[2], r = t[3]; + int32_t di, ei, md, me, sd, se; + int64_t cd, ce; + int i; + + /* + * On input, d/e must be in the range (-2.P, P). For initially negative d (resp. e), we add + * u and/or v (resp. q and/or r) multiples of the modulus to the corresponding output (prior + * to division by 2^30). This has the same effect as if we added the modulus to the input(s). + */ + + sd = d[8] >> 31; + se = e[8] >> 31; + + md = (u & sd) + (v & se); + me = (q & sd) + (r & se); + + di = d[0]; + ei = e[0]; + + cd = (int64_t)u * di + (int64_t)v * ei; + ce = (int64_t)q * di + (int64_t)r * ei; + + /* + * Subtract from md/me an extra term in the range [0, 2^30) such that the low 30 bits of each + * sum of products will be 0. This allows clean division by 2^30. On output, d/e are thus in + * the range (-2.P, P), consistent with the input constraint. 
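+     *
+     * (Since the modulus limbs are stored directly in P[], the correction is applied
+     * term by term as cd += P[i] * md and ce += P[i] * me in the loop below.)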
+ */ + + md -= (I30 * (int32_t)cd + md) & M30; + me -= (I30 * (int32_t)ce + me) & M30; + + cd += (int64_t)P[0] * md; + ce += (int64_t)P[0] * me; + + VERIFY_CHECK(((int32_t)cd & M30) == 0); cd >>= 30; + VERIFY_CHECK(((int32_t)ce & M30) == 0); ce >>= 30; + + for (i = 1; i < 9; ++i) { + + di = d[i]; + ei = e[i]; + + cd += (int64_t)u * di + (int64_t)v * ei; + ce += (int64_t)q * di + (int64_t)r * ei; + + cd += (int64_t)P[i] * md; + ce += (int64_t)P[i] * me; + + d[i - 1] = (int32_t)cd & M30; cd >>= 30; + e[i - 1] = (int32_t)ce & M30; ce >>= 30; + } + + d[8] = (int32_t)cd; + e[8] = (int32_t)ce; +} + +static void secp256k1_scalar_update_fg_30(int32_t *f, int32_t *g, int32_t *t) { + + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + int32_t u = t[0], v = t[1], q = t[2], r = t[3], fi, gi; + int64_t cf, cg; + int i; + + fi = f[0]; + gi = g[0]; + + cf = (int64_t)u * fi + (int64_t)v * gi; + cg = (int64_t)q * fi + (int64_t)r * gi; + + VERIFY_CHECK(((int32_t)cf & M30) == 0); + VERIFY_CHECK(((int32_t)cg & M30) == 0); + + cf >>= 30; + cg >>= 30; + + for (i = 1; i < 9; ++i) { + + fi = f[i]; + gi = g[i]; + + cf += (int64_t)u * fi + (int64_t)v * gi; + cg += (int64_t)q * fi + (int64_t)r * gi; + + f[i - 1] = (int32_t)cf & M30; cf >>= 30; + g[i - 1] = (int32_t)cg & M30; cg >>= 30; + } + + f[8] = (int32_t)cf; + g[8] = (int32_t)cg; +} + +static void secp256k1_scalar_update_fg_30_var(int len, int32_t *f, int32_t *g, const int32_t *t) { + + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t[0], v = t[1], q = t[2], r = t[3]; + int32_t fi, gi; + int64_t cf, cg; + int i; + + VERIFY_CHECK(len > 0); + + fi = f[0]; + gi = g[0]; + + cf = (int64_t)u * fi + (int64_t)v * gi; + cg = (int64_t)q * fi + (int64_t)r * gi; + + VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30; + VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30; + + for (i = 1; i < len; ++i) { + + fi = f[i]; + gi = g[i]; + + cf += (int64_t)u * fi + (int64_t)v * gi; + cg += (int64_t)q * fi + (int64_t)r * gi; + + f[i - 1] = (int32_t)cf & M30; cf >>= 30; + g[i - 1] = (int32_t)cg & M30; cg >>= 30; + } + + f[len - 1] = (int32_t)cf; + g[len - 1] = (int32_t)cg; +} + +static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { +#if defined(EXHAUSTIVE_TEST_ORDER) + int i; + *r = 0; + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) + if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) + *r = i; + /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus + * have a composite group order; fix it in exhaustive_tests.c). */ + VERIFY_CHECK(*r != 0); +} +#else + + /* Modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. */ + + int32_t t[4]; + int32_t d[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + int32_t e[9] = { 1, 0, 0, 0, 0, 0, 0, 0, 0 }; + int32_t f[9] = { 0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, + 0x3FFFFEBAL, 0x3FFFFFFFL, 0x3FFFFFFFL, 0x3FFFFFFFL, 0xFFFFL }; + int32_t g[9]; + int i; + uint32_t eta; +#ifdef VERIFY + int zero_in = secp256k1_scalar_is_zero(x); +#endif + + secp256k1_scalar_encode_30(g, x); + + /* The paper uses 'delta'; eta == -delta (a performance tweak). + * + * If the maximum bitlength of g is known to be less than 256, then eta can be set + * initially to -(1 + (256 - maxlen(g))), and only (741 - (256 - maxlen(g))) total + * divsteps are needed. 
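+     *
+     * Here eta starts at -1 (i.e. delta == 1, the paper's initial value), and the loop
+     * below performs 25 * 30 == 750 divsteps, covering the worst case of 741.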
*/ + eta = -(uint32_t)1; + + for (i = 0; i < 25; ++i) { + eta = secp256k1_scalar_divsteps_30(eta, f[0], g[0], t); + secp256k1_scalar_update_de_30(d, e, t); + secp256k1_scalar_update_fg_30(f, g, t); + } + + /* At this point sufficient iterations have been performed that g must have reached 0 + * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g + * values i.e. +/- 1, and d now contains +/- the modular inverse. */ + + VERIFY_CHECK((g[0] | g[1] | g[2] | g[3] | g[4] | g[5] | g[6] | g[7] | g[8]) == 0); + + secp256k1_scalar_normalize_30(d, f[8] >> 31); + secp256k1_scalar_decode_30(r, d); + +#ifdef VERIFY + VERIFY_CHECK(!secp256k1_scalar_is_zero(r) == !zero_in); +#endif +} + +SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { + return !(a->d[0] & 1); +} +#endif + +static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { + + /* Modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. */ + + int32_t t[4]; + int32_t d[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + int32_t e[9] = { 1, 0, 0, 0, 0, 0, 0, 0, 0 }; + int32_t f[9] = { 0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, + 0x3FFFFEBAL, 0x3FFFFFFFL, 0x3FFFFFFFL, 0x3FFFFFFFL, 0xFFFFL }; + int32_t g[9]; + int i, j, len = 9; + uint32_t eta; + int32_t cond, fn, gn; +#ifdef VERIFY + int zero_in = secp256k1_scalar_is_zero(x); +#endif + + secp256k1_scalar_encode_30(g, x); + + /* The paper uses 'delta'; eta == -delta (a performance tweak). + * + * If g has leading zeros (w.r.t 256 bits), then eta can be set initially to + * -(1 + clz(g)), and the worst-case divstep count would be only (741 - clz(g)). */ + eta = -(uint32_t)1; + + for (i = 0; i < 25; ++i) { + + eta = secp256k1_scalar_divsteps_30_var(eta, f[0], g[0], t); + secp256k1_scalar_update_de_30(d, e, t); + secp256k1_scalar_update_fg_30_var(len, f, g, t); + + if (g[0] == 0) { + cond = 0; + for (j = 1; j < len; ++j) { + cond |= g[j]; + } + if (cond == 0) { + break; + } + } + + fn = f[len - 1]; + gn = g[len - 1]; + + cond = ((int32_t)len - 2) >> 31; + cond |= fn ^ (fn >> 31); + cond |= gn ^ (gn >> 31); + + if (cond == 0) { + f[len - 2] |= (uint32_t)fn << 30; + g[len - 2] |= (uint32_t)gn << 30; + --len; + } + } + + VERIFY_CHECK(i < 25); + + /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of + * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. 
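+     *
+     * (For a zero input, d is still zero at this point and the decoded result is zero;
+     * the VERIFY check at the end confirms that zero maps to zero and nonzero to nonzero.)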
*/ + + secp256k1_scalar_normalize_30(d, f[len - 1] >> 31); + secp256k1_scalar_decode_30(r, d); + +#ifdef VERIFY + VERIFY_CHECK(!secp256k1_scalar_is_zero(r) == !zero_in); +#endif +} + #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/src/scalar_impl.h b/src/scalar_impl.h index 70cd73db06..69f31f6c51 100644 --- a/src/scalar_impl.h +++ b/src/scalar_impl.h @@ -61,6 +61,7 @@ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned c return (!overflow) & (!secp256k1_scalar_is_zero(r)); } +#if defined(SECP256K1_SCALAR_INV_DEFAULT) static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { #if defined(EXHAUSTIVE_TEST_ORDER) int i; @@ -230,7 +231,9 @@ SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) return !(a->d[0] & 1); } #endif +#endif +#if defined(SECP256K1_SCALAR_INV_VAR_DEFAULT) static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { #if defined(USE_SCALAR_INV_BUILTIN) secp256k1_scalar_inverse(r, x); @@ -251,6 +254,7 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc #error "Please select scalar inverse implementation" #endif } +#endif #ifdef USE_ENDOMORPHISM #if defined(EXHAUSTIVE_TEST_ORDER) diff --git a/src/scalar_low.h b/src/scalar_low.h index 2794a7f171..53ea913203 100644 --- a/src/scalar_low.h +++ b/src/scalar_low.h @@ -14,4 +14,7 @@ typedef uint32_t secp256k1_scalar; #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) (d0) +#define SECP256K1_SCALAR_INV_DEFAULT +#define SECP256K1_SCALAR_INV_VAR_DEFAULT + #endif /* SECP256K1_SCALAR_REPR_H */ diff --git a/src/util.h b/src/util.h index 8289e23e0c..b4f7b77344 100644 --- a/src/util.h +++ b/src/util.h @@ -176,6 +176,7 @@ static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_siz # else # define SECP256K1_GNUC_EXT # endif +SECP256K1_GNUC_EXT typedef __int128 int128_t; SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t; #endif