#include "ecmult_const.h"
#include "ecmult_impl.h"

-#define WNAF_BITS 256
+#ifdef USE_ENDOMORPHISM
+#define WNAF_BITS 128
+#else
+#define WNAF_BITS 256
+#endif
#define WNAF_SIZE(w) ((WNAF_BITS + (w) - 1) / (w))

/* This is like `ECMULT_TABLE_GET_GE` but is constant time */
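/* Illustrative sketch (not from the patch): the general shape of a branch-free
 * table lookup of the kind the constant-time macro above refers to.  Every
 * entry is read and folded in through a mask derived from the secret index,
 * so neither the branch pattern nor the memory addresses depend on the secret.
 * Plain uint32_t entries stand in for group elements; the toy_* name is made up. */
#include <stdint.h>

static uint32_t toy_table_get_ct(const uint32_t *table, int len, int secret_idx) {
    uint32_t result = 0;
    int i;
    for (i = 0; i < len; i++) {
        /* all-ones when i == secret_idx, all-zeros otherwise, with no secret-dependent branch */
        uint32_t mask = (uint32_t)0 - (uint32_t)(i == secret_idx);
        result |= table[i] & mask;
    }
    return result;
}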
 *
 * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
 */
-static void secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar_t *a, int w) {
-    secp256k1_scalar_t s = *a;
-    /* Negate to force oddness */
-    int is_even = secp256k1_scalar_is_even(&s);
-    int global_sign = secp256k1_scalar_cond_negate(&s, is_even);
-
+static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar_t s, int w) {
+    int global_sign = 1;
+    int skew = 0;
    int word = 0;
    /* 1 2 3 */
-    int u_last = secp256k1_scalar_shr_int(&s, w);
+    int u_last;
    int u;
+
+#ifdef USE_ENDOMORPHISM
+    /* If we are using the endomorphism, we cannot handle even numbers by negating
+     * them, since we are working with 128-bit numbers whose negations would be 256
+     * bits, eliminating the performance advantage. Instead we use a technique from
+     * Section 4.2 of the Okeya/Takagi paper, which is to add either 1 (for even)
+     * or 2 (for odd) to the number we are encoding, then compensating after the
+     * multiplication. */
+    /* Negative 128-bit numbers will be negated, since otherwise they are 256-bit */
+    int flip = secp256k1_scalar_is_high(&s);
+    /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
+    int bit = flip ^ (s.d[0] & 1);
+    /* We check for negative one, since adding 2 to it will cause an overflow */
+    secp256k1_scalar_t neg_s;
+    int not_neg_one;
+    secp256k1_scalar_negate(&neg_s, &s);
+    not_neg_one = !secp256k1_scalar_is_one(&neg_s);
+    secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
+    /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
+     * that we added two to it and flipped it. In fact for -1 these operations are
+     * identical. We only flipped, but since skewing is required (in the sense that
+     * the skew must be 1 or 2, never zero) and flipping is not, we need to change
+     * our flags to claim that we only skewed. */
+    global_sign = secp256k1_scalar_cond_negate(&s, flip);
+    global_sign *= not_neg_one * 2 - 1;
+    skew = 1 << bit;
+#else
+    /* Otherwise, we just negate to force oddness */
+    int is_even = secp256k1_scalar_is_even(&s);
+    global_sign = secp256k1_scalar_cond_negate(&s, is_even);
+#endif
+
    /* 4 */
+    u_last = secp256k1_scalar_shr_int(&s, w);
    while (word * w < WNAF_BITS) {
        int sign;
        int even;
@@ -81,6 +115,7 @@ static void secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar_t *a, int w)

    VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
    VERIFY_CHECK(word == WNAF_SIZE(w));
+    return skew;
}

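/* Illustrative sketch (not from the patch): the skew idea described in the
 * Okeya/Takagi comment above, with plain integers standing in for scalars and
 * points.  The scalar is made odd by adding a skew of 1 (even) or 2 (odd),
 * the ladder runs on the odd value, and skew*P is subtracted afterwards --
 * which is what the skew value returned by secp256k1_wnaf_const lets the
 * caller do.  The real code also folds in a global sign and the -1 edge case,
 * which this toy ignores; toy_* names are made up. */
#include <stdint.h>

static int64_t toy_skewed_mul(int64_t k, int64_t P) {
    int64_t skew = (k & 1) ? 2 : 1;  /* k + skew is always odd */
    int64_t acc = (k + skew) * P;    /* stands in for the wNAF ladder over an odd scalar */
    return acc - skew * P;           /* the later skew correction; result equals k * P */
}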
@@ -89,17 +124,37 @@ static void secp256k1_ecmult_const(secp256k1_gej_t *r, const secp256k1_ge_t *a,
    secp256k1_ge_t tmpa;
    secp256k1_fe_t Z;

+#ifdef USE_ENDOMORPHISM
+    secp256k1_ge_t pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
+    int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
+    int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
+    int skew_1;
+    int skew_lam;
+    secp256k1_scalar_t q_1, q_lam;
+#else
    int wnaf[1 + WNAF_SIZE(WINDOW_A - 1)];
+#endif

    int i;
-    int is_zero = secp256k1_scalar_is_zero(scalar);
    secp256k1_scalar_t sc = *scalar;
+
+    /* build wnaf representation for q. */
+#ifdef USE_ENDOMORPHISM
+    /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
+    secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
+    /* no need for zero correction when using endomorphism since even
+     * numbers have one added to them anyway */
+    skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1);
+    skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1);
+#else
+    int is_zero = secp256k1_scalar_is_zero(scalar);
    /* the wNAF ladder cannot handle zero, so bump this to one .. we will
     * correct the result after the fact */
    sc.d[0] += is_zero;
+    VERIFY_CHECK(!secp256k1_scalar_is_zero(&sc));

-    /* build wnaf representation for q. */
-    secp256k1_wnaf_const(wnaf, &sc, WINDOW_A - 1);
+    secp256k1_wnaf_const(wnaf, sc, WINDOW_A - 1);
+#endif
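/* Illustrative sketch (not from the patch): the zero handling of the
 * non-endomorphism branch above, in plain integers.  A zero scalar is bumped
 * to one so the wNAF ladder always gets a nonzero input, and the dummy result
 * is discarded at the end, mirroring `sc.d[0] += is_zero` here and
 * `r->infinity |= is_zero` at the bottom of the function.  toy_* names are
 * made up. */
#include <stdint.h>

static int64_t toy_mul_handle_zero(int64_t k, int64_t P) {
    int is_zero = (k == 0);
    int64_t acc;
    k += is_zero;      /* bump 0 -> 1 so the ladder has something to process */
    acc = k * P;       /* stands in for the constant-time wNAF ladder */
    acc *= !is_zero;   /* force the result back to the identity if k was 0 */
    return acc;        /* equals the original k times P */
}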

    /* Calculate odd multiples of a.
     * All multiples are brought to the same Z 'denominator', which is stored
@@ -109,31 +164,91 @@ static void secp256k1_ecmult_const(secp256k1_gej_t *r, const secp256k1_ge_t *a,
     */
    secp256k1_gej_set_ge(r, a);
    secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
+#ifdef USE_ENDOMORPHISM
+    for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
+        secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
+    }
+#endif

    /* first loop iteration (separated out so we can directly set r, rather
     * than having it start at infinity, get doubled several times, then have
     * its new value added to it) */
+#ifdef USE_ENDOMORPHISM
+    i = wnaf_1[WNAF_SIZE(WINDOW_A - 1)];
+    VERIFY_CHECK(i != 0);
+    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
+    secp256k1_gej_set_ge(r, &tmpa);
+
+    i = wnaf_lam[WNAF_SIZE(WINDOW_A - 1)];
+    VERIFY_CHECK(i != 0);
+    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
+    secp256k1_gej_add_ge(r, r, &tmpa);
+#else
    i = wnaf[WNAF_SIZE(WINDOW_A - 1)];
    VERIFY_CHECK(i != 0);
    ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
    secp256k1_gej_set_ge(r, &tmpa);
+#endif
    /* remaining loop iterations */
    for (i = WNAF_SIZE(WINDOW_A - 1) - 1; i >= 0; i--) {
        int n;
        int j;
        for (j = 0; j < WINDOW_A - 1; ++j) {
            secp256k1_gej_double_nonzero(r, r, NULL);
        }
+#ifdef USE_ENDOMORPHISM
+        n = wnaf_1[i];
+        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
+        VERIFY_CHECK(n != 0);
+        secp256k1_gej_add_ge(r, r, &tmpa);
+
+        n = wnaf_lam[i];
+        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
+        VERIFY_CHECK(n != 0);
+        secp256k1_gej_add_ge(r, r, &tmpa);
+#else
        n = wnaf[i];
        VERIFY_CHECK(n != 0);
        ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
        secp256k1_gej_add_ge(r, r, &tmpa);
+#endif
    }

    secp256k1_fe_mul(&r->z, &r->z, &Z);

+#ifdef USE_ENDOMORPHISM
+    {
+        /* Correct for wNAF skew */
+        secp256k1_ge_t correction = *a;
+        secp256k1_ge_storage_t correction_1_stor;
+        secp256k1_ge_storage_t correction_lam_stor;
+        secp256k1_ge_storage_t a2_stor;
+        secp256k1_gej_t tmpj;
+        secp256k1_gej_set_ge(&tmpj, &correction);
+        secp256k1_gej_double_var(&tmpj, &tmpj, NULL);
+        secp256k1_ge_set_gej(&correction, &tmpj);
+        secp256k1_ge_to_storage(&correction_1_stor, a);
+        secp256k1_ge_to_storage(&correction_lam_stor, a);
+        secp256k1_ge_to_storage(&a2_stor, &correction);
+
+        /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
+        secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2);
+        secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2);
+
+        /* Apply the correction */
+        secp256k1_ge_from_storage(&correction, &correction_1_stor);
+        secp256k1_ge_neg(&correction, &correction);
+        secp256k1_gej_add_ge(r, r, &correction);
+
+        secp256k1_ge_from_storage(&correction, &correction_lam_stor);
+        secp256k1_ge_neg(&correction, &correction);
+        secp256k1_ge_mul_lambda(&correction, &correction);
+        secp256k1_gej_add_ge(r, r, &correction);
+    }
+#else
    /* correct for zero */
    r->infinity |= is_zero;
+#endif
}
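/* Illustrative sketch (not from the patch): the endomorphism identity the
 * wnaf_1/wnaf_lam ladders above rely on, in modular integer arithmetic rather
 * than on the curve.  If q = q_1 + q_lam*lambda (mod n), then q*P can be
 * computed as q_1*P + q_lam*(lambda*P), with both halves roughly half the
 * width of q.  The modulus, lambda and "point" below are tiny made-up numbers,
 * not the secp256k1 group order or its endomorphism eigenvalue. */
#include <stdint.h>
#include <assert.h>

static void toy_glv_identity(void) {
    const int64_t n = 1009;      /* toy group order */
    const int64_t lambda = 374;  /* toy endomorphism eigenvalue */
    const int64_t P = 123;       /* toy "point": an integer mod n */
    const int64_t q = 877;       /* toy scalar */
    const int64_t q_lam = 2;     /* toy split: q = q_1 + q_lam*lambda (mod n) */
    const int64_t q_1 = ((q - q_lam * lambda) % n + n) % n;
    assert((q_1 + q_lam * lambda) % n == q % n);
    /* one full-width multiplication equals two recombined half-width ones */
    assert((q * P) % n == (q_1 * P + q_lam * ((lambda * P) % n)) % n);
}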
#endif