@@ -28,16 +28,9 @@ static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx)
 
 static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, void **prealloc) {
 #ifndef USE_ECMULT_STATIC_PRECOMPUTATION
-#ifdef USE_COMB
     secp256k1_ge prec[COMB_POINTS_TOTAL + COMB_OFFSET];
     secp256k1_gej u, sum;
     int block, index, spacing, stride, tooth;
-#else
-    secp256k1_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G];
-    secp256k1_gej gj;
-    secp256k1_gej nums_gej;
-    int i, j;
-#endif
     size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
     void* const base = *prealloc;
 #endif
@@ -46,7 +39,6 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
         return;
     }
 #ifndef USE_ECMULT_STATIC_PRECOMPUTATION
-#ifdef USE_COMB
     ctx->prec = (secp256k1_ge_storage (*)[COMB_BLOCKS][COMB_POINTS])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
 
     /* get the generator */
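
Aside on manual_alloc: the table is carved out of the caller-supplied preallocated region rather than heap-allocated. A minimal bump-allocator sketch of that pattern (illustrative names only, and no alignment handling, which the real helper does take care of):

    #include <stddef.h>

    /* Hand out `size` bytes from the preallocated region that starts at
     * `base` and is `max_size` bytes long; *prealloc is the bump cursor. */
    static void *bump_alloc(void **prealloc, size_t size, void *base, size_t max_size) {
        unsigned char *ret = (unsigned char *)*prealloc;
        if ((size_t)(ret - (unsigned char *)base) + size > max_size) {
            return NULL; /* out of preallocated space */
        }
        *prealloc = ret + size;
        return ret;
    }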
@@ -95,72 +87,12 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx
     ctx->offset = prec[COMB_POINTS_TOTAL];
 #endif
 
-#else
-    ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
-
-    /* get the generator */
-    secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
-
-    /* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
-    {
-        static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
-        secp256k1_fe nums_x;
-        secp256k1_ge nums_ge;
-        int r;
-        r = secp256k1_fe_set_b32(&nums_x, nums_b32);
-        (void)r;
-        VERIFY_CHECK(r);
-        r = secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0);
-        (void)r;
-        VERIFY_CHECK(r);
-        secp256k1_gej_set_ge(&nums_gej, &nums_ge);
-        /* Add G to make the bits in x uniformly distributed. */
-        secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL);
-    }
-
-    /* compute prec. */
-    {
-        secp256k1_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */
-        secp256k1_gej gbase;
-        secp256k1_gej numsbase;
-        gbase = gj; /* PREC_G^j * G */
-        numsbase = nums_gej; /* 2^j * nums. */
-        for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
-            /* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */
-            precj[j*ECMULT_GEN_PREC_G] = numsbase;
-            for (i = 1; i < ECMULT_GEN_PREC_G; i++) {
-                secp256k1_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL);
-            }
-            /* Multiply gbase by PREC_G. */
-            for (i = 0; i < ECMULT_GEN_PREC_B; i++) {
-                secp256k1_gej_double_var(&gbase, &gbase, NULL);
-            }
-            /* Multiply numbase by 2. */
-            secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
-            if (j == ECMULT_GEN_PREC_N - 2) {
-                /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
-                secp256k1_gej_neg(&numsbase, &numsbase);
-                secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
-            }
-        }
-        secp256k1_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G);
-    }
-    for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
-        for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
-            secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]);
-        }
-    }
-#endif
 #else
     (void)prealloc;
-#if USE_COMB
     ctx->prec = (secp256k1_ge_storage (*)[COMB_BLOCKS][COMB_POINTS])secp256k1_ecmult_gen_ctx_prec;
 #if COMB_OFFSET
     secp256k1_ge_from_storage(&ctx->offset, &secp256k1_ecmult_gen_ctx_offset);
 #endif
-#else
-    ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])secp256k1_ecmult_static_context;
-#endif
 #endif
     secp256k1_ecmult_gen_blind(ctx, NULL);
 }
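
A property of the removed construction that is easy to miss: the nums offsets are chosen to telescope to zero. Rows 0 through N-2 each start from 2^j * nums, and the last row starts from (1 - 2^(N-1)) * nums, so whichever entry is picked from each row, the nums parts sum to (2^(N-1) - 1) + (1 - 2^(N-1)) = 0 and only the G multiples survive. A minimal integer sketch of that cancellation, with plain ints standing in for group elements and a small hypothetical N in place of ECMULT_GEN_PREC_N:

    #include <assert.h>

    int main(void) {
        const int N = 16;      /* small stand-in for ECMULT_GEN_PREC_N */
        long total = 0;        /* running sum of the per-row nums multiples */
        long numsbase = 1;     /* 2^j * nums, with nums = 1 */
        int j;
        for (j = 0; j < N; j++) {
            total += numsbase;           /* row j starts from numsbase */
            numsbase *= 2;               /* mirrors the doubling above */
            if (j == N - 2) {
                numsbase = 1 - numsbase; /* last row: (1 - 2^(N-1)) * nums */
            }
        }
        assert(total == 0);    /* the nothing-up-my-sleeve offsets cancel */
        return 0;
    }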
@@ -171,28 +103,20 @@ static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_cont
 
 static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context *src) {
 #ifndef USE_ECMULT_STATIC_PRECOMPUTATION
-#ifdef USE_COMB
     if (src->prec != NULL) {
         /* We cast to void* first to suppress a -Wcast-align warning. */
         dst->prec = (secp256k1_ge_storage (*)[COMB_BLOCKS][COMB_POINTS])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
     }
 #if COMB_OFFSET
     dst->offset = src->offset;
 #endif
-#else
-    if (src->prec != NULL) {
-        dst->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
-    }
-#endif
 #endif
     (void)dst, (void)src;
 }
 
 static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) {
-#ifdef USE_COMB
 #if COMB_OFFSET
     secp256k1_ge_clear(&ctx->offset);
-#endif
 #endif
     secp256k1_scalar_clear(&ctx->blind);
     secp256k1_gej_clear(&ctx->initial);
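
The finalize_memcpy pattern above deserves a note: after a raw memcpy of the context, the interior prec pointer still points into the source object, so it is recomputed as dst plus the byte offset the pointer had within src. A self-contained sketch of the same rebasing on a hypothetical mini-context:

    #include <stddef.h>
    #include <string.h>

    typedef struct {
        int *table;   /* interior pointer into buf */
        int buf[4];
    } mini_ctx;

    /* After memcpy(dst, src, sizeof(*dst)), rebase the interior pointer
     * so it points into dst's own storage instead of src's. */
    static void mini_finalize_memcpy(mini_ctx *dst, const mini_ctx *src) {
        if (src->table != NULL) {
            dst->table = (int *)(void *)((unsigned char *)dst +
                ((unsigned char *)src->table - (unsigned char *)src));
        }
    }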
@@ -205,8 +129,6 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
     secp256k1_scalar gnb;
     uint32_t bits;
 
-#ifdef USE_COMB
-
     uint32_t abs, bit_pos, block, comb_off, index, sign;
 #if !COMB_GROUPED
     uint32_t bit, tooth;
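
The sign/abs pair declared here carries the comb's signed-digit form: each tooth pattern is folded into a sign bit plus a magnitude below COMB_POINTS, so both the table lookup and the conditional negation can stay branch-free. A generic sign/magnitude split in that spirit (a sketch only, not necessarily the library's exact recoding; `teeth` is a hypothetical parameter):

    #include <stdint.h>

    /* Split a tooth pattern t (teeth bits wide) into a sign bit and a
     * magnitude: the top bit picks the sign, and t is conditionally
     * bit-negated so the magnitude always fits in teeth-1 bits. */
    static void split_signed(uint32_t t, unsigned teeth, uint32_t *sign, uint32_t *mag) {
        *sign = (t >> (teeth - 1)) & 1;
        *mag = (t ^ (0U - *sign)) & ((1U << (teeth - 1)) - 1);
    }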
@@ -245,6 +167,16 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
         VERIFY_CHECK(abs < COMB_POINTS);
 
         for (index = 0; index < COMB_POINTS; ++index) {
+            /** This uses a conditional move to avoid any secret data in array indexes.
+             *   _Any_ use of secret indexes has been demonstrated to result in timing
+             *   sidechannels, even when the cache-line access patterns are uniform.
+             *  See also:
+             *   "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
+             *    (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
+             *   "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
+             *    by Dag Arne Osvik, Adi Shamir, and Eran Tromer
+             *    (http://www.tau.ac.il/~tromer/papers/cache.pdf)
+             */
             secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[block][index], index == abs);
         }
 
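
To make the new comment concrete: the lookup touches every table entry and lets the comparison result, never the secret index itself, steer a branch-free conditional move. A toy version of the same pattern, with 8-byte entries standing in for secp256k1_ge_storage:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint8_t b[8]; } entry;

    /* Copy *a into *r iff flag is 1, via a mask rather than a branch. */
    static void entry_cmov(entry *r, const entry *a, int flag) {
        uint8_t mask = (uint8_t)(0U - (uint32_t)flag); /* 0x00 or 0xff */
        size_t i;
        for (i = 0; i < sizeof(r->b); i++) {
            r->b[i] = (uint8_t)((r->b[i] & (uint8_t)~mask) | (a->b[i] & mask));
        }
    }

    /* Scan the whole table; only the entry whose index equals secret_idx
     * lands in *out, but the memory access pattern is independent of it. */
    static void lookup_ct(entry *out, const entry *table, uint32_t n, uint32_t secret_idx) {
        uint32_t i;
        for (i = 0; i < n; i++) {
            entry_cmov(out, &table[i], i == secret_idx);
        }
    }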
@@ -266,33 +198,6 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
     memset(recoded, 0, sizeof(recoded));
     abs = 0;
     sign = 0;
-
-#else
-    int i, j;
-    memset(&adds, 0, sizeof(adds));
-    *r = ctx->initial;
-    /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
-    secp256k1_scalar_add(&gnb, gn, &ctx->blind);
-    add.infinity = 0;
-    for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
-        bits = secp256k1_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B);
-        for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
-            /** This uses a conditional move to avoid any secret data in array indexes.
-             *   _Any_ use of secret indexes has been demonstrated to result in timing
-             *   sidechannels, even when the cache-line access patterns are uniform.
-             *  See also:
-             *   "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
-             *    (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
-             *   "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
-             *    by Dag Arne Osvik, Adi Shamir, and Eran Tromer
-             *    (https://www.tau.ac.il/~tromer/papers/cache.pdf)
-             */
-            secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], (uint32_t)i == bits);
-        }
-        secp256k1_ge_from_storage(&add, &adds);
-        secp256k1_gej_add_ge(r, r, &add);
-    }
-#endif
     bits = 0;
     secp256k1_ge_clear(&add);
     memset(&adds, 0, sizeof(adds));
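
The removed comment spells out the blinding identity the live code still relies on: compute (n - b)G + bG instead of nG, so the scalar whose bits are actually walked is decorrelated from the secret n. At the scalar level the cancellation is plain arithmetic, sketched below with toy integers. (The comb path additionally doubles the stored blind COMB_SPACING - 1 times, as the gen_blind hunks below show, presumably so the cancellation still holds after the comb loop's accumulator doublings.)

    #include <assert.h>

    int main(void) {
        int n = 12345;              /* toy secret scalar */
        int b = 777;                /* toy random blind */
        int blind = -b;             /* what ctx->blind stores */
        int initial = b;            /* ctx->initial encodes b*G */
        int gnb = n + blind;        /* scalar whose bits get processed */
        assert(initial + gnb == n); /* (n - b) + b == n, so the result is n*G */
        return 0;
    }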
@@ -301,9 +206,7 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
 
 /* Setup blinding values for secp256k1_ecmult_gen. */
 static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32) {
-#ifdef USE_COMB
     int spacing;
-#endif
     secp256k1_scalar b;
     secp256k1_gej gb;
     secp256k1_fe s;
@@ -316,13 +219,11 @@ static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const
         secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g);
         secp256k1_gej_neg(&ctx->initial, &ctx->initial);
         secp256k1_scalar_set_int(&ctx->blind, 1);
-#ifdef USE_COMB
         for (spacing = 1; spacing < COMB_SPACING; ++spacing) {
             secp256k1_scalar_add(&ctx->blind, &ctx->blind, &ctx->blind);
         }
 #if COMB_OFFSET
         secp256k1_gej_add_ge(&ctx->initial, &ctx->initial, &ctx->offset);
-#endif
 #endif
     }
     /* The prior blinding value (if not reset) is chained forward by including it in the hash. */
@@ -355,13 +256,11 @@ static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const
     secp256k1_scalar_negate(&b, &b);
     ctx->blind = b;
     ctx->initial = gb;
-#ifdef USE_COMB
     for (spacing = 1; spacing < COMB_SPACING; ++spacing) {
         secp256k1_scalar_add(&ctx->blind, &ctx->blind, &ctx->blind);
     }
 #if COMB_OFFSET
     secp256k1_gej_add_ge(&ctx->initial, &ctx->initial, &ctx->offset);
-#endif
 #endif
     secp256k1_scalar_clear(&b);
     secp256k1_gej_clear(&gb);
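
On the chaining comment above ("the prior blinding value ... is chained forward by including it in the hash"): deriving the new blinding state from both the fresh seed and the previous blind means a weak or attacker-known seed cannot erase entropy already accumulated. A toy illustration of that chaining shape only; the mixer below is not a real hash and merely stands in for the hash-based derivation the code actually uses:

    #include <stdint.h>

    /* Combine the previous blind with the fresh seed so that neither
     * value alone determines the next state. */
    static uint64_t toy_chain(uint64_t prev_blind, uint64_t seed) {
        uint64_t h = prev_blind ^ 0x9e3779b97f4a7c15ULL; /* arbitrary constant */
        h = (h ^ seed) * 0xbf58476d1ce4e5b9ULL;          /* toy mixer, not a hash */
        return h ^ (h >> 31);
    }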