@@ -493,7 +493,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
     uint64_t p0, p1, p2, p3, p4;
 
     /* Reduce 512 bits into 385. */
-    /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
+    /* m[0..6] = l[0..3] + l[4..7] * SECP256K1_N_C. */
     c = (uint128_t)n4 * SECP256K1_N_C_0;
     c += l[0];
     m0 = (uint64_t)c; c >>= 64;
@@ -504,29 +504,29 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
     c += (uint64_t)u; u >>= 64;
     m1 = (uint64_t)c; c >>= 64;
 
-    c += n4;
+    c += n4; /* SECP256K1_N_C_2 == 1 */
     u += (uint128_t)n5 * SECP256K1_N_C_1;
     u += l[2];
     v = (uint128_t)n6 * SECP256K1_N_C_0;
     c += (uint64_t)u; u >>= 64;
     c += (uint64_t)v; v >>= 64;
     m2 = (uint64_t)c; c >>= 64;
 
-    c += n5;
+    c += n5; /* SECP256K1_N_C_2 == 1 */
     u += (uint128_t)n6 * SECP256K1_N_C_1;
     u += l[3];
     v += (uint128_t)n7 * SECP256K1_N_C_0;
     c += (uint64_t)u; u >>= 64;
     c += (uint64_t)v; v >>= 64;
     m3 = (uint64_t)c; c >>= 64;
 
-    c += n6;
+    c += n6; /* SECP256K1_N_C_2 == 1 */
     u += (uint128_t)n7 * SECP256K1_N_C_1;
     c += (uint64_t)u; u >>= 64;
     c += (uint64_t)v;
     m4 = (uint64_t)c; c >>= 64;
 
-    c += n7;
+    c += n7; /* SECP256K1_N_C_2 == 1 */
     c += (uint64_t)u;
     m5 = (uint64_t)c; c >>= 64;
 
@@ -546,25 +546,25 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
     c += (uint64_t)u; u >>= 64;
     p1 = (uint64_t)c; c >>= 64;
 
-    c += m4;
+    c += m4; /* SECP256K1_N_C_2 == 1 */
     u += (uint128_t)m5 * SECP256K1_N_C_1;
     u += m2;
-    c += (m6 & SECP256K1_N_C_0);
+    c += m6 & SECP256K1_N_C_0;
     c += (uint64_t)u; u >>= 64;
     p2 = (uint64_t)c; c >>= 64;
 
-    c += m5;
-    c += (m6 & SECP256K1_N_C_1);
+    c += m5; /* SECP256K1_N_C_2 == 1 */
+    c += m6 & SECP256K1_N_C_1;
     c += m3;
     c += (uint64_t)u;
     p3 = (uint64_t)c; c >>= 64;
 
-    p4 = (uint64_t)c - m6;;
+    p4 = (uint64_t)c - m6; /* SECP256K1_N_C_2 == 1 */
     VERIFY_CHECK(p4 <= 3);
 
     /* Effectively add an extra SECP256K1_N_C during the next pass.
-     * Values that would have landed in the range [SECP256K_N, 2^256)
-     * will instead "wrap" and carry back to p4 */
+     * Values that would have landed in the range [SECP256K1_N, 2^256) will
+     * instead "wrap" and carry back to p4. */
     ++p4;
 
     /* Reduce 258 bits into 256. */
@@ -575,19 +575,20 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l)
     c += (uint128_t)SECP256K1_N_C_1 * p4;
     c += p1;
     p1 = (uint64_t)c; c >>= 64;
-    c += p4;
+    c += p4; /* SECP256K1_N_C_2 == 1 */
     c += p2;
     p2 = (uint64_t)c; c >>= 64;
     c += p3;
     p3 = (uint64_t)c; c >>= 64;
-    VERIFY_CHECK((uint64_t)c <= 1);
     p4 = (uint64_t)c;
+    VERIFY_CHECK(p4 <= 1);
 
     /* Recover the extra SECP256K1_N_C from the previous pass.
      * If p4 is 1, it becomes a 0 mask - the final pass is a no-op
      * If p4 is 0, the decrement creates a UINT64_MAX mask that enables the
-     * addition of SECP256K_N in the final pass, which must result
-     * in a final carry, which balances the accounts. */
+     * addition of SECP256K1_N in the final pass, which MUST result in a final
+     * carry (because the current value in p[0..3] is >= SECP256K1_N_C), which
+     * can then be dropped to balance the accounts. */
     --p4;
 
     c = p4 & SECP256K1_N_0;
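
A note on the `/* SECP256K1_N_C_2 == 1 */` comments this patch adds: N_C is 2^256 - N, the constant behind each folding pass (2^256 is congruent to N_C mod N, so the high limbs can be multiplied by N_C and added back into the low ones), and for secp256k1's group order N its third 64-bit limb is exactly 1, which is why every `* SECP256K1_N_C_2` product collapses to a plain addition. A standalone sanity check of that limb structure, not part of the patch, with constants copied from scalar_4x64_impl.h:

```c
#include <stdint.h>
#include <assert.h>

/* Sanity check: the limbs of N_C = 2^256 - N for the secp256k1 group
 * order N. The n0..n3 values below are SECP256K1_N_0..SECP256K1_N_3. */
int main(void) {
    const uint64_t n0 = 0xBFD25E8CD0364141ULL;
    const uint64_t n1 = 0xBAAEDCE6AF48A03BULL;
    const uint64_t n2 = 0xFFFFFFFFFFFFFFFEULL;
    const uint64_t n3 = 0xFFFFFFFFFFFFFFFFULL;

    /* 2^256 - N is the two's complement of N over four limbs; since
     * n0 != 0, the "+ 1" does not carry past the lowest limb. */
    assert(~n0 + 1 == 0x402DA1732FC9BEBFULL); /* SECP256K1_N_C_0 */
    assert(~n1 == 0x4551231950B75FC4ULL);     /* SECP256K1_N_C_1 */
    assert(~n2 == 1);                         /* SECP256K1_N_C_2 == 1 */
    assert(~n3 == 0);                         /* N_C fits in 129 bits */
    return 0;
}
```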
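The `++p4` / `--p4` pair relies on a branchless conditional-add idiom: after the second pass, p4 is known to be 0 or 1, and decrementing it yields a mask of 0 or UINT64_MAX that gates the final addition of N without a secret-dependent branch. A toy sketch of the idiom under hypothetical names, not library code:

```c
#include <stdint.h>
#include <assert.h>

/* Branchless conditional add: a flag known to be 0 or 1 is decremented
 * into a mask of 0 or UINT64_MAX, which gates a constant addition with
 * '&' instead of a data-dependent branch. */
static uint64_t add_if_flag_clear(uint64_t x, uint64_t flag) {
    uint64_t mask = flag - 1U; /* flag == 1 -> 0; flag == 0 -> UINT64_MAX */
    return x + (mask & 9U);    /* adds the constant 9 only when flag == 0 */
}

int main(void) {
    assert(add_if_flag_clear(100, 1) == 100); /* mask 0: pass is a no-op */
    assert(add_if_flag_clear(100, 0) == 109); /* mask all-ones: constant added */
    return 0;
}
```

In the patch itself the gated constant is N, applied limb by limb (`p4 & SECP256K1_N_0` and so on), and the guaranteed final carry is dropped, as the rewritten comment explains.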