@@ -492,7 +492,7 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
 }
 
 static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
-    /* 8 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */
+    /* Operations: 8 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
     secp256k1_fe z12, u1, u2, s1, s2, h, i, h2, h3, t;
     secp256k1_gej_verify(a);
     secp256k1_ge_verify(b);
@@ -510,11 +510,11 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
     }
 
     secp256k1_fe_sqr(&z12, &a->z);
-    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
+    u1 = a->x;
     secp256k1_fe_mul(&u2, &b->x, &z12);
-    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
+    s1 = a->y;
     secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
-    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
+    secp256k1_fe_negate(&h, &u1, 6); secp256k1_fe_add(&h, &u2);
     secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1);
     if (secp256k1_fe_normalizes_to_zero_var(&h)) {
         if (secp256k1_fe_normalizes_to_zero_var(&i)) {
@@ -553,7 +553,7 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
 }
 
 static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
-    /* 9 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */
+    /* Operations: 9 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
     secp256k1_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t;
 
     secp256k1_gej_verify(a);
@@ -586,11 +586,11 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
     secp256k1_fe_mul(&az, &a->z, bzinv);
 
     secp256k1_fe_sqr(&z12, &az);
-    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
+    u1 = a->x;
     secp256k1_fe_mul(&u2, &b->x, &z12);
-    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
+    s1 = a->y;
     secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az);
-    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
+    secp256k1_fe_negate(&h, &u1, 6); secp256k1_fe_add(&h, &u2);
     secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1);
     if (secp256k1_fe_normalizes_to_zero_var(&h)) {
         if (secp256k1_fe_normalizes_to_zero_var(&i)) {
@@ -623,7 +623,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
 }
 
 static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) {
-    /* Operations: 7 mul, 5 sqr, 24 add/cmov/half/mul_int/negate/normalize_weak/normalizes_to_zero */
+    /* Operations: 7 mul, 5 sqr, 21 add/cmov/half/mul_int/negate/normalizes_to_zero */
     secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
     secp256k1_fe m_alt, rr_alt;
     int degenerate;
@@ -683,17 +683,17 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
      */
 
     secp256k1_fe_sqr(&zz, &a->z);                       /* z = Z1^2 */
-    u1 = a->x; secp256k1_fe_normalize_weak(&u1);        /* u1 = U1 = X1*Z2^2 (1) */
+    u1 = a->x;                                          /* u1 = U1 = X1*Z2^2 (6) */
     secp256k1_fe_mul(&u2, &b->x, &zz);                  /* u2 = U2 = X2*Z1^2 (1) */
-    s1 = a->y; secp256k1_fe_normalize_weak(&s1);        /* s1 = S1 = Y1*Z2^3 (1) */
+    s1 = a->y;                                          /* s1 = S1 = Y1*Z2^3 (4) */
     secp256k1_fe_mul(&s2, &b->y, &zz);                  /* s2 = Y2*Z1^2 (1) */
     secp256k1_fe_mul(&s2, &s2, &a->z);                  /* s2 = S2 = Y2*Z1^3 (1) */
-    t = u1; secp256k1_fe_add(&t, &u2);                  /* t = T = U1+U2 (2) */
-    m = s1; secp256k1_fe_add(&m, &s2);                  /* m = M = S1+S2 (2) */
+    t = u1; secp256k1_fe_add(&t, &u2);                  /* t = T = U1+U2 (7) */
+    m = s1; secp256k1_fe_add(&m, &s2);                  /* m = M = S1+S2 (5) */
     secp256k1_fe_sqr(&rr, &t);                          /* rr = T^2 (1) */
-    secp256k1_fe_negate(&m_alt, &u2, 1);                /* Malt = -X2*Z1^2 */
-    secp256k1_fe_mul(&tt, &u1, &m_alt);                 /* tt = -U1*U2 (2) */
-    secp256k1_fe_add(&rr, &tt);                         /* rr = R = T^2-U1*U2 (3) */
+    secp256k1_fe_negate(&m_alt, &u2, 1);                /* Malt = -X2*Z1^2 (2) */
+    secp256k1_fe_mul(&tt, &u1, &m_alt);                 /* tt = -U1*U2 (1) */
+    secp256k1_fe_add(&rr, &tt);                         /* rr = R = T^2-U1*U2 (2) */
     /* If lambda = R/M = R/0 we have a problem (except in the "trivial"
      * case that Z = z1z2 = 0, and this is special-cased later on). */
     degenerate = secp256k1_fe_normalizes_to_zero(&m);
@@ -703,34 +703,34 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
      * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
      * so we set R/M equal to this. */
     rr_alt = s1;
-    secp256k1_fe_mul_int(&rr_alt, 2);                   /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
-    secp256k1_fe_add(&m_alt, &u1);                      /* Malt = X1*Z2^2 - X2*Z1^2 */
+    secp256k1_fe_mul_int(&rr_alt, 2);                   /* rr_alt = Y1*Z2^3 - Y2*Z1^3 (8) */
+    secp256k1_fe_add(&m_alt, &u1);                      /* Malt = X1*Z2^2 - X2*Z1^2 (8) */
 
-    secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);
-    secp256k1_fe_cmov(&m_alt, &m, !degenerate);
+    secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);       /* rr_alt (8) */
+    secp256k1_fe_cmov(&m_alt, &m, !degenerate);         /* m_alt (8) */
     /* Now Ralt / Malt = lambda and is guaranteed not to be Ralt / 0.
      * From here on out Ralt and Malt represent the numerator
      * and denominator of lambda; R and M represent the explicit
      * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
     secp256k1_fe_sqr(&n, &m_alt);                       /* n = Malt^2 (1) */
-    secp256k1_fe_negate(&q, &t, 2);                     /* q = -T (3) */
+    secp256k1_fe_negate(&q, &t, 7);                     /* q = -T (8) */
     secp256k1_fe_mul(&q, &q, &n);                       /* q = Q = -T*Malt^2 (1) */
     /* These two lines use the observation that either M == Malt or M == 0,
      * so M^3 * Malt is either Malt^4 (which is computed by squaring), or
      * zero (which is "computed" by cmov). So the cost is one squaring
      * versus two multiplications. */
-    secp256k1_fe_sqr(&n, &n);
-    secp256k1_fe_cmov(&n, &m, degenerate);              /* n = M^3 * Malt (2) */
+    secp256k1_fe_sqr(&n, &n);                           /* n = Malt^4 (1) */
+    secp256k1_fe_cmov(&n, &m, degenerate);              /* n = M^3 * Malt (5) */
     secp256k1_fe_sqr(&t, &rr_alt);                      /* t = Ralt^2 (1) */
     secp256k1_fe_mul(&r->z, &a->z, &m_alt);             /* r->z = Z3 = Malt*Z (1) */
     secp256k1_fe_add(&t, &q);                           /* t = Ralt^2 + Q (2) */
     r->x = t;                                           /* r->x = X3 = Ralt^2 + Q (2) */
     secp256k1_fe_mul_int(&t, 2);                        /* t = 2*X3 (4) */
     secp256k1_fe_add(&t, &q);                           /* t = 2*X3 + Q (5) */
     secp256k1_fe_mul(&t, &t, &rr_alt);                  /* t = Ralt*(2*X3 + Q) (1) */
-    secp256k1_fe_add(&t, &n);                           /* t = Ralt*(2*X3 + Q) + M^3*Malt (3) */
-    secp256k1_fe_negate(&r->y, &t, 3);                  /* r->y = -(Ralt*(2*X3 + Q) + M^3*Malt) (4) */
-    secp256k1_fe_half(&r->y);                           /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 (3) */
+    secp256k1_fe_add(&t, &n);                           /* t = Ralt*(2*X3 + Q) + M^3*Malt (6) */
+    secp256k1_fe_negate(&r->y, &t, 6);                  /* r->y = -(Ralt*(2*X3 + Q) + M^3*Malt) (7) */
+    secp256k1_fe_half(&r->y);                           /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 (4) */
 
     /* In case a->infinity == 1, replace r with (b->x, b->y, 1). */
     secp256k1_fe_cmov(&r->x, &b->x, a->infinity);
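
The updated magnitude annotations in secp256k1_gej_add_ge can be replayed mechanically. Below is a self-contained toy check (plain integers in place of field elements; not library code) using the rules documented in field.h: fe_mul/fe_sqr outputs have magnitude 1, fe_add sums magnitudes, fe_negate with bound m yields m+1, fe_mul_int by k multiplies the magnitude by k, fe_cmov keeps the larger of its two inputs, and fe_half maps m to m/2+1. The starting magnitudes 6 (for a->x) and 4 (for a->y) are the bounds this patch assumes for Jacobian point coordinates.

#include <assert.h>

int main(void) {
    int u1 = 6, u2 = 1;                   /* u1 = a->x (6); u2 from fe_mul (1) */
    int s1 = 4, s2 = 1;                   /* s1 = a->y (4); s2 from two fe_mul (1) */
    int t, m, rr, m_alt, tt, rr_alt, n, q, x3, y3;

    t = u1 + u2;                          assert(t == 7);      /* T = U1+U2 (7) */
    m = s1 + s2;                          assert(m == 5);      /* M = S1+S2 (5) */
    rr = 1;                                                    /* T^2 via fe_sqr (1) */
    m_alt = 1 + 1;                        assert(m_alt == 2);  /* negate, bound 1 (2) */
    tt = 1;                                                    /* -U1*U2 via fe_mul (1) */
    rr += tt;                             assert(rr == 2);     /* R = T^2-U1*U2 (2) */
    rr_alt = 2 * s1;                      assert(rr_alt == 8); /* fe_mul_int by 2 (8) */
    m_alt += u1;                          assert(m_alt == 8);  /* Malt (8) */
    rr_alt = rr_alt > rr ? rr_alt : rr;   assert(rr_alt == 8); /* cmov keeps max (8) */
    m_alt = m_alt > m ? m_alt : m;        assert(m_alt == 8);  /* cmov keeps max (8) */
    n = 1;                                                     /* Malt^2 via fe_sqr (1) */
    q = t + 1;                            assert(q == 8);      /* negate, bound 7 (8) */
    q = 1;                                                     /* Q = -T*Malt^2 via fe_mul (1) */
    n = 1;                                                     /* Malt^4 via fe_sqr (1) */
    n = n > m ? n : m;                    assert(n == 5);      /* cmov(n, m) (5) */
    t = 1;                                                     /* Ralt^2 via fe_sqr (1) */
    t += q;                               assert(t == 2);      /* X3 = Ralt^2 + Q (2) */
    x3 = t;
    t *= 2;                               assert(t == 4);      /* 2*X3 (4) */
    t += q;                               assert(t == 5);      /* 2*X3 + Q (5) */
    t = 1;                                                     /* Ralt*(2*X3+Q) via fe_mul (1) */
    t += n;                               assert(t == 6);      /* + M^3*Malt (6) */
    y3 = t + 1;                           assert(y3 == 7);     /* negate, bound 6 (7) */
    y3 = y3 / 2 + 1;                      assert(y3 == 4);     /* fe_half: m/2+1 (4) */

    /* The outputs stay within the assumed bounds: X3 at magnitude 2, Y3 at 4. */
    assert(x3 <= 6 && y3 <= 4);
    return 0;
}
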