@@ -23,23 +23,26 @@ static SECP256K1_INLINE size_t secp256k1_heap_child2(size_t i) {
     return secp256k1_heap_child1(i) + 1;
 }
 
-static SECP256K1_INLINE void secp256k1_heap_swap64(unsigned char *a, size_t i, size_t j, size_t stride) {
+static SECP256K1_INLINE void secp256k1_heap_swap64(unsigned char *a, unsigned char *b, size_t len) {
     unsigned char tmp[64];
-    VERIFY_CHECK(stride <= 64);
-    memcpy(tmp, a + i*stride, stride);
-    memmove(a + i*stride, a + j*stride, stride);
-    memcpy(a + j*stride, tmp, stride);
+    VERIFY_CHECK(len <= 64);
+    memcpy(tmp, a, len);
+    memmove(a, b, len);
+    memcpy(b, tmp, len);
 }
 
-static SECP256K1_INLINE void secp256k1_heap_swap(unsigned char *a, size_t i, size_t j, size_t stride) {
-    while (64 < stride) {
-        secp256k1_heap_swap64(a + (stride - 64), i, j, 64);
-        stride -= 64;
+static SECP256K1_INLINE void secp256k1_heap_swap(unsigned char *arr, size_t i, size_t j, size_t stride) {
+    unsigned char *a = arr + i*stride;
+    unsigned char *b = arr + j*stride;
+    size_t len = stride;
+    while (64 < len) {
+        secp256k1_heap_swap64(a + (len - 64), b + (len - 64), 64);
+        len -= 64;
     }
-    secp256k1_heap_swap64(a, i, j, stride);
+    secp256k1_heap_swap64(a, b, len);
 }
 
-static SECP256K1_INLINE void secp256k1_heap_down(unsigned char *a, size_t i, size_t heap_size, size_t stride,
+static SECP256K1_INLINE void secp256k1_heap_down(unsigned char *arr, size_t i, size_t heap_size, size_t stride,
                                                  int (*cmp)(const void *, const void *, void *), void *cmp_data) {
     while (i < heap_size/2) {
         VERIFY_CHECK(i <= SIZE_MAX/2 - 1);
@@ -69,18 +72,18 @@ static SECP256K1_INLINE void secp256k1_heap_down(unsigned char *a, size_t i, siz
          * Else if [child2(i)] > [i], swap [i] with [child2(i)].
          */
         if (secp256k1_heap_child2(i) < heap_size
-            && 0 <= cmp(a + secp256k1_heap_child2(i)*stride, a + secp256k1_heap_child1(i)*stride, cmp_data)) {
-            if (0 < cmp(a + secp256k1_heap_child2(i)*stride, a + i*stride, cmp_data)) {
-                secp256k1_heap_swap(a, i, secp256k1_heap_child2(i), stride);
+            && 0 <= cmp(arr + secp256k1_heap_child2(i)*stride, arr + secp256k1_heap_child1(i)*stride, cmp_data)) {
+            if (0 < cmp(arr + secp256k1_heap_child2(i)*stride, arr + i*stride, cmp_data)) {
+                secp256k1_heap_swap(arr, i, secp256k1_heap_child2(i), stride);
                 i = secp256k1_heap_child2(i);
             } else {
                 /* At this point we have [child2(i)] >= [child1(i)] and we have
                  * [child2(i)] <= [i], and thus [child1(i)] <= [i] which means
                  * that the next comparison can be skipped. */
                 return;
             }
-        } else if (0 < cmp(a + secp256k1_heap_child1(i)*stride, a + i*stride, cmp_data)) {
-            secp256k1_heap_swap(a, i, secp256k1_heap_child1(i), stride);
+        } else if (0 < cmp(arr + secp256k1_heap_child1(i)*stride, arr + i*stride, cmp_data)) {
+            secp256k1_heap_swap(arr, i, secp256k1_heap_child1(i), stride);
             i = secp256k1_heap_child1(i);
         } else {
             return;
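
The revised secp256k1_heap_swap computes the two element addresses once and swaps them in 64-byte chunks, so the temporary buffer in secp256k1_heap_swap64 stays a fixed 64 bytes regardless of stride. The sort driver that ties these helpers together is not part of this hunk; the following is a minimal sketch of how such a stride-based heapsort is typically driven, where heap_sort_sketch is a hypothetical name used only for illustration and only the two helpers from the diff are assumed.

    /* Minimal sketch (not from this diff): a textbook heapsort over `count`
     * elements of `stride` bytes each, built on the helpers above.
     * `heap_sort_sketch` is a hypothetical driver name. */
    static void heap_sort_sketch(unsigned char *arr, size_t count, size_t stride,
                                 int (*cmp)(const void *, const void *, void *), void *cmp_data) {
        size_t i;
        if (count < 2) return;
        /* Build a max-heap: sift down every non-leaf node, last first. */
        for (i = count/2; i-- > 0;) {
            secp256k1_heap_down(arr, i, count, stride, cmp, cmp_data);
        }
        /* Repeatedly move the current maximum to the end of the unsorted
         * prefix, then restore the heap property on the shrunken heap. */
        for (i = count - 1; i > 0; i--) {
            secp256k1_heap_swap(arr, 0, i, stride);
            secp256k1_heap_down(arr, 0, i, stride, cmp, cmp_data);
        }
    }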