@@ -10,44 +10,48 @@
 
         .arch           armv8-a+crc
 
-        .macro          byteorder, reg, be
-        .if             \be
-CPU_LE( rev             \reg, \reg      )
-        .else
-CPU_BE( rev             \reg, \reg      )
-        .endif
+        .macro          bitle, reg
         .endm
 
-        .macro          byteorder16, reg, be
-        .if             \be
-CPU_LE( rev16           \reg, \reg      )
-        .else
-CPU_BE( rev16           \reg, \reg      )
-        .endif
+        .macro          bitbe, reg
+        rbit            \reg, \reg
         .endm
 
-        .macro          bitorder, reg, be
-        .if             \be
-        rbit            \reg, \reg
-        .endif
+        .macro          bytele, reg
         .endm
 
-        .macro          bitorder16, reg, be
-        .if             \be
+        .macro          bytebe, reg
         rbit            \reg, \reg
-        lsr             \reg, \reg, #16
-        .endif
+        lsr             \reg, \reg, #24
+        .endm
+
+        .macro          hwordle, reg
+CPU_BE( rev16           \reg, \reg      )
         .endm
 
-        .macro          bitorder8, reg, be
-        .if             \be
+        .macro          hwordbe, reg
+CPU_LE( rev             \reg, \reg      )
         rbit            \reg, \reg
-        lsr             \reg, \reg, #24
-        .endif
+CPU_BE( lsr             \reg, \reg, #16 )
+        .endm
+
+        .macro          le, regs:vararg
+        .irp            r, \regs
+CPU_BE( rev             \r, \r          )
+        .endr
+        .endm
+
+        .macro          be, regs:vararg
+        .irp            r, \regs
+CPU_LE( rev             \r, \r          )
+        .endr
+        .irp            r, \regs
+        rbit            \r, \r
+        .endr
         .endm
 
-        .macro          __crc32, c, be=0
-        bitorder        w0, \be
+        .macro          __crc32, c, order=le
+        bit\order       w0
         cmp             x2, #16
         b.lt            8f              // less than 16 bytes
 
@@ -60,14 +64,7 @@ CPU_BE( rev16 \reg, \reg )
         add             x8, x8, x1
         add             x1, x1, x7
         ldp             x5, x6, [x8]
-        byteorder       x3, \be
-        byteorder       x4, \be
-        byteorder       x5, \be
-        byteorder       x6, \be
-        bitorder        x3, \be
-        bitorder        x4, \be
-        bitorder        x5, \be
-        bitorder        x6, \be
+        \order          x3, x4, x5, x6
 
         tst             x7, #8
         crc32\c\()x     w8, w0, x3
@@ -95,42 +92,32 @@ CPU_BE( rev16 \reg, \reg )
 32:     ldp             x3, x4, [x1], #32
         sub             x2, x2, #32
         ldp             x5, x6, [x1, #-16]
-        byteorder       x3, \be
-        byteorder       x4, \be
-        byteorder       x5, \be
-        byteorder       x6, \be
-        bitorder        x3, \be
-        bitorder        x4, \be
-        bitorder        x5, \be
-        bitorder        x6, \be
+        \order          x3, x4, x5, x6
         crc32\c\()x     w0, w0, x3
         crc32\c\()x     w0, w0, x4
         crc32\c\()x     w0, w0, x5
         crc32\c\()x     w0, w0, x6
         cbnz            x2, 32b
-0:      bitorder        w0, \be
+0:      bit\order       w0
         ret
 
 8:      tbz             x2, #3, 4f
         ldr             x3, [x1], #8
-        byteorder       x3, \be
-        bitorder        x3, \be
+        \order          x3
         crc32\c\()x     w0, w0, x3
 4:      tbz             x2, #2, 2f
         ldr             w3, [x1], #4
-        byteorder       w3, \be
-        bitorder        w3, \be
+        \order          w3
         crc32\c\()w     w0, w0, w3
 2:      tbz             x2, #1, 1f
         ldrh            w3, [x1], #2
-        byteorder16     w3, \be
-        bitorder16      w3, \be
+        hword\order     w3
         crc32\c\()h     w0, w0, w3
 1:      tbz             x2, #0, 0f
         ldrb            w3, [x1]
-        bitorder8       w3, \be
+        byte\order      w3
         crc32\c\()b     w0, w0, w3
-0:      bitorder        w0, \be
+0:      bit\order       w0
         ret
         .endm
 
@@ -146,5 +133,5 @@ SYM_FUNC_END(crc32c_le_arm64)
 
         .align          5
 SYM_FUNC_START(crc32_be_arm64)
-        __crc32         be=1
+        __crc32         order=be
 SYM_FUNC_END(crc32_be_arm64)
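
For reference, the reversal trick the new macros encode can be modeled in C. The AArch64 crc32{b,h,w,x} instructions implement the bit-reflected (LSB-first) form of the polynomial, so the big-endian entry point bit-reverses the accumulator on entry and exit (bit\order) and bit-reverses each data byte before feeding it in (byte\order, or rev+rbit for full words). The sketch below is not part of the commit: rbit32 stands in for the single rbit instruction, the loop in crc32le_byte stands in for crc32b, and the real code keeps w0 bit-reversed across the whole buffer rather than per byte.

#include <stdint.h>

/* Bit-reverse a 32-bit word: what one AArch64 'rbit' does. */
static uint32_t rbit32(uint32_t x)
{
        x = (x >> 16) | (x << 16);
        x = ((x & 0xff00ff00u) >> 8) | ((x & 0x00ff00ffu) << 8);
        x = ((x & 0xf0f0f0f0u) >> 4) | ((x & 0x0f0f0f0fu) << 4);
        x = ((x & 0xccccccccu) >> 2) | ((x & 0x33333333u) << 2);
        x = ((x & 0xaaaaaaaau) >> 1) | ((x & 0x55555555u) << 1);
        return x;
}

/* One byte of bit-reflected CRC-32: the operation 'crc32b' performs. */
static uint32_t crc32le_byte(uint32_t crc, uint8_t b)
{
        crc ^= b;
        for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        return crc;
}

/* MSB-first CRC-32 built on the reflected primitive: bitbe on the
 * accumulator, bytebe (rbit + lsr #24) on the data byte, bitbe again
 * before returning -- the same sequence the macros above generate. */
static uint32_t crc32be_byte(uint32_t crc, uint8_t b)
{
        crc = rbit32(crc);                          /* bitbe   w0 */
        crc = crc32le_byte(crc, rbit32(b) >> 24);   /* bytebe  w3 + crc32b */
        return rbit32(crc);                         /* bitbe   w0 */
}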