Skip to content

Commit 9ac589c

Browse files
ardbiesheuvel authored and herbertx committed
crypto: x86/crc32 - Use local .L symbols for code
Avoid cluttering up the kallsyms symbol table with entries that should not end up in things like backtraces, as they have undescriptive and generated identifiers.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent 1d4b0ff commit 9ac589c

File tree

2 files changed

+41
-42
lines changed

2 files changed

+41
-42
lines changed

arch/x86/crypto/crc32-pclmul_asm.S

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -90,15 +90,15 @@ SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligne
9090
sub $0x40, LEN
9191
add $0x40, BUF
9292
cmp $0x40, LEN
93-
jb less_64
93+
jb .Lless_64
9494

9595
#ifdef __x86_64__
9696
movdqa .Lconstant_R2R1(%rip), CONSTANT
9797
#else
9898
movdqa .Lconstant_R2R1, CONSTANT
9999
#endif
100100

101-
loop_64:/* 64 bytes Full cache line folding */
101+
.Lloop_64:/* 64 bytes Full cache line folding */
102102
prefetchnta 0x40(BUF)
103103
movdqa %xmm1, %xmm5
104104
movdqa %xmm2, %xmm6
@@ -139,8 +139,8 @@ loop_64:/* 64 bytes Full cache line folding */
139139
sub $0x40, LEN
140140
add $0x40, BUF
141141
cmp $0x40, LEN
142-
jge loop_64
143-
less_64:/* Folding cache line into 128bit */
142+
jge .Lloop_64
143+
.Lless_64:/* Folding cache line into 128bit */
144144
#ifdef __x86_64__
145145
movdqa .Lconstant_R4R3(%rip), CONSTANT
146146
#else
@@ -167,8 +167,8 @@ less_64:/* Folding cache line into 128bit */
167167
pxor %xmm4, %xmm1
168168

169169
cmp $0x10, LEN
170-
jb fold_64
171-
loop_16:/* Folding rest buffer into 128bit */
170+
jb .Lfold_64
171+
.Lloop_16:/* Folding rest buffer into 128bit */
172172
movdqa %xmm1, %xmm5
173173
pclmulqdq $0x00, CONSTANT, %xmm1
174174
pclmulqdq $0x11, CONSTANT, %xmm5
@@ -177,9 +177,9 @@ loop_16:/* Folding rest buffer into 128bit */
177177
sub $0x10, LEN
178178
add $0x10, BUF
179179
cmp $0x10, LEN
180-
jge loop_16
180+
jge .Lloop_16
181181

182-
fold_64:
182+
.Lfold_64:
183183
/* perform the last 64 bit fold, also adds 32 zeroes
184184
* to the input stream */
185185
pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */

arch/x86/crypto/crc32c-pcl-intel-asm_64.S

Lines changed: 33 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -49,15 +49,15 @@
4949
## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
5050

5151
.macro LABEL prefix n
52-
\prefix\n\():
52+
.L\prefix\n\():
5353
.endm
5454

5555
.macro JMPTBL_ENTRY i
56-
.quad crc_\i
56+
.quad .Lcrc_\i
5757
.endm
5858

5959
.macro JNC_LESS_THAN j
60-
jnc less_than_\j
60+
jnc .Lless_than_\j
6161
.endm
6262

6363
# Define threshold where buffers are considered "small" and routed to more
@@ -108,30 +108,30 @@ SYM_FUNC_START(crc_pcl)
108108
neg %bufp
109109
and $7, %bufp # calculate the unalignment amount of
110110
# the address
111-
je proc_block # Skip if aligned
111+
je .Lproc_block # Skip if aligned
112112

113113
## If len is less than 8 and we're unaligned, we need to jump
114114
## to special code to avoid reading beyond the end of the buffer
115115
cmp $8, len
116-
jae do_align
116+
jae .Ldo_align
117117
# less_than_8 expects length in upper 3 bits of len_dw
118118
# less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
119119
shl $32-3+1, len_dw
120-
jmp less_than_8_post_shl1
120+
jmp .Lless_than_8_post_shl1
121121

122-
do_align:
122+
.Ldo_align:
123123
#### Calculate CRC of unaligned bytes of the buffer (if any)
124124
movq (bufptmp), tmp # load a quadward from the buffer
125125
add %bufp, bufptmp # align buffer pointer for quadword
126126
# processing
127127
sub %bufp, len # update buffer length
128-
align_loop:
128+
.Lalign_loop:
129129
crc32b %bl, crc_init_dw # compute crc32 of 1-byte
130130
shr $8, tmp # get next byte
131131
dec %bufp
132-
jne align_loop
132+
jne .Lalign_loop
133133

134-
proc_block:
134+
.Lproc_block:
135135

136136
################################################################
137137
## 2) PROCESS BLOCKS:
@@ -141,11 +141,11 @@ proc_block:
141141
movq len, tmp # save num bytes in tmp
142142

143143
cmpq $128*24, len
144-
jae full_block
144+
jae .Lfull_block
145145

146-
continue_block:
146+
.Lcontinue_block:
147147
cmpq $SMALL_SIZE, len
148-
jb small
148+
jb .Lsmall
149149

150150
## len < 128*24
151151
movq $2731, %rax # 2731 = ceil(2^16 / 24)
@@ -175,7 +175,7 @@ continue_block:
175175
################################################################
176176
## 2a) PROCESS FULL BLOCKS:
177177
################################################################
178-
full_block:
178+
.Lfull_block:
179179
movl $128,%eax
180180
lea 128*8*2(block_0), block_1
181181
lea 128*8*3(block_0), block_2
@@ -190,7 +190,6 @@ full_block:
190190
## 3) CRC Array:
191191
################################################################
192192

193-
crc_array:
194193
i=128
195194
.rept 128-1
196195
.altmacro
@@ -243,28 +242,28 @@ LABEL crc_ 0
243242
ENDBR
244243
mov tmp, len
245244
cmp $128*24, tmp
246-
jae full_block
245+
jae .Lfull_block
247246
cmp $24, tmp
248-
jae continue_block
247+
jae .Lcontinue_block
249248

250-
less_than_24:
249+
.Lless_than_24:
251250
shl $32-4, len_dw # less_than_16 expects length
252251
# in upper 4 bits of len_dw
253-
jnc less_than_16
252+
jnc .Lless_than_16
254253
crc32q (bufptmp), crc_init
255254
crc32q 8(bufptmp), crc_init
256-
jz do_return
255+
jz .Ldo_return
257256
add $16, bufptmp
258257
# len is less than 8 if we got here
259258
# less_than_8 expects length in upper 3 bits of len_dw
260259
# less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
261260
shl $2, len_dw
262-
jmp less_than_8_post_shl1
261+
jmp .Lless_than_8_post_shl1
263262

264263
#######################################################################
265264
## 6) LESS THAN 256-bytes REMAIN AT THIS POINT (8-bits of len are full)
266265
#######################################################################
267-
small:
266+
.Lsmall:
268267
shl $32-8, len_dw # Prepare len_dw for less_than_256
269268
j=256
270269
.rept 5 # j = {256, 128, 64, 32, 16}
@@ -280,32 +279,32 @@ LABEL less_than_ %j # less_than_j: Length should be in
280279
crc32q i(bufptmp), crc_init # Compute crc32 of 8-byte data
281280
i=i+8
282281
.endr
283-
jz do_return # Return if remaining length is zero
282+
jz .Ldo_return # Return if remaining length is zero
284283
add $j, bufptmp # Advance buf
285284
.endr
286285

287-
less_than_8: # Length should be stored in
286+
.Lless_than_8: # Length should be stored in
288287
# upper 3 bits of len_dw
289288
shl $1, len_dw
290-
less_than_8_post_shl1:
291-
jnc less_than_4
289+
.Lless_than_8_post_shl1:
290+
jnc .Lless_than_4
292291
crc32l (bufptmp), crc_init_dw # CRC of 4 bytes
293-
jz do_return # return if remaining data is zero
292+
jz .Ldo_return # return if remaining data is zero
294293
add $4, bufptmp
295-
less_than_4: # Length should be stored in
294+
.Lless_than_4: # Length should be stored in
296295
# upper 2 bits of len_dw
297296
shl $1, len_dw
298-
jnc less_than_2
297+
jnc .Lless_than_2
299298
crc32w (bufptmp), crc_init_dw # CRC of 2 bytes
300-
jz do_return # return if remaining data is zero
299+
jz .Ldo_return # return if remaining data is zero
301300
add $2, bufptmp
302-
less_than_2: # Length should be stored in the MSB
301+
.Lless_than_2: # Length should be stored in the MSB
303302
# of len_dw
304303
shl $1, len_dw
305-
jnc less_than_1
304+
jnc .Lless_than_1
306305
crc32b (bufptmp), crc_init_dw # CRC of 1 byte
307-
less_than_1: # Length should be zero
308-
do_return:
306+
.Lless_than_1: # Length should be zero
307+
.Ldo_return:
309308
movq crc_init, %rax
310309
popq %rsi
311310
popq %rdi

0 commit comments

Comments (0)