|
| 1 | +/* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | +/* |
| 3 | + * Checksum routines |
| 4 | + * |
| 5 | + * Copyright (C) 2023 Rivos Inc. |
| 6 | + */ |
| 7 | +#ifndef __ASM_RISCV_CHECKSUM_H |
| 8 | +#define __ASM_RISCV_CHECKSUM_H |
| 9 | + |
| 10 | +#include <linux/in6.h> |
| 11 | +#include <linux/uaccess.h> |
| 12 | + |
| 13 | +#define ip_fast_csum ip_fast_csum |
| 14 | + |
| 15 | +/* Define riscv versions of functions before importing asm-generic/checksum.h */ |
| 16 | +#include <asm-generic/checksum.h> |
| 17 | + |
/**
 * ip_fast_csum - Compute the IPv4 header checksum.
 * @iph: pointer to the IP header; assumed to be word aligned given that
 *	 NET_IP_ALIGN is set to 2 on riscv, defining IP headers to be aligned.
 * @ihl: the number of 32 bit segments and must be greater than or equal to 5.
 *
 * Quickly compute an IP checksum with the assumption that IPv4 headers will
 * always be in multiples of 32-bits, and have an ihl of at least 5.
 *
 * Return: the 16-bit folded, complemented checksum.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	/*
	 * Accumulate in an unsigned long: 64 bits wide on rv64, so at most
	 * 15 (max ihl) 32-bit words can never overflow it; 32 bits on rv32,
	 * where carries must be re-added explicitly (end-around carry).
	 */
	unsigned long csum = 0;
	int pos = 0;

	/*
	 * do-while is safe without a pre-check because the contract above
	 * guarantees ihl >= 5, i.e. at least one word to read.
	 */
	do {
		csum += ((const unsigned int *)iph)[pos];
		if (IS_ENABLED(CONFIG_32BIT))
			csum += csum < ((const unsigned int *)iph)[pos];
	} while (++pos < ihl);

	/*
	 * ZBB only saves three instructions on 32-bit and five on 64-bit so not
	 * worth checking if supported without Alternatives.
	 */
	if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
	    IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
		unsigned long fold_temp;

		/*
		 * Boot-time patched branch: replaced with a nop when the CPU
		 * reports the Zbb extension, otherwise jumps to the generic
		 * fold at the no_zbb label below.
		 */
		asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
					      RISCV_ISA_EXT_ZBB, 1)
				  :
				  :
				  :
				  : no_zbb);

		if (IS_ENABLED(CONFIG_32BIT)) {
			/*
			 * Fold the 32-bit sum to 16 bits and complement it in
			 * three Zbb instructions; the result lands in the
			 * upper half of csum (hence the >> 16 below).
			 */
			asm(".option push				\n\
			.option arch,+zbb				\n\
				not	%[fold_temp], %[csum]		\n\
				rori	%[csum], %[csum], 16		\n\
				sub	%[csum], %[fold_temp], %[csum]	\n\
			.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
		} else {
			/*
			 * 64-bit: first fold the 64-bit sum to 32 bits
			 * (rori 32 + add + srli 32), then apply the same
			 * complement/rotate/subtract fold as the 32-bit path
			 * using the word-sized roriw/subw forms.
			 */
			asm(".option push				\n\
			.option arch,+zbb				\n\
				rori	%[fold_temp], %[csum], 32	\n\
				add	%[csum], %[fold_temp], %[csum]	\n\
				srli	%[csum], %[csum], 32		\n\
				not	%[fold_temp], %[csum]		\n\
				roriw	%[csum], %[csum], 16		\n\
				subw	%[csum], %[fold_temp], %[csum]	\n\
			.option pop"
			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp));
		}
		/* Folded, complemented result is in the upper 16 bits. */
		return (__force __sum16)(csum >> 16);
	}
no_zbb:
#ifndef CONFIG_32BIT
	/* Fold the 64-bit accumulator down to 32 bits with end-around carry. */
	csum += ror64(csum, 32);
	csum >>= 32;
#endif
	return csum_fold((__force __wsum)csum);
}
| 81 | + |
| 82 | +#endif /* __ASM_RISCV_CHECKSUM_H */ |
0 commit comments