> -----Original Message-----
> From: Charlie Jenkins <charlie@xxxxxxxxxxxx>
> Sent: Saturday, October 28, 2023 6:44 AM
> To: Charlie Jenkins <charlie@xxxxxxxxxxxx>; Palmer Dabbelt <palmer@xxxxxxxxxxx>;
> Conor Dooley <conor@xxxxxxxxxx>; Samuel Holland <samuel.holland@xxxxxxxxxx>;
> David Laight <David.Laight@xxxxxxxxxx>; Wang, Xiao W <xiao.w.wang@xxxxxxxxx>;
> Evan Green <evan@xxxxxxxxxxxx>; linux-riscv@xxxxxxxxxxxxxxxxxxx;
> linux-kernel@xxxxxxxxxxxxxxx; linux-arch@xxxxxxxxxxxxxxx
> Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx>; Albert Ou <aou@xxxxxxxxxxxxxxxxx>;
> Arnd Bergmann <arnd@xxxxxxxx>; Conor Dooley <conor.dooley@xxxxxxxxxxxxx>
> Subject: [PATCH v8 4/5] riscv: Add checksum library
>
> Provide 32-bit and 64-bit versions of do_csum. When compiled for 32-bit
> it will load from the buffer in groups of 32 bits, and when compiled for
> 64-bit it will load in groups of 64 bits.
>
> Signed-off-by: Charlie Jenkins <charlie@xxxxxxxxxxxx>
> Acked-by: Conor Dooley <conor.dooley@xxxxxxxxxxxxx>
> ---
>  arch/riscv/lib/Makefile |   1 +
>  arch/riscv/lib/csum.c   | 339 ++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 340 insertions(+)
>
> diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
> index 26cb2502ecf8..2aa1a4ad361f 100644
> --- a/arch/riscv/lib/Makefile
> +++ b/arch/riscv/lib/Makefile
> @@ -6,6 +6,7 @@ lib-y += memmove.o
>  lib-y += strcmp.o
>  lib-y += strlen.o
>  lib-y += strncmp.o
> +lib-y += csum.o
>  lib-$(CONFIG_MMU) += uaccess.o
>  lib-$(CONFIG_64BIT) += tishift.o
>  lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
> diff --git a/arch/riscv/lib/csum.c b/arch/riscv/lib/csum.c
> new file mode 100644
> index 000000000000..f90e73606597
> --- /dev/null
> +++ b/arch/riscv/lib/csum.c
> @@ -0,0 +1,339 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * IP checksum library

Same comment as patch 3/5.

> + *
> + * Influenced by arch/arm64/lib/csum.c
> + * Copyright (C) 2023 Rivos Inc.
> + */
> +#include <linux/bitops.h>
> +#include <linux/compiler.h>
> +#include <asm/cpufeature.h>
> +#include <linux/jump_label.h>
> +#include <linux/kasan-checks.h>
> +#include <linux/kernel.h>
> +
> +#include <net/checksum.h>
> +
> +/* Default version is sufficient for 32 bit */
> +#ifndef CONFIG_32BIT

Why not use the same guard, "#ifdef CONFIG_64BIT", as in checksum.h?
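To illustrate (an untested sketch of the guard only, prototype shown for
brevity; it is equivalent as long as CONFIG_32BIT and CONFIG_64BIT stay
mutually exclusive on riscv, and it matches the convention already used in
arch/riscv/include/asm/checksum.h):

/* csum_ipv6_magic() below is only built for 64 bit */
#ifdef CONFIG_64BIT
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                        const struct in6_addr *daddr,
                        __u32 len, __u8 proto, __wsum csum);
#endif /* CONFIG_64BIT */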
> +__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
> +			const struct in6_addr *daddr,
> +			__u32 len, __u8 proto, __wsum csum)
> +{
> +	unsigned int ulen, uproto;
> +	unsigned long sum = csum;
> +
> +	sum += saddr->s6_addr32[0];
> +	sum += saddr->s6_addr32[1];
> +	sum += saddr->s6_addr32[2];
> +	sum += saddr->s6_addr32[3];
> +
> +	sum += daddr->s6_addr32[0];
> +	sum += daddr->s6_addr32[1];
> +	sum += daddr->s6_addr32[2];
> +	sum += daddr->s6_addr32[3];
> +
> +	ulen = htonl((unsigned int)len);
> +	sum += ulen;
> +
> +	uproto = htonl(proto);
> +	sum += uproto;
> +
> +	/*
> +	 * Zbb support saves 4 instructions, so not worth checking without
> +	 * alternatives if supported
> +	 */
> +	if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
> +	    IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
> +		unsigned long fold_temp;
> +
> +		/*
> +		 * Zbb is likely available when the kernel is compiled with
> +		 * Zbb support, so nop when Zbb is available and jump when
> +		 * Zbb is not available.
> +		 */
> +		asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
> +					      RISCV_ISA_EXT_ZBB, 1)
> +				  :
> +				  :
> +				  :
> +				  : no_zbb);
> +		asm(".option push				\n\
> +		.option arch,+zbb				\n\
> +			rori	%[fold_temp], %[sum], 32	\n\
> +			add	%[sum], %[fold_temp], %[sum]	\n\
> +			srli	%[sum], %[sum], 32		\n\
> +			not	%[fold_temp], %[sum]		\n\
> +			roriw	%[sum], %[sum], 16		\n\
> +			subw	%[sum], %[fold_temp], %[sum]	\n\
> +		.option pop"
> +		: [sum] "+r" (sum), [fold_temp] "=&r" (fold_temp));
> +		return (__force __sum16)(sum >> 16);
> +	}
> +no_zbb:
> +	sum += ror64(sum, 32);
> +	sum >>= 32;
> +	return csum_fold((__force __wsum)sum);
> +}
> +EXPORT_SYMBOL(csum_ipv6_magic);
> +#endif /* !CONFIG_32BIT */
> +
> +#ifdef CONFIG_32BIT
> +#define OFFSET_MASK 3
> +#elif CONFIG_64BIT
> +#define OFFSET_MASK 7
> +#endif
> +
> +/*
> + * Algorithm accounts for buff being misaligned.
> + * If buff is not aligned, will over-read bytes but not use the bytes that it
> + * shouldn't. The same thing will occur on the tail-end of the read.
> + */
> +static inline __no_sanitize_address unsigned int
> +do_csum_with_alignment(const unsigned char *buff, int len)
> +{
> +	unsigned int offset, shift;
> +	unsigned long csum = 0, carry = 0, data;
> +	const unsigned long *ptr, *end;
> +
> +	end = (const unsigned long *)(buff + len);
> +
> +	/*
> +	 * Align address to closest word (double word on rv64) that comes before
> +	 * buff. This should always be in the same page and cache line.
> +	 * Directly call KASAN with the alignment we will be using.
> +	 */
> +	offset = (unsigned long)buff & OFFSET_MASK;
> +	kasan_check_read(buff, len);
> +	ptr = (const unsigned long *)(buff - offset);
> +
> +	/*
> +	 * Clear the most significant bytes that were over-read if buff was not
> +	 * aligned.
> +	 */
> +	shift = offset * 8;
> +	data = *(ptr++);
> +#ifdef __LITTLE_ENDIAN
> +	data = (data >> shift) << shift;
> +#else
> +	data = (data << shift) >> shift;
> +#endif
> +	/*
> +	 * Do 32-bit reads on RV32 and 64-bit reads otherwise. This should be
> +	 * faster than doing 32-bit reads on architectures that support larger
> +	 * reads.
> +	 */
> +	while (ptr < end) {
> +		csum += data;
> +		carry += csum < data;
> +		len -= sizeof(long);
> +		data = *(ptr++);
> +	}
> +
> +	/*
> +	 * Perform alignment (and over-read) bytes on the tail if any bytes
> +	 * leftover.
> +	 */
> +	shift = ((long)ptr - (long)end) * 8;
> +#ifdef __LITTLE_ENDIAN
> +	data = (data << shift) >> shift;
> +#else
> +	data = (data >> shift) << shift;
> +#endif
> +	csum += data;
> +	carry += csum < data;
> +	csum += carry;
> +	csum += csum < carry;
> +
> +	/*
> +	 * Zbb support saves 6 instructions, so not worth checking without
> +	 * alternatives if supported
> +	 */
> +	if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
> +	    IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
> +		unsigned long fold_temp;
> +
> +		/*
> +		 * Zbb is likely available when the kernel is compiled with
> +		 * Zbb support, so nop when Zbb is available and jump when
> +		 * Zbb is not available.
> +		 */
> +		asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
> +					      RISCV_ISA_EXT_ZBB, 1)
> +				  :
> +				  :
> +				  :
> +				  : no_zbb);
> +
> +#ifdef CONFIG_32BIT
> +		asm_volatile_goto(".option push			\n\
> +		.option arch,+zbb				\n\
> +			rori	%[fold_temp], %[csum], 16	\n\
> +			andi	%[offset], %[offset], 1		\n\
> +			add	%[csum], %[fold_temp], %[csum]	\n\
> +			beq	%[offset], zero, %l[end]	\n\
> +			rev8	%[csum], %[csum]		\n\
> +		.option pop"
> +			: [csum] "+r" (csum),
> +			  [fold_temp] "=&r" (fold_temp)
> +			: [offset] "r" (offset)
> +			:
> +			: end);
> +
> +		return (unsigned short)csum;
> +#else /* !CONFIG_32BIT */
> +		asm_volatile_goto(".option push			\n\
> +		.option arch,+zbb				\n\
> +			rori	%[fold_temp], %[csum], 32	\n\
> +			add	%[csum], %[fold_temp], %[csum]	\n\
> +			srli	%[csum], %[csum], 32		\n\
> +			roriw	%[fold_temp], %[csum], 16	\n\
> +			addw	%[csum], %[fold_temp], %[csum]	\n\
> +			andi	%[offset], %[offset], 1		\n\
> +			beq	%[offset], zero, %l[end]	\n\
> +			rev8	%[csum], %[csum]		\n\
> +		.option pop"
> +			: [csum] "+r" (csum),
> +			  [fold_temp] "=&r" (fold_temp)
> +			: [offset] "r" (offset)
> +			:
> +			: end);
> +
> +		return (csum << 16) >> 48;
> +#endif /* !CONFIG_32BIT */
> +end:
> +		return csum >> 16;
> +	}
> +no_zbb:
> +#ifndef CONFIG_32BIT
> +	csum += ror64(csum, 32);
> +	csum >>= 32;
> +#endif
> +	csum = (u32)csum + ror32((u32)csum, 16);
> +	if (offset & 1)
> +		return (u16)swab32(csum);
> +	return csum >> 16;
> +}
> +
> +/*
> + * Does not perform alignment, should only be used if machine has fast
> + * misaligned accesses, because buff may be misaligned.
> + */
> +static inline unsigned int do_csum_no_alignment(const unsigned char *buff, int len)
> +{
> +	unsigned int offset, shift;
> +	unsigned long csum = 0, carry = 0, data;
> +	const unsigned long *ptr, *end;
> +
> +	end = (const unsigned long *)(buff + len);
> +

kasan_check_read() missing in this function.
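Something like the below, I suppose (an untested sketch; and since the last
word-sized load can run past buff + len, this function would presumably also
want to be __no_sanitize_address like do_csum_with_alignment, with the read
reported to KASAN by hand):

	end = (const unsigned long *)(buff + len);
	/* Word loads may over-read past buff + len; tell KASAN explicitly. */
	kasan_check_read(buff, len);
	ptr = (const unsigned long *)(buff);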
> +	ptr = (const unsigned long *)(buff);
> +
> +	data = *(ptr++);
> +
> +	/*
> +	 * Do 32-bit reads on RV32 and 64-bit reads otherwise. This should be
> +	 * faster than doing 32-bit reads on architectures that support larger
> +	 * reads.
> +	 */
> +	while (ptr < end) {
> +		csum += data;
> +		carry += csum < data;
> +		len -= sizeof(long);
> +		data = *(ptr++);
> +	}
> +
> +	/*
> +	 * Perform alignment (and over-read) bytes on the tail if any bytes
> +	 * leftover.
> +	 */
> +	shift = ((long)ptr - (long)end) * 8;
> +#ifdef __LITTLE_ENDIAN
> +	data = (data << shift) >> shift;
> +#else
> +	data = (data >> shift) << shift;
> +#endif
> +	csum += data;
> +	carry += csum < data;
> +	csum += carry;
> +	csum += csum < carry;
> +
> +	/*
> +	 * Zbb support saves 6 instructions, so not worth checking without
> +	 * alternatives if supported
> +	 */
> +	if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) &&
> +	    IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
> +		unsigned long fold_temp;
> +
> +		/*
> +		 * Zbb is likely available when the kernel is compiled with
> +		 * Zbb support, so nop when Zbb is available and jump when
> +		 * Zbb is not available.
> +		 */
> +		asm_volatile_goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0,
> +					      RISCV_ISA_EXT_ZBB, 1)
> +				  :
> +				  :
> +				  :
> +				  : no_zbb);
> +
> +#ifdef CONFIG_32BIT
> +		asm (".option push				\n\
> +		.option arch,+zbb				\n\
> +			rori	%[fold_temp], %[csum], 16	\n\
> +			andi	%[offset], %[offset], 1		\n\
> +			add	%[csum], %[fold_temp], %[csum]	\n\
> +		.option pop"
> +			: [csum] "+r" (csum),
> +			[fold_temp] "=&r" (fold_temp)

It's better to align the indentation here, or we can follow the CONFIG_64BIT
case below.
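For example (matching the one-line constraint style of the CONFIG_64BIT
branch below):

			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
			: [offset] "r" (offset)
			: );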
> +			: [offset] "r" (offset)
> +			: );
> +
> +#else /* !CONFIG_32BIT */
> +		asm (".option push				\n\
> +		.option arch,+zbb				\n\
> +			rori	%[fold_temp], %[csum], 32	\n\
> +			add	%[csum], %[fold_temp], %[csum]	\n\
> +			srli	%[csum], %[csum], 32		\n\
> +			roriw	%[fold_temp], %[csum], 16	\n\
> +			addw	%[csum], %[fold_temp], %[csum]	\n\
> +		.option pop"
> +			: [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)
> +			: [offset] "r" (offset)
> +			: );
> +#endif /* !CONFIG_32BIT */
> +		return csum >> 16;
> +	}
> +no_zbb:
> +#ifndef CONFIG_32BIT
> +	csum += ror64(csum, 32);
> +	csum >>= 32;
> +#endif
> +	csum = (u32)csum + ror32((u32)csum, 16);
> +	return csum >> 16;
> +}
> +
> +/*
> + * Perform a checksum on an arbitrary memory address.
> + * Will do a light-weight address alignment if buff is misaligned, unless
> + * cpu supports fast misaligned accesses.
> + */
> +unsigned int do_csum(const unsigned char *buff, int len)
> +{
> +	if (unlikely(len <= 0))
> +		return 0;
> +
> +	/*
> +	 * Very significant performance gains can be seen by not doing
> +	 * alignment on machines with fast misaligned accesses.
> +	 *
> +	 * There is some duplicate code between the "with_alignment" and
> +	 * "no_alignment" implementations, but the overlap is too awkward to
> +	 * be able to fit in one function without introducing multiple static
> +	 * branches.
> +	 */
> +	if (static_branch_likely(&fast_misaligned_access_speed_key))
> +		return do_csum_no_alignment(buff, len);

When the CPU doesn't support fast misaligned accesses but the buff address
happens to be aligned (which can be checked with ((unsigned long)buff &
OFFSET_MASK) == 0), would it be worth adding that check and then possibly
calling do_csum_no_alignment()? (See the sketch at the very end of this
mail.)

BRs,
Xiao

> +
> +	return do_csum_with_alignment(buff, len);
> +}
>
> --
> 2.42.0
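To make the above suggestion concrete, the kind of change I had in mind (an
untested sketch; whether it wins in practice depends on how often buffers
arrive aligned versus the cost of the extra branch on the misaligned path):

unsigned int do_csum(const unsigned char *buff, int len)
{
	if (unlikely(len <= 0))
		return 0;

	/*
	 * Hypothetical: when buff is word-aligned, every load in
	 * do_csum_no_alignment() is itself aligned (ptr starts at buff and
	 * advances by sizeof(long)), so the fast path is safe even without
	 * fast misaligned access support.
	 */
	if (static_branch_likely(&fast_misaligned_access_speed_key) ||
	    ((unsigned long)buff & OFFSET_MASK) == 0)
		return do_csum_no_alignment(buff, len);

	return do_csum_with_alignment(buff, len);
}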