On Wed, Jul 05, 2023 at 06:05:35PM -0300, Thadeu Lima de Souza Cascardo wrote: > diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c > index 9a85e797ed58..e596d1a842f7 100644 > --- a/net/netfilter/nft_byteorder.c > +++ b/net/netfilter/nft_byteorder.c > @@ -30,11 +30,11 @@ void nft_byteorder_eval(const struct nft_expr *expr, > const struct nft_byteorder *priv = nft_expr_priv(expr); > u32 *src = &regs->data[priv->sreg]; > u32 *dst = &regs->data[priv->dreg]; > - union { u32 u32; u16 u16; } *s, *d; > + u16 *s16, *d16; > unsigned int i; > > - s = (void *)src; > - d = (void *)dst; > + s16 = (void *)src; > + d16 = (void *)dst; > > switch (priv->size) { > case 8: { This patch is correct, but shouldn't we fix the code for 64 bit writes as well? net/netfilter/nft_byteorder.c 26 void nft_byteorder_eval(const struct nft_expr *expr, 27 struct nft_regs *regs, 28 const struct nft_pktinfo *pkt) 29 { 30 const struct nft_byteorder *priv = nft_expr_priv(expr); 31 u32 *src = &regs->data[priv->sreg]; 32 u32 *dst = &regs->data[priv->dreg]; 33 u16 *s16, *d16; 34 unsigned int i; 35 36 s16 = (void *)src; 37 d16 = (void *)dst; 38 39 switch (priv->size) { 40 case 8: { 41 u64 src64; 42 43 switch (priv->op) { 44 case NFT_BYTEORDER_NTOH: 45 for (i = 0; i < priv->len / 8; i++) { 46 src64 = nft_reg_load64(&src[i]); 47 nft_reg_store64(&dst[i], 48 be64_to_cpu((__force __be64)src64)); We're writing 8 bytes, then moving forward 4 bytes and writing 8 bytes again. Each subsequent write over-writes 4 bytes from the previous write. 49 } 50 break; 51 case NFT_BYTEORDER_HTON: 52 for (i = 0; i < priv->len / 8; i++) { 53 src64 = (__force __u64) 54 cpu_to_be64(nft_reg_load64(&src[i])); 55 nft_reg_store64(&dst[i], src64); Same. 56 } 57 break; 58 } 59 break; 60 } 61 case 4: 62 switch (priv->op) { 63 case NFT_BYTEORDER_NTOH: 64 for (i = 0; i < priv->len / 4; i++) 65 dst[i] = ntohl((__force __be32)src[i]); 66 break; 67 case NFT_BYTEORDER_HTON: regards, dan carpenter