Re: [PATCH bpf-next 12/15] bpf: Preserve boundaries and track scalars on narrowing fill

On Wed, 2023-12-20 at 23:40 +0200, Maxim Mikityanskiy wrote:

[...]

The two tests below were added by the following commit:
ef979017b837 ("bpf: selftest: Add verifier tests for <8-byte scalar spill and refill")

As far as I understand, the original intent was to check the verifier's
behavior for stack reads and writes with mismatched sizes.
I think these tests become redundant after patch #13. Wdyt?
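
For context on the endian-dependent offsets in the hunks below: the u32
spill at fp-8 occupies fp-8..fp-5, so the 16-bit half that still holds
the known constant sits at fp-8 on little-endian and at fp-6 on
big-endian. A minimal host-side C sketch of that layout (stack[] and
the at_fp* names are just illustration, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint32_t r4 = 20;	/* mimics *(u32 *)(r10 - 8) = r4 */
		unsigned char stack[4];
		uint16_t at_fp8, at_fp6;

		/* stack[0..3] model fp-8..fp-5 */
		memcpy(stack, &r4, sizeof(r4));
		/* narrowing fills: r4 = *(u16 *)(r10 - 8) and (r10 - 6) */
		memcpy(&at_fp8, stack, sizeof(at_fp8));
		memcpy(&at_fp6, stack + 2, sizeof(at_fp6));

		/* little-endian: at_fp8 == 20, at_fp6 == 0
		 * big-endian:    at_fp8 == 0,  at_fp6 == 20
		 */
		printf("fp-8: %u, fp-6: %u\n", at_fp8, at_fp6);
		return 0;
	}

That is why the first test below refills the half that still holds the
tracked constant (R4=20, hence __success after this patch), while the
second one deliberately refills the other half, which remains unbounded
(R4=umax=65535) and must keep failing.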

> diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
> index 809a09732168..de03e72e07a9 100644
> --- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
> +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
> @@ -217,7 +217,7 @@ __naked void uninit_u32_from_the_stack(void)
>  
>  SEC("tc")
>  __description("Spill a u32 const scalar.  Refill as u16.  Offset to skb->data")
> -__failure __msg("invalid access to packet")
> +__success __retval(0)
>  __naked void u16_offset_to_skb_data(void)
>  {
>  	asm volatile ("					\
> @@ -225,19 +225,24 @@ __naked void u16_offset_to_skb_data(void)
>  	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
>  	w4 = 20;					\
>  	*(u32*)(r10 - 8) = r4;				\
> -	r4 = *(u16*)(r10 - 8);				\
> +	r4 = *(u16*)(r10 - %[offset]);			\
>  	r0 = r2;					\
> -	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
> +	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */\
>  	r0 += r4;					\
> -	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
> +	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
>  	if r0 > r3 goto l0_%=;				\
> -	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
> +	/* r0 = *(u32 *)r2 R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
>  	r0 = *(u32*)(r2 + 0);				\
>  l0_%=:	r0 = 0;						\
>  	exit;						\
>  "	:
>  	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
> -	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
> +	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
> +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
> +	  __imm_const(offset, 8)
> +#else
> +	  __imm_const(offset, 6)
> +#endif
>  	: __clobber_all);
>  }
>  
> @@ -270,7 +275,7 @@ l0_%=:	r0 = 0;						\
>  }
>  
>  SEC("tc")
> -__description("Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data")
> +__description("Spill a u32 const scalar.  Refill as u16 from MSB.  Offset to skb->data")
>  __failure __msg("invalid access to packet")
>  __naked void _6_offset_to_skb_data(void)
>  {
> @@ -279,7 +284,7 @@ __naked void _6_offset_to_skb_data(void)
>  	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
>  	w4 = 20;					\
>  	*(u32*)(r10 - 8) = r4;				\
> -	r4 = *(u16*)(r10 - 6);				\
> +	r4 = *(u16*)(r10 - %[offset]);			\
>  	r0 = r2;					\
>  	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
>  	r0 += r4;					\
> @@ -291,7 +296,12 @@ l0_%=:	r0 = 0;						\
>  	exit;						\
>  "	:
>  	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
> -	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
> +	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
> +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
> +	  __imm_const(offset, 6)
> +#else
> +	  __imm_const(offset, 8)
> +#endif
>  	: __clobber_all);
>  }
>  