@@ -28,6 +28,20 @@ struct {
 	__uint(map_flags, BPF_F_WRONLY_PROG);
 } map_array_wo SEC(".maps");
 
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 2);
+	__type(key, __u32);
+	__type(value, struct test_val);
+} map_array_pcpu SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 2);
+	__type(key, __u32);
+	__type(value, struct test_val);
+} map_array SEC(".maps");
+
 struct {
 	__uint(type, BPF_MAP_TYPE_HASH);
 	__uint(max_entries, 1);
@@ -525,4 +539,178 @@ l0_%=: exit; \
 	: __clobber_all);
 }
 
+SEC("socket")
+__description("valid map access into an array using constant without nullness")
+__success __retval(4) __log_level(2)
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
+unsigned int an_array_with_a_constant_no_nullness(void)
+{
+	/* Need 8-byte alignment for spill tracking */
+	__u32 __attribute__((aligned(8))) key = 1;
+	struct test_val *val;
+
+	val = bpf_map_lookup_elem(&map_array, &key);
+	val->index = offsetof(struct test_val, foo);
+
+	return val->index;
+}
+
+SEC("socket")
+__description("valid multiple map access into an array using constant without nullness")
+__success __retval(8) __log_level(2)
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -16) = {{(0|r[0-9])}}")
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
+unsigned int multiple_array_with_a_constant_no_nullness(void)
+{
+	__u32 __attribute__((aligned(8))) key = 1;
+	__u32 __attribute__((aligned(8))) key2 = 0;
+	struct test_val *val, *val2;
+
+	val = bpf_map_lookup_elem(&map_array, &key);
+	val->index = offsetof(struct test_val, foo);
+
+	val2 = bpf_map_lookup_elem(&map_array, &key2);
+	val2->index = offsetof(struct test_val, foo);
+
+	return val->index + val2->index;
+}
+
+SEC("socket")
+__description("valid map access into an array using natural aligned 32-bit constant 0 without nullness")
+__success __retval(4)
+unsigned int an_array_with_a_32bit_constant_0_no_nullness(void)
+{
+	/* Unlike the above tests, 32-bit zeroing is precisely tracked even
+	 * if writes are not aligned to BPF_REG_SIZE. This tests that our
+	 * STACK_ZERO handling functions.
+	 */
+	struct test_val *val;
+	__u32 key = 0;
+
+	val = bpf_map_lookup_elem(&map_array, &key);
+	val->index = offsetof(struct test_val, foo);
+
+	return val->index;
+}
+
+SEC("socket")
+__description("valid map access into a pcpu array using constant without nullness")
+__success __retval(4) __log_level(2)
+__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
+unsigned int a_pcpu_array_with_a_constant_no_nullness(void)
+{
+	__u32 __attribute__((aligned(8))) key = 1;
+	struct test_val *val;
+
+	val = bpf_map_lookup_elem(&map_array_pcpu, &key);
+	val->index = offsetof(struct test_val, foo);
+
+	return val->index;
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant without nullness")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int an_array_with_a_constant_no_nullness_out_of_bounds(void)
+{
+	/* Out of bounds */
+	__u32 __attribute__((aligned(8))) key = 3;
+	struct test_val *val;
+
+	val = bpf_map_lookup_elem(&map_array, &key);
+	val->index = offsetof(struct test_val, foo);
+
+	return val->index;
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant smaller than key_size")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int an_array_with_a_constant_too_small(void)
+{
+	__u32 __attribute__((aligned(8))) key;
+	struct test_val *val;
+
+	/* Mark entire key as STACK_MISC */
+	bpf_probe_read_user(&key, sizeof(key), NULL);
+
+	/* Spilling only the bottom byte results in a tnum const of 1.
+	 * We want to check that the verifier rejects it, as the spill is < 4B.
+	 */
+	*(__u8 *)&key = 1;
+	val = bpf_map_lookup_elem(&map_array, &key);
+
+	/* Should fail, as verifier cannot prove in-bound lookup */
+	val->index = offsetof(struct test_val, foo);
+
+	return val->index;
+}
+
+SEC("socket")
+__description("invalid map access into an array using constant larger than key_size")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int an_array_with_a_constant_too_big(void)
+{
+	struct test_val *val;
+	__u64 key = 1;
+
+	/* Even if the constant value is < max_entries, if the spill size is
+	 * larger than the key size, the set bits may not be where we expect them
+	 * to be on different endian architectures.
+	 */
+	val = bpf_map_lookup_elem(&map_array, &key);
+	val->index = offsetof(struct test_val, foo);
+
+	return val->index;
+}
+
+SEC("socket")
+__description("invalid elided lookup using const and non-const key")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int mixed_const_and_non_const_key_lookup(void)
+{
+	__u32 __attribute__((aligned(8))) key;
+	struct test_val *val;
+	__u32 rand;
+
+	rand = bpf_get_prandom_u32();
+	key = rand > 42 ? 1 : rand;
+	val = bpf_map_lookup_elem(&map_array, &key);
+
+	return val->index;
+}
+
+SEC("socket")
+__failure __msg("invalid read from stack R2 off=4096 size=4")
+__naked void key_lookup_at_invalid_fp(void)
+{
+	asm volatile ("					\
+	r1 = %[map_array] ll;				\
+	r2 = r10;					\
+	r2 += 4096;					\
+	call %[bpf_map_lookup_elem];			\
+	r0 = *(u64*)(r0 + 0);				\
+	exit;						\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_array)
+	: __clobber_all);
+}
+
+volatile __u32 __attribute__((aligned(8))) global_key;
+
+SEC("socket")
+__description("invalid elided lookup using non-stack key")
+__failure __msg("R0 invalid mem access 'map_value_or_null'")
+unsigned int non_stack_key_lookup(void)
+{
+	struct test_val *val;
+
+	global_key = 1;
+	val = bpf_map_lookup_elem(&map_array, (void *)&global_key);
+	val->index = offsetof(struct test_val, foo);
+
+	return val->index;
+}
+
 char _license[] SEC("license") = "GPL";