diff --git a/go.mod b/go.mod
index 214cec9f4100..17780363c7aa 100644
--- a/go.mod
+++ b/go.mod
@@ -4,6 +4,8 @@ go 1.24
 
 toolchain go1.24.2
 
+replace github.com/aquasecurity/tracee/types => ./types
+
 require (
 	github.com/IBM/fluent-forward-go v0.3.0
 	github.com/Masterminds/sprig/v3 v3.3.0
diff --git a/go.sum b/go.sum
index b705cc610efa..c0647e6d0ab3 100644
--- a/go.sum
+++ b/go.sum
@@ -407,8 +407,6 @@ github.com/aquasecurity/tracee/api v0.0.0-20250423121028-213b81a1b8f5 h1:zseTkmE
 github.com/aquasecurity/tracee/api v0.0.0-20250423121028-213b81a1b8f5/go.mod h1:fCLvZ7yle7SJoMNFSUCNVZo6Qf6xWXUmP0isGvRrIL8=
 github.com/aquasecurity/tracee/signatures/helpers v0.0.0-20250423143044-dcfcaf219805 h1:ZvXdP2rPm+7fTS102MAz/TcW++KalkMVIgQF0x3x5rQ=
 github.com/aquasecurity/tracee/signatures/helpers v0.0.0-20250423143044-dcfcaf219805/go.mod h1:yftFWA6fBKn0r2gCmO8DYKKSpaZjt/LYK7QgUF9XENo=
-github.com/aquasecurity/tracee/types v0.0.0-20250624132442-3fa6c15acc67 h1:APUSeNvyugFPBMNqDATTPXti5Ry2jiHy72JZD3hvikg=
-github.com/aquasecurity/tracee/types v0.0.0-20250624132442-3fa6c15acc67/go.mod h1:Garhl9pem8cnEgD0iHHwGcHn2HD5dteENk3YcOBPYU4=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
diff --git a/pkg/bufferdecoder/decoder.go b/pkg/bufferdecoder/decoder.go
index 3b3c81581999..956c43b465ff 100644
--- a/pkg/bufferdecoder/decoder.go
+++ b/pkg/bufferdecoder/decoder.go
@@ -159,8 +159,8 @@ func (decoder *EbpfDecoder) DecodeContext(eCtx *EventContext) error {
 	eCtx.Retval = int64(binary.LittleEndian.Uint64(decoder.buffer[offset+120 : offset+128]))
 	eCtx.StackID = binary.LittleEndian.Uint32(decoder.buffer[offset+128 : offset+132])
 	eCtx.ProcessorId = binary.LittleEndian.Uint16(decoder.buffer[offset+132 : offset+134])
-	eCtx.PoliciesVersion = binary.LittleEndian.Uint16(decoder.buffer[offset+134 : offset+136])
-	eCtx.MatchedPolicies = binary.LittleEndian.Uint64(decoder.buffer[offset+136 : offset+144])
+	eCtx.RulesVersion = binary.LittleEndian.Uint16(decoder.buffer[offset+134 : offset+136])
+	eCtx.MatchedRules = binary.LittleEndian.Uint64(decoder.buffer[offset+136 : offset+144])
 	// event_context end
 
 	decoder.cursor += eCtx.GetSizeBytes()
diff --git a/pkg/bufferdecoder/decoder_test.go b/pkg/bufferdecoder/decoder_test.go
index 2b96ecca7828..212c38e6cd36 100644
--- a/pkg/bufferdecoder/decoder_test.go
+++ b/pkg/bufferdecoder/decoder_test.go
@@ -40,8 +40,8 @@ func TestDecodeContext(t *testing.T) {
 		Retval:          0,
 		StackID:         0,
 		ProcessorId:     5,
-		PoliciesVersion: 11,
-		MatchedPolicies: 1917,
+		RulesVersion:    11,
+		MatchedRules:    1917,
 	}
 	err := binary.Write(buf, binary.LittleEndian, eCtxExpected)
 	assert.Equal(t, nil, err)
diff --git a/pkg/bufferdecoder/protocol.go b/pkg/bufferdecoder/protocol.go
index fb0aeb88df64..3bac9481f0f5 100644
--- a/pkg/bufferdecoder/protocol.go
+++ b/pkg/bufferdecoder/protocol.go
@@ -43,13 +43,13 @@ type EventContext struct {
 	ParentStartTime uint64
 	// task_context end
 
-	EventID         events.ID // int32
-	Syscall         int32
-	Retval          int64
-	StackID         uint32
-	ProcessorId     uint16
-	PoliciesVersion uint16
-	MatchedPolicies uint64
+	EventID      events.ID // int32
+	Syscall      int32
+	Retval       int64
+	StackID      uint32
+	ProcessorId  uint16
+	RulesVersion uint16
+	MatchedRules uint64
 }
 
 func (EventContext) GetSizeBytes() int {
diff --git a/pkg/cmd/flags/policy.go b/pkg/cmd/flags/policy.go
index 4eea38e5a760..9e3c135e11ec 100644
---
a/pkg/cmd/flags/policy.go +++ b/pkg/cmd/flags/policy.go @@ -20,10 +20,6 @@ func PrepareFilterMapsFromPolicies(policies []k8s.PolicyInterface) (PolicyScopeM return nil, nil, errfmt.Errorf("no policies provided") } - if len(policies) > policy.PolicyMax { - return nil, nil, errfmt.Errorf("too many policies provided, there is a limit of %d policies", policy.PolicyMax) - } - policyNames := make(map[string]bool) for pIdx, p := range policies { @@ -111,7 +107,7 @@ func CreatePolicies(policyScopeMap PolicyScopeMap, policyEventsMap PolicyEventMa return nil, InvalidFlagEmpty() } - pol, err := createSinglePolicy(policyIdx, policyScope, policyEvents, newBinary) + pol, err := createSinglePolicy(policyScope, policyEvents, newBinary) if err != nil { return nil, err } @@ -121,9 +117,8 @@ func CreatePolicies(policyScopeMap PolicyScopeMap, policyEventsMap PolicyEventMa return policies, nil } -func createSinglePolicy(policyIdx int, policyScope policyScopes, policyEvents policyEvents, newBinary bool) (*policy.Policy, error) { +func createSinglePolicy(policyScope policyScopes, policyEvents policyEvents, newBinary bool) (*policy.Policy, error) { p := policy.NewPolicy() - p.ID = policyIdx p.Name = policyScope.policyName if err := parseScopeFilters(p, policyScope.scopeFlags, newBinary); err != nil { diff --git a/pkg/cmd/flags/policy_test.go b/pkg/cmd/flags/policy_test.go index 6e5d33613423..e8adc50aa1a6 100644 --- a/pkg/cmd/flags/policy_test.go +++ b/pkg/cmd/flags/policy_test.go @@ -2161,7 +2161,6 @@ func TestCreateSinglePolicy(t *testing.T) { }, wantPolicy: func() *policy.Policy { p := policy.NewPolicy() - p.ID = 1 p.Name = "test-policy" p.CommFilter = filters.NewStringFilter(nil) _ = p.CommFilter.Parse("=bash") @@ -2209,7 +2208,6 @@ func TestCreateSinglePolicy(t *testing.T) { }, wantPolicy: func() *policy.Policy { p := policy.NewPolicy() - p.ID = 2 p.Name = "multi-filter" p.UIDFilter = filters.NewUInt32Filter() _ = p.UIDFilter.Parse("=1000") @@ -2239,7 +2237,7 @@ func TestCreateSinglePolicy(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() - got, err := createSinglePolicy(tc.policyIdx, tc.scope, tc.events, tc.newBinary) + got, err := createSinglePolicy(tc.scope, tc.events, tc.newBinary) if tc.wantErr != nil { require.Error(t, err) diff --git a/pkg/ebpf/c/common/context.h b/pkg/ebpf/c/common/context.h index 3bbba512165a..397f82a0ea13 100644 --- a/pkg/ebpf/c/common/context.h +++ b/pkg/ebpf/c/common/context.h @@ -15,7 +15,6 @@ statfunc void init_proc_info_scratch(u32, scratch_t *); statfunc proc_info_t *init_proc_info(u32, u32); statfunc void init_task_info_scratch(u32, scratch_t *); statfunc task_info_t *init_task_info(u32, u32); -statfunc event_config_t *get_event_config(u32, u16); statfunc int init_program_data(program_data_t *, void *, u32); statfunc int init_tailcall_program_data(program_data_t *, void *); statfunc bool reset_event(event_data_t *, u32); @@ -103,16 +102,6 @@ statfunc task_info_t *init_task_info(u32 tid, u32 scratch_idx) return bpf_map_lookup_elem(&task_info_map, &tid); } -statfunc event_config_t *get_event_config(u32 event_id, u16 policies_version) -{ - // TODO: we can remove this extra lookup by moving to per event rules_version - void *inner_events_map = bpf_map_lookup_elem(&events_map_version, &policies_version); - if (inner_events_map == NULL) - return NULL; - - return bpf_map_lookup_elem(inner_events_map, &event_id); -} - // clang-format off statfunc int init_program_data(program_data_t *p, void *ctx, u32 event_id) { @@ -182,31 +171,20 @@ statfunc int 
init_program_data(program_data_t *p, void *ctx, u32 event_id) } } - if (unlikely(p->event->context.policies_version != p->config->policies_version)) { - // copy policies_config to event data - long ret = bpf_probe_read_kernel( - &p->event->policies_config, sizeof(policies_config_t), &p->config->policies_config); - if (unlikely(ret != 0)) - return 0; - - p->event->context.policies_version = p->config->policies_version; - } + event_config_t *event_config = bpf_map_lookup_elem(&events_config_map, &event_id); + if (unlikely(event_config == NULL)) + return 0; - // default to match all policies until an event is selected - p->event->config.submit_for_policies = ~0ULL; + // copy event config to event data so we can have a constant event config across the program execution + // we do this since event configuration might be updated by userspace + long ret = bpf_probe_read_kernel(&p->event->config, sizeof(event_config_t), event_config); + if (unlikely(ret != 0)) + return 0; - if (event_id != NO_EVENT_SUBMIT) { - p->event->config.submit_for_policies = 0; - event_config_t *event_config = get_event_config(event_id, p->event->context.policies_version); - if (event_config != NULL) { - p->event->config.field_types = event_config->field_types; - p->event->config.submit_for_policies = event_config->submit_for_policies; - p->event->config.data_filter = event_config->data_filter; - } - } + p->event->context.rules_version = p->event->config.rules_version; - // initialize matched_policies to the policies that actually requested this event - p->event->context.matched_policies = p->event->config.submit_for_policies; + // initialize active_rules to the rules that actually requested this event + p->event->context.active_rules = p->event->config.submit_for_rules; return 1; } @@ -254,16 +232,17 @@ statfunc bool reset_event(event_data_t *event, u32 event_id) { event->context.eventid = event_id; reset_event_args_buf(event); - event->config.submit_for_policies = ~0ULL; - event_config_t *event_config = get_event_config(event_id, event->context.policies_version); - if (event_config == NULL) + event_config_t *event_config = bpf_map_lookup_elem(&events_config_map, &event_id); + if (unlikely(event_config == NULL)) + return false; + + long ret = bpf_probe_read_kernel(&event->config, sizeof(event_config_t), event_config); + if (unlikely(ret != 0)) return false; - event->config.field_types = event_config->field_types; - event->config.submit_for_policies = event_config->submit_for_policies; - event->context.matched_policies = event_config->submit_for_policies; - event->config.data_filter = event_config->data_filter; + event->context.rules_version = event->config.rules_version; + event->context.active_rules = event->config.submit_for_rules; return true; } diff --git a/pkg/ebpf/c/common/filtering.h b/pkg/ebpf/c/common/filtering.h index ef7752c5f8f5..2e202fe503d5 100644 --- a/pkg/ebpf/c/common/filtering.h +++ b/pkg/ebpf/c/common/filtering.h @@ -10,9 +10,7 @@ // PROTOTYPES -statfunc void *get_filter_map(void *, u16); statfunc void *get_event_filter_map(void *, u16, u32); -statfunc u64 uint_filter_range_matches(u64, void *, u64, u64, u64); statfunc u64 binary_filter_matches(u64, void *, proc_info_t *); statfunc u64 equality_filter_matches(u64, void *, void *); statfunc u64 bool_filter_matches(u64, bool); @@ -20,8 +18,8 @@ statfunc u64 match_scope_filters(program_data_t *); statfunc u64 match_data_filters(program_data_t *, u8); statfunc bool evaluate_scope_filters(program_data_t *); statfunc bool evaluate_data_filters(program_data_t 
*, u8); -statfunc bool event_is_selected(u32, u16); -statfunc bool policies_matched(event_data_t *); +statfunc bool event_is_selected(u32); +statfunc bool rules_matched(event_data_t *); // CONSTANTS @@ -30,46 +28,15 @@ statfunc bool policies_matched(event_data_t *); // FUNCTIONS -// get_filter_map returns the filter map for the given version and outer map -statfunc void *get_filter_map(void *outer_map, u16 version) -{ - return bpf_map_lookup_elem(outer_map, &version); -} - // get_event_filter_map returns the filter map for the given outer map, version and event id statfunc void *get_event_filter_map(void *outer_map, u16 version, u32 event_id) { - policy_key_t policy_key = { + filter_version_key_t key = { .version = version, .event_id = event_id, }; - return bpf_map_lookup_elem(outer_map, &policy_key); -} - -statfunc u64 -uint_filter_range_matches(u64 match_if_key_missing, void *filter_map, u64 value, u64 max, u64 min) -{ - // check equality_filter_matches() for more info - - u64 equals_in_policies = 0; - u64 key_used_in_policies = 0; - - if (filter_map) { - eq_t *equality = bpf_map_lookup_elem(filter_map, &value); - if (equality != NULL) { - equals_in_policies = equality->equals_in_policies; - key_used_in_policies = equality->key_used_in_policies; - } - } - - if ((max != FILTER_MAX_NOT_SET) && (value >= max)) - return equals_in_policies; - - if ((min != FILTER_MIN_NOT_SET) && (value <= min)) - return equals_in_policies; - - return equals_in_policies | (match_if_key_missing & ~key_used_in_policies); + return bpf_map_lookup_elem(outer_map, &key); } statfunc u64 binary_filter_matches(u64 match_if_key_missing, @@ -78,8 +45,8 @@ statfunc u64 binary_filter_matches(u64 match_if_key_missing, { // check equality_filter_matches() for more info - u64 equals_in_policies = 0; - u64 key_used_in_policies = 0; + u64 equals_in_rules = 0; + u64 key_used_in_rules = 0; if (filter_map) { eq_t *equality = bpf_map_lookup_elem(filter_map, proc_info->binary.path); @@ -88,23 +55,23 @@ statfunc u64 binary_filter_matches(u64 match_if_key_missing, equality = bpf_map_lookup_elem(filter_map, &proc_info->binary); } if (equality != NULL) { - equals_in_policies = equality->equals_in_policies; - key_used_in_policies = equality->key_used_in_policies; + equals_in_rules = equality->equals_in_rules; + key_used_in_rules = equality->key_used_in_rules; } } - return equals_in_policies | (match_if_key_missing & ~key_used_in_policies); + return equals_in_rules | (match_if_key_missing & ~key_used_in_rules); } statfunc u64 equality_filter_matches(u64 match_if_key_missing, void *filter_map, void *key) { // check match_scope_filters() for initial info // - // policy 2: comm=who - // policy 3: comm=ping - // policy 4: comm!=who + // rule 2: comm=who + // rule 3: comm=ping + // rule 4: comm!=who // - // match_if_key_missing = 0000 1000, since policy 4 has "not equal" for comm filter + // match_if_key_missing = 0000 1000, since rule 4 has "not equal" for comm filter // filter_map = comm_filter // key = "who" | "ping" // @@ -112,54 +79,54 @@ statfunc u64 equality_filter_matches(u64 match_if_key_missing, void *filter_map, // // considering an event from "who" command // - // equals_in_policies = 0000 0010, since policy 2 has "equal" for comm filter - // key_used_in_policies = 0000 1010, since policy 2 and 4 are using the key "who" + // equals_in_rules = 0000 0010, since rule 2 has "equal" for comm filter + // key_used_in_rules = 0000 1010, since rule 2 and 4 are using the key "who" // - // return = equals_in_policies | (match_if_key_missing & 
~key_used_in_policies) + // return = equals_in_rules | (match_if_key_missing & ~key_used_in_rules) // 0000 0010 | // (0000 1000 & 1111 0101) -> 0000 0000 // // 0000 0010 | // 0000 0000 // --------- - // 0000 0010 = (policy 2 matched) + // 0000 0010 = (rule 2 matched) // // considering an event from "ping" command // - // equals_in_policies = 0000 0100, since policy 3 has "equal" for comm filter - // key_used_in_policies = 0000 0100, since policy 3 is set for comm filter + // equals_in_rules = 0000 0100, since rule 3 has "equal" for comm filter + // key_used_in_rules = 0000 0100, since rule 3 is set for comm filter // - // return = equals_in_policies | (match_if_key_missing & ~key_used_in_policies) + // return = equals_in_rules | (match_if_key_missing & ~key_used_in_rules) // 0000 0100 | // (0000 1000 & 1111 1011) -> 0000 1000 // // 0000 0100 | // 0000 1000 // --------- - // 0000 1100 = (policy 3 and 4 matched) + // 0000 1100 = (rule 3 and 4 matched) - u64 equals_in_policies = 0; - u64 key_used_in_policies = 0; + u64 equals_in_rules = 0; + u64 key_used_in_rules = 0; if (filter_map) { eq_t *equality = bpf_map_lookup_elem(filter_map, key); if (equality != NULL) { - equals_in_policies = equality->equals_in_policies; - key_used_in_policies = equality->key_used_in_policies; + equals_in_rules = equality->equals_in_rules; + key_used_in_rules = equality->key_used_in_rules; } } // match if: - // 1. key is used and equality matches (equals_in_policies) + // 1. key is used and equality matches (equals_in_rules) // 2. key is NOT used and the default action is to match - return equals_in_policies | (match_if_key_missing & ~key_used_in_policies); + return equals_in_rules | (match_if_key_missing & ~key_used_in_rules); } statfunc u64 bool_filter_matches(u64 match_bitmap, bool bool_value) { // check match_scope_filters() for initial info // - // policy 5: container=true + // rule 5: container=true // // considering an event from a container // @@ -185,186 +152,157 @@ statfunc u64 bool_filter_matches(u64 match_bitmap, bool bool_value) statfunc u64 match_scope_filters(program_data_t *p) { task_context_t *context = &p->event->context.task; + scope_filters_config_t *filters_cfg = &p->event->config.scope_filters; + u32 event_id = p->event->context.eventid; + u16 version = p->event->context.rules_version; + void *filter_map = NULL; + u64 res = ~0ULL; // Don't monitor self if (p->config->tracee_pid == context->host_pid) return 0; - proc_info_t *proc_info = p->proc_info; - policies_config_t *policies_cfg = &p->event->policies_config; - u64 res = ~0ULL; - // // boolean filters (not using versioned filter maps) // - if (policies_cfg->cont_filter_enabled) { + if (filters_cfg->cont_filter_enabled) { bool is_container = false; u8 state = p->task_info->container_state; if (state == CONTAINER_STARTED || state == CONTAINER_EXISTED) is_container = true; - u64 match_bitmap = policies_cfg->cont_filter_match_if_key_missing; - u64 mask = ~policies_cfg->cont_filter_enabled; + u64 match_bitmap = filters_cfg->cont_filter_match_if_key_missing; + u64 mask = ~filters_cfg->cont_filter_enabled; - // For policies that have this filter disabled we want to set the matching bits using 'mask' + // For rules that have this filter disabled we want to set the matching bits using 'mask' res &= bool_filter_matches(match_bitmap, is_container) | mask; } - if (policies_cfg->new_cont_filter_enabled) { + if (filters_cfg->new_cont_filter_enabled) { bool is_new_container = false; if (p->task_info->container_state == CONTAINER_STARTED) is_new_container 
= true; - u64 match_bitmap = policies_cfg->new_cont_filter_match_if_key_missing; - u64 mask = ~policies_cfg->new_cont_filter_enabled; + u64 match_bitmap = filters_cfg->new_cont_filter_match_if_key_missing; + u64 mask = ~filters_cfg->new_cont_filter_enabled; res &= bool_filter_matches(match_bitmap, is_new_container) | mask; } - if (policies_cfg->new_pid_filter_enabled) { - u64 match_bitmap = policies_cfg->new_pid_filter_match_if_key_missing; - u64 mask = ~policies_cfg->new_pid_filter_enabled; + if (filters_cfg->new_pid_filter_enabled) { + u64 match_bitmap = filters_cfg->new_pid_filter_match_if_key_missing; + u64 mask = ~filters_cfg->new_pid_filter_enabled; - res &= bool_filter_matches(match_bitmap, proc_info->new_proc) | mask; + res &= bool_filter_matches(match_bitmap, p->proc_info->new_proc) | mask; } // // equality filters (using versioned filter maps) // - u16 version = p->event->context.policies_version; - void *filter_map = NULL; - - if (policies_cfg->pid_filter_enabled) { - u64 match_if_key_missing = policies_cfg->pid_filter_match_if_key_missing; - u64 mask = ~policies_cfg->pid_filter_enabled; - u64 max = policies_cfg->pid_max; - u64 min = policies_cfg->pid_min; + if (filters_cfg->pid_filter_enabled) { + u64 match_if_key_missing = filters_cfg->pid_filter_match_if_key_missing; + u64 mask = ~filters_cfg->pid_filter_enabled; - filter_map = get_filter_map(&pid_filter_version, version); + filter_map = get_event_filter_map(&pid_filter_version, version, event_id); // the user might have given us a tid - check for it too - res &= uint_filter_range_matches( - match_if_key_missing, filter_map, context->host_pid, max, min) | - uint_filter_range_matches( - match_if_key_missing, filter_map, context->host_tid, max, min) | - mask; + res &= equality_filter_matches(match_if_key_missing, filter_map, &context->host_pid) | + equality_filter_matches(match_if_key_missing, filter_map, &context->host_tid) | mask; } - if (policies_cfg->uid_filter_enabled) { + if (filters_cfg->uid_filter_enabled) { context->uid = bpf_get_current_uid_gid(); - u64 match_if_key_missing = policies_cfg->uid_filter_match_if_key_missing; - u64 mask = ~policies_cfg->uid_filter_enabled; - u64 max = policies_cfg->uid_max; - u64 min = policies_cfg->uid_min; - - filter_map = get_filter_map(&uid_filter_version, version); - res &= uint_filter_range_matches(match_if_key_missing, filter_map, context->uid, max, min) | - mask; + u64 match_if_key_missing = filters_cfg->uid_filter_match_if_key_missing; + u64 mask = ~filters_cfg->uid_filter_enabled; + + filter_map = get_event_filter_map(&uid_filter_version, version, event_id); + res &= equality_filter_matches(match_if_key_missing, filter_map, &context->uid) | mask; } - if (policies_cfg->mnt_ns_filter_enabled) { + if (filters_cfg->mnt_ns_filter_enabled) { context->mnt_id = get_task_mnt_ns_id(p->event->task); - u64 match_if_key_missing = policies_cfg->mnt_ns_filter_match_if_key_missing; - u64 mask = ~policies_cfg->mnt_ns_filter_enabled; + u64 match_if_key_missing = filters_cfg->mnt_ns_filter_match_if_key_missing; + u64 mask = ~filters_cfg->mnt_ns_filter_enabled; - filter_map = get_filter_map(&mnt_ns_filter_version, version); + filter_map = get_event_filter_map(&mnt_ns_filter_version, version, event_id); res &= equality_filter_matches(match_if_key_missing, filter_map, &context->mnt_id) | mask; } - if (policies_cfg->pid_ns_filter_enabled) { + if (filters_cfg->pid_ns_filter_enabled) { context->pid_id = get_task_pid_ns_id(p->event->task); - u64 match_if_key_missing = 
policies_cfg->pid_ns_filter_match_if_key_missing; - u64 mask = ~policies_cfg->pid_ns_filter_enabled; + u64 match_if_key_missing = filters_cfg->pid_ns_filter_match_if_key_missing; + u64 mask = ~filters_cfg->pid_ns_filter_enabled; - filter_map = get_filter_map(&pid_ns_filter_version, version); + filter_map = get_event_filter_map(&pid_ns_filter_version, version, event_id); res &= equality_filter_matches(match_if_key_missing, filter_map, &context->pid_id) | mask; } - if (policies_cfg->uts_ns_filter_enabled) { + if (filters_cfg->uts_ns_filter_enabled) { char *uts_name = get_task_uts_name(p->event->task); if (uts_name) bpf_probe_read_kernel_str(&context->uts_name, TASK_COMM_LEN, uts_name); - u64 match_if_key_missing = policies_cfg->uts_ns_filter_match_if_key_missing; - u64 mask = ~policies_cfg->uts_ns_filter_enabled; + u64 match_if_key_missing = filters_cfg->uts_ns_filter_match_if_key_missing; + u64 mask = ~filters_cfg->uts_ns_filter_enabled; - filter_map = get_filter_map(&uts_ns_filter_version, version); + filter_map = get_event_filter_map(&uts_ns_filter_version, version, event_id); res &= equality_filter_matches(match_if_key_missing, filter_map, &context->uts_name) | mask; } - if (policies_cfg->comm_filter_enabled) { + if (filters_cfg->comm_filter_enabled) { bpf_get_current_comm(&context->comm, sizeof(context->comm)); - u64 match_if_key_missing = policies_cfg->comm_filter_match_if_key_missing; - u64 mask = ~policies_cfg->comm_filter_enabled; + u64 match_if_key_missing = filters_cfg->comm_filter_match_if_key_missing; + u64 mask = ~filters_cfg->comm_filter_enabled; - filter_map = get_filter_map(&comm_filter_version, version); + filter_map = get_event_filter_map(&comm_filter_version, version, event_id); res &= equality_filter_matches(match_if_key_missing, filter_map, &context->comm) | mask; } - if (policies_cfg->cgroup_id_filter_enabled) { + if (filters_cfg->cgroup_id_filter_enabled) { u32 cgroup_id_lsb = context->cgroup_id; - u64 match_if_key_missing = policies_cfg->cgroup_id_filter_match_if_key_missing; - u64 mask = ~policies_cfg->cgroup_id_filter_enabled; + u64 match_if_key_missing = filters_cfg->cgroup_id_filter_match_if_key_missing; + u64 mask = ~filters_cfg->cgroup_id_filter_enabled; - filter_map = get_filter_map(&cgroup_id_filter_version, version); + filter_map = get_event_filter_map(&cgroup_id_filter_version, version, event_id); res &= equality_filter_matches(match_if_key_missing, filter_map, &cgroup_id_lsb) | mask; } - if (policies_cfg->proc_tree_filter_enabled) { - u64 match_if_key_missing = policies_cfg->proc_tree_filter_match_if_key_missing; - u64 mask = ~policies_cfg->proc_tree_filter_enabled; - - filter_map = get_filter_map(&process_tree_map_version, version); - res &= equality_filter_matches(match_if_key_missing, filter_map, &context->host_pid) | mask; - } - - if (policies_cfg->bin_path_filter_enabled) { - u64 match_if_key_missing = policies_cfg->bin_path_filter_match_if_key_missing; - u64 mask = ~policies_cfg->bin_path_filter_enabled; - - filter_map = get_filter_map(&binary_filter_version, version); - res &= binary_filter_matches(match_if_key_missing, filter_map, proc_info) | mask; - } - - // - // follow filter - // + if (filters_cfg->bin_path_filter_enabled) { + u64 match_if_key_missing = filters_cfg->bin_path_filter_match_if_key_missing; + u64 mask = ~filters_cfg->bin_path_filter_enabled; - if (policies_cfg->follow_filter_enabled) { - // trace this proc anyway if follow was set by a scope - res |= proc_info->follow_in_scopes & policies_cfg->follow_filter_enabled; + filter_map = 
get_event_filter_map(&binary_filter_version, version, event_id); + res &= binary_filter_matches(match_if_key_missing, filter_map, p->proc_info) | mask; } - // Make sure only enabled policies are set in the bitmap (other bits are invalid) - return res & policies_cfg->enabled_policies; + return res; } // Function to evaluate data filters based on the program data and index. -// Returns policies bitmap. +// Returns rules bitmap. // // Parameters: // - program_data_t *p: Pointer to the program data structure. // - u8 index: Index of the string data to be used as filter. statfunc u64 match_data_filters(program_data_t *p, u8 index) { - policies_config_t *policies_cfg = &p->event->policies_config; // Retrieve the string filter for the current event // TODO: Dynamically determine the filter and type based on policy configuration string_filter_config_t *str_filter = &p->event->config.data_filter.string; if (!(str_filter->exact_enabled || str_filter->prefix_enabled || str_filter->suffix_enabled)) - return policies_cfg->enabled_policies; + return ~0ULL; u64 res = 0; - u64 explicit_disable_policies = 0; - u64 explicit_enable_policies = 0; - u64 default_enable_policies = 0; - // Determine policies that do not use any type of string filter (exact, prefix, suffix) - u64 mask_no_str_filter_policies = ~str_filter->exact_enabled & ~str_filter->prefix_enabled & - ~str_filter->suffix_enabled; + u64 explicit_disable_rules = 0; + u64 explicit_enable_rules = 0; + u64 default_enable_rules = 0; + // Determine rules that do not use any type of string filter (exact, prefix, suffix) + u64 mask_no_str_filter_rules = ~str_filter->exact_enabled & ~str_filter->prefix_enabled & + ~str_filter->suffix_enabled; void *filter_map = NULL; // event ID u32 eventid = p->event->context.eventid; - u16 version = p->event->context.policies_version; + u16 version = p->event->context.rules_version; // Exact match if (str_filter->exact_enabled) { @@ -381,9 +319,9 @@ statfunc u64 match_data_filters(program_data_t *p, u8 index) u64 match_if_key_missing = str_filter->exact_match_if_key_missing; filter_map = get_event_filter_map(&data_filter_exact_version, version, eventid); res = equality_filter_matches(match_if_key_missing, filter_map, key); - explicit_enable_policies |= (res & ~match_if_key_missing); - explicit_disable_policies |= (~res & match_if_key_missing); - default_enable_policies |= (res & match_if_key_missing); + explicit_enable_rules |= (res & ~match_if_key_missing); + explicit_disable_rules |= (~res & match_if_key_missing); + default_enable_rules |= (res & match_if_key_missing); } // Prefix match @@ -404,9 +342,9 @@ statfunc u64 match_data_filters(program_data_t *p, u8 index) u64 match_if_key_missing = str_filter->prefix_match_if_key_missing; filter_map = get_event_filter_map(&data_filter_prefix_version, version, eventid); res = equality_filter_matches(match_if_key_missing, filter_map, key); - explicit_enable_policies |= (res & ~match_if_key_missing); - explicit_disable_policies |= (~res & match_if_key_missing); - default_enable_policies |= (res & match_if_key_missing); + explicit_enable_rules |= (res & ~match_if_key_missing); + explicit_disable_rules |= (~res & match_if_key_missing); + default_enable_rules |= (res & match_if_key_missing); } // Suffix match @@ -425,60 +363,57 @@ statfunc u64 match_data_filters(program_data_t *p, u8 index) u64 match_if_key_missing = str_filter->suffix_match_if_key_missing; filter_map = get_event_filter_map(&data_filter_suffix_version, version, eventid); res = 
equality_filter_matches(match_if_key_missing, filter_map, key); - explicit_enable_policies |= (res & ~match_if_key_missing); - explicit_disable_policies |= (~res & match_if_key_missing); - default_enable_policies |= (res & match_if_key_missing); + explicit_enable_rules |= (res & ~match_if_key_missing); + explicit_disable_rules |= (~res & match_if_key_missing); + default_enable_rules |= (res & match_if_key_missing); } - // Match policies based on the following conditions: + // Match rules based on the following conditions: // - // 1. Explicitly Enabled Policies: A policy is enabled if at least one of the three - // filter types explicitly enables it (explicit_enable_policies). - // 2. Default Enabled Policies: Policies that are enabled by default (default_enable_policies) - // remain enabled only if they are not explicitly disabled (explicit_disable_policies). - res = explicit_enable_policies | (default_enable_policies & ~explicit_disable_policies); - // Combine policies that use string filters with those that do not - res |= mask_no_str_filter_policies; - - // Make sure only enabled policies are set in the bitmap (other bits are invalid) - return res & policies_cfg->enabled_policies; + // 1. Explicitly Enabled Rules: A rule is enabled if at least one of the three + // filter types explicitly enables it (explicit_enable_rules). + // 2. Default Enabled Rules: Rules that are enabled by default (default_enable_rules) + // remain enabled only if they are not explicitly disabled (explicit_disable_rules). + res = explicit_enable_rules | (default_enable_rules & ~explicit_disable_rules); + // Combine rules that use string filters with those that do not + res |= mask_no_str_filter_rules; + + return res; } statfunc bool evaluate_scope_filters(program_data_t *p) { - u64 matched_policies = match_scope_filters(p); - p->event->context.matched_policies &= matched_policies; - return p->event->context.matched_policies != 0; + u64 matched_scope_filters = match_scope_filters(p); + p->event->context.active_rules &= matched_scope_filters; + return p->event->context.active_rules != 0 || p->event->config.has_overflow; } statfunc bool evaluate_data_filters(program_data_t *p, u8 index) { u64 matched_data_filters = match_data_filters(p, index); - p->event->context.matched_policies &= matched_data_filters; - return p->event->context.matched_policies != 0; + p->event->context.active_rules &= matched_data_filters; + return p->event->context.active_rules != 0 || p->event->config.has_overflow; } -statfunc bool policies_matched(event_data_t *event) +statfunc bool rules_matched(event_data_t *event) { - return event->context.matched_policies != 0; + return event->context.active_rules != 0 || event->config.has_overflow; } -statfunc bool event_is_selected(u32 event_id, u16 policies_version) +statfunc bool event_is_selected(u32 event_id) { - void *inner_events_map = bpf_map_lookup_elem(&events_map_version, &policies_version); - if (inner_events_map == NULL) - return 0; - - event_config_t *event_config = bpf_map_lookup_elem(inner_events_map, &event_id); + event_config_t *event_config = bpf_map_lookup_elem(&events_config_map, &event_id); if (event_config == NULL) return 0; - return event_config->submit_for_policies != 0; + return event_config->submit_for_rules != 0; } statfunc u64 get_scopes_to_follow(program_data_t *p) { - return match_scope_filters(p); + return 0; + // return match_scope_filters(p); + // TODO: fixme } #endif diff --git a/pkg/ebpf/c/common/network.h b/pkg/ebpf/c/common/network.h index 89c453c5e2fa..45657e7c9146 
100644 --- a/pkg/ebpf/c/common/network.h +++ b/pkg/ebpf/c/common/network.h @@ -156,8 +156,8 @@ typedef struct net_task_context { task_context_t taskctx; s32 syscall; u16 padding; - u16 policies_version; - u64 matched_policies; + u16 rules_version; + u64 active_rules; } net_task_context_t; struct { diff --git a/pkg/ebpf/c/maps.h b/pkg/ebpf/c/maps.h index 57dd08b72525..728393c25277 100644 --- a/pkg/ebpf/c/maps.h +++ b/pkg/ebpf/c/maps.h @@ -419,24 +419,6 @@ typedef struct elf_files_map elf_files_map_t; // #define MAX_FILTER_VERSION 64 // max amount of filter versions to track -struct policies_config_map { - __uint(type, BPF_MAP_TYPE_HASH); - __uint(max_entries, 1); - __type(key, u32); - __type(value, policies_config_t); -} policies_config_map SEC(".maps"); - -typedef struct policies_config_map policies_config_map_t; - -// map of policies config maps -struct policies_config_version { - __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); - __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); - __array(values, policies_config_map_t); -} policies_config_version SEC(".maps"); - -typedef struct policies_config_version policies_config_version_t; // filter events by UID prototype, for specific UIDs either by == or != struct uid_filter { @@ -452,7 +434,7 @@ typedef struct uid_filter uid_filter_t; struct uid_filter_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); + __type(key, filter_version_key_t); __array(values, uid_filter_t); } uid_filter_version SEC(".maps"); @@ -472,7 +454,7 @@ typedef struct pid_filter pid_filter_t; struct pid_filter_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); + __type(key, filter_version_key_t); __array(values, pid_filter_t); } pid_filter_version SEC(".maps"); @@ -492,7 +474,7 @@ typedef struct mnt_ns_filter mnt_ns_filter_t; struct mnt_ns_filter_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); + __type(key, filter_version_key_t); __array(values, mnt_ns_filter_t); } mnt_ns_filter_version SEC(".maps"); @@ -512,7 +494,7 @@ typedef struct pid_ns_filter pid_ns_filter_t; struct pid_ns_filter_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); + __type(key, filter_version_key_t); __array(values, pid_ns_filter_t); } pid_ns_filter_version SEC(".maps"); @@ -532,7 +514,7 @@ typedef struct uts_ns_filter uts_ns_filter_t; struct uts_ns_filter_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); + __type(key, filter_version_key_t); __array(values, uts_ns_filter_t); } uts_ns_filter_version SEC(".maps"); @@ -552,7 +534,7 @@ typedef struct data_filter_exact data_filter_exact_t; struct data_filter_exact_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, policy_key_t); + __type(key, filter_version_key_t); __array(values, data_filter_exact_t); } data_filter_exact_version SEC(".maps"); @@ -573,7 +555,7 @@ typedef struct data_filter_suffix data_filter_suffix_t; struct data_filter_suffix_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, policy_key_t); + __type(key, filter_version_key_t); __array(values, data_filter_suffix_t); } data_filter_suffix_version SEC(".maps"); @@ -594,7 +576,7 @@ typedef struct data_filter_prefix data_filter_prefix_t; struct data_filter_prefix_version { 
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, policy_key_t); + __type(key, filter_version_key_t); __array(values, data_filter_prefix_t); } data_filter_prefix_version SEC(".maps"); @@ -614,7 +596,7 @@ typedef struct comm_filter comm_filter_t; struct comm_filter_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); + __type(key, filter_version_key_t); __array(values, comm_filter_t); } comm_filter_version SEC(".maps"); @@ -634,7 +616,7 @@ typedef struct cgroup_id_filter cgroup_id_filter_t; struct cgroup_id_filter_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); + __type(key, filter_version_key_t); __array(values, cgroup_id_filter_t); } cgroup_id_filter_version SEC(".maps"); @@ -654,51 +636,21 @@ typedef struct binary_filter binary_filter_t; struct binary_filter_version { __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); + __type(key, filter_version_key_t); __array(values, binary_filter_t); } binary_filter_version SEC(".maps"); typedef struct binary_filter_version binary_filter_version_t; -// filter events by the ancestry of the traced process -struct process_tree_map { - __uint(type, BPF_MAP_TYPE_HASH); - __uint(max_entries, 10240); - __type(key, u32); - __type(value, eq_t); -} process_tree_map SEC(".maps"); - -typedef struct process_tree_map process_tree_map_t; - -// map of process tree maps -struct process_tree_map_version { - __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); - __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); - __array(values, process_tree_map_t); -} process_tree_map_version SEC(".maps"); - -typedef struct process_tree_map_version process_tree_map_version_t; - // map to persist event configuration data -struct events_map { +struct events_config_map { __uint(type, BPF_MAP_TYPE_HASH); __uint(max_entries, MAX_EVENT_ID); __type(key, u32); __type(value, event_config_t); -} events_map SEC(".maps"); - -typedef struct events_map events_map_t; - -// map of events maps -struct events_map_version { - __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); - __uint(max_entries, MAX_FILTER_VERSION); - __type(key, u16); - __array(values, events_map_t); -} events_map_version SEC(".maps"); +} events_config_map SEC(".maps"); -typedef struct events_map_version events_map_version_t; +typedef struct events_config_map events_config_map_t; // // perf event maps diff --git a/pkg/ebpf/c/tracee.bpf.c b/pkg/ebpf/c/tracee.bpf.c index 8a5d3f93df9f..5bf1ca6f6f29 100644 --- a/pkg/ebpf/c/tracee.bpf.c +++ b/pkg/ebpf/c/tracee.bpf.c @@ -140,18 +140,27 @@ int sys_enter_init(struct bpf_raw_tracepoint_args *ctx) SEC("raw_tracepoint/sys_enter_submit") int sys_enter_submit(struct bpf_raw_tracepoint_args *ctx) { - program_data_t p = {}; - if (!init_program_data(&p, ctx, NO_EVENT_SUBMIT)) - return 0; + uint id = ctx->args[1]; + struct task_struct *task = (struct task_struct *) bpf_get_current_task(); + if (is_compat(task)) { + // Translate 32bit syscalls to 64bit syscalls, so we can send to the correct handler + u32 *id_64 = bpf_map_lookup_elem(&sys_32_to_64_map, &id); + if (id_64 == 0) + return 0; - syscall_data_t *sys = &p.task_info->syscall_data; + id = *id_64; + } - if (!reset_event(p.event, sys->id)) + program_data_t p = {}; + + if (!init_program_data(&p, ctx, id)) return 0; if (!evaluate_scope_filters(&p)) goto out; + syscall_data_t *sys = &p.task_info->syscall_data; + if (p.config->options & 
OPT_TRANSLATE_FD_FILEPATH && has_syscall_fd_arg(sys->id)) { // Process filepath related to fd argument uint fd_num = get_syscall_fd_num_from_arg(sys->id, &sys->args); @@ -265,20 +274,29 @@ int sys_exit_init(struct bpf_raw_tracepoint_args *ctx) SEC("raw_tracepoint/sys_exit_submit") int sys_exit_submit(struct bpf_raw_tracepoint_args *ctx) { - program_data_t p = {}; - if (!init_program_data(&p, ctx, NO_EVENT_SUBMIT)) - return 0; + struct pt_regs *regs = (struct pt_regs *) ctx->args[0]; + uint id = get_syscall_id_from_regs(regs); + struct task_struct *task = (struct task_struct *) bpf_get_current_task(); + if (is_compat(task)) { + // Translate 32bit syscalls to 64bit syscalls, so we can send to the correct handler + u32 *id_64 = bpf_map_lookup_elem(&sys_32_to_64_map, &id); + if (id_64 == 0) + return 0; - syscall_data_t *sys = &p.task_info->syscall_data; + id = *id_64; + } - if (!reset_event(p.event, sys->id)) - return 0; + program_data_t p = {}; - long ret = ctx->args[1]; + if (!init_program_data(&p, ctx, id)) + return 0; if (!evaluate_scope_filters(&p)) goto out; + long ret = ctx->args[1]; + syscall_data_t *sys = &p.task_info->syscall_data; + // exec syscalls are different since the pointers are invalid after a successful exec. // we use a special handler (tail called) to only handle failed execs on syscall exit. if (sys->id == SYSCALL_EXECVE || sys->id == SYSCALL_EXECVEAT) @@ -638,8 +656,7 @@ int tracepoint__sched__sched_process_fork(struct bpf_raw_tracepoint_args *ctx) task->context.start_time = child_start_time; // Track thread stack if needed - if (event_is_selected(SUSPICIOUS_SYSCALL_SOURCE, p.event->context.policies_version) || - event_is_selected(STACK_PIVOT, p.event->context.policies_version)) + if (event_is_selected(SUSPICIOUS_SYSCALL_SOURCE) || event_is_selected(STACK_PIVOT)) update_thread_stack(ctx, task, child); // Update the proc_info_map with the new process's info (from parent) @@ -666,27 +683,6 @@ int tracepoint__sched__sched_process_fork(struct bpf_raw_tracepoint_args *ctx) c_proc_info->new_proc = true; // started after tracee (new_pid filter) } - // Update the process tree map (filter related) if the parent has an entry. - - policies_config_t *policies_cfg = &p.event->policies_config; - - if (policies_cfg->proc_tree_filter_enabled) { - u16 version = p.event->context.policies_version; - // Give the compiler a hint about the map type, otherwise libbpf will complain - // about missing type information. i.e.: "can't determine value size for type". 
- process_tree_map_t *inner_proc_tree_map = &process_tree_map; - - inner_proc_tree_map = bpf_map_lookup_elem(&process_tree_map_version, &version); - if (inner_proc_tree_map != NULL) { - eq_t *tgid_filtered = bpf_map_lookup_elem(inner_proc_tree_map, &parent_pid); - if (tgid_filtered) { - ret = bpf_map_update_elem(inner_proc_tree_map, &child_pid, tgid_filtered, BPF_ANY); - if (ret < 0) - tracee_log(ctx, BPF_LOG_LVL_DEBUG, BPF_LOG_ID_MAP_UPDATE_ELEM, ret); - } - } - } - if (!evaluate_scope_filters(&p)) return 0; @@ -1547,12 +1543,12 @@ int tracepoint__sched__sched_process_exit(struct bpf_raw_tracepoint_args *ctx) (!(task_flags & PF_KTHREAD) && (p.event->context.syscall < 0))) p.event->context.syscall = NO_SYSCALL; - // evaluate matched_policies before removing this pid from the maps + // evaluate active_rules before removing this pid from the maps evaluate_scope_filters(&p); bpf_map_delete_elem(&task_info_map, &p.event->context.task.host_tid); - if (!policies_matched(p.event)) + if (!rules_matched(p.event)) return 0; bool group_dead = false; @@ -1596,22 +1592,6 @@ int tracepoint__sched__sched_process_free(struct bpf_raw_tracepoint_args *ctx) // if tgid task is freed, we know for sure that the process exited // so we can safely remove it from the process map bpf_map_delete_elem(&proc_info_map, &tgid); - - u32 zero = 0; - config_entry_t *cfg = bpf_map_lookup_elem(&config_map, &zero); - if (unlikely(cfg == NULL)) - return 0; - - // remove it only from the current policies version map - u16 version = cfg->policies_version; - - // Give the compiler a hint about the map type, otherwise libbpf will complain - // about missing type information. i.e.: "can't determine value size for type". - process_tree_map_t *inner_proc_tree_map = &process_tree_map; - - inner_proc_tree_map = bpf_map_lookup_elem(&process_tree_map_version, &version); - if (inner_proc_tree_map != NULL) - bpf_map_delete_elem(inner_proc_tree_map, &tgid); } return 0; @@ -2904,7 +2884,7 @@ int BPF_KPROBE(trace_security_socket_accept) struct pt_regs *task_regs = get_current_task_pt_regs(); - if (event_is_selected(SOCKET_ACCEPT, p.event->context.policies_version)) { + if (event_is_selected(SOCKET_ACCEPT)) { args_t args = {}; args.args[0] = (unsigned long) sock; args.args[1] = (unsigned long) new_sock; @@ -3322,7 +3302,7 @@ statfunc int capture_file_write(struct pt_regs *ctx, u32 event_id, bool is_buf) del_args(event_id); program_data_t p = {}; - if (!init_program_data(&p, ctx, NO_EVENT_SUBMIT)) + if (!init_program_data(&p, ctx, POLICY_SCOPES)) return 0; if (!evaluate_scope_filters(&p)) @@ -3386,7 +3366,7 @@ statfunc int capture_file_read(struct pt_regs *ctx, u32 event_id, bool is_buf) del_args(event_id); program_data_t p = {}; - if (!init_program_data(&p, ctx, NO_EVENT_SUBMIT)) + if (!init_program_data(&p, ctx, POLICY_SCOPES)) return 0; if ((p.config->options & OPT_CAPTURE_FILES_READ) == 0) @@ -3857,7 +3837,7 @@ int BPF_KPROBE(trace_security_file_mprotect) should_extract_code = true; } - if (should_alert && policies_matched(p.event)) + if (should_alert && rules_matched(p.event)) submit_mem_prot_alert_event( &p.event->args_buf, alert, addr, len, reqprot, prev_prot, file_info); @@ -4085,7 +4065,7 @@ int BPF_KPROBE(trace_security_bpf_prog) bpf_map_delete_elem(&bpf_attach_tmp_map, &p.event->context.task.host_tid); - if (event_is_selected(BPF_ATTACH, p.event->context.policies_version)) + if (event_is_selected(BPF_ATTACH)) bpf_map_update_elem(&bpf_attach_map, &prog_id, &val, BPF_ANY); if (!evaluate_scope_filters(&p)) @@ -4269,8 +4249,8 @@ int 
BPF_KPROBE(trace_security_kernel_post_read_file)
     loff_t size = (loff_t) PT_REGS_PARM3(ctx);
     enum kernel_read_file_id type_id = (enum kernel_read_file_id) PT_REGS_PARM4(ctx);
 
-    // Send event if chosen
-    if (policies_matched(p.event)) {
+    // Send event if rules matched
+    if (rules_matched(p.event)) {
         void *file_path = get_path_str(&file->f_path);
         save_str_to_buf(&p.event->args_buf, file_path, 0);
         save_to_submit_buf(&p.event->args_buf, &size, sizeof(loff_t), 1);
@@ -4530,7 +4510,7 @@ int tracepoint__module__module_load(struct bpf_raw_tracepoint_args *ctx)
 
     struct module *mod = (struct module *) ctx->args[0];
 
-    if (event_is_selected(HIDDEN_KERNEL_MODULE_SEEKER, p.event->context.policies_version)) {
+    if (event_is_selected(HIDDEN_KERNEL_MODULE_SEEKER)) {
         u64 insert_time = get_current_time_in_ns();
         kernel_new_mod_t new_mod = {.insert_time = insert_time};
         u64 mod_addr = (u64) mod;
@@ -4578,7 +4558,7 @@ int tracepoint__module__module_free(struct bpf_raw_tracepoint_args *ctx)
 
     struct module *mod = (struct module *) ctx->args[0];
 
-    if (event_is_selected(HIDDEN_KERNEL_MODULE_SEEKER, p.event->context.policies_version)) {
+    if (event_is_selected(HIDDEN_KERNEL_MODULE_SEEKER)) {
         u64 mod_addr = (u64) mod;
         // We must delete before the actual deletion from modules list occurs, otherwise there's a
         // risk of race condition
@@ -5937,8 +5917,8 @@ statfunc u64 sizeof_net_event_context_t(void)
 statfunc void set_net_task_context(event_data_t *event, net_task_context_t *netctx)
 {
     netctx->task = event->task;
-    netctx->policies_version = event->context.policies_version;
-    netctx->matched_policies = event->context.matched_policies;
+    netctx->rules_version = event->context.rules_version; // rules_version is meaningless here since we don't know the event yet
+    netctx->active_rules = event->context.active_rules;   // same here
     netctx->syscall = event->context.syscall;
     __builtin_memset(&netctx->taskctx, 0, sizeof(task_context_t));
     __builtin_memcpy(&netctx->taskctx, &event->context.task, sizeof(task_context_t));
@@ -5976,25 +5956,20 @@ statfunc enum event_id_e net_packet_to_net_event(net_packet_t packet_type)
 #pragma clang diagnostic push
 #pragma clang diagnostic ignored "-Waddress-of-packed-member"
 
-// Return if a network event should to be sumitted: if any of the policies
-// matched, submit the network event. This means that if any of the policies
+// Return whether a network event should be submitted: if any of the rules
+// matched, submit the network event. This means that if any of the rules
 // need a network event, kernel can submit the network base event and let
-// userland deal with it (derived events will match the appropriate policies).
+// userland deal with it (derived events will match the appropriate rules).
statfunc u64 should_submit_net_event(net_event_context_t *neteventctx, net_packet_t packet_type) { enum event_id_e evt_id = net_packet_to_net_event(packet_type); - u16 version = neteventctx->eventctx.policies_version; - void *inner_events_map = bpf_map_lookup_elem(&events_map_version, &version); - if (inner_events_map == NULL) - return 0; - - event_config_t *evt_config = bpf_map_lookup_elem(inner_events_map, &evt_id); + event_config_t *evt_config = bpf_map_lookup_elem(&events_config_map, &evt_id); if (evt_config == NULL) return 0; - return evt_config->submit_for_policies & neteventctx->eventctx.matched_policies; + return evt_config->submit_for_rules & neteventctx->eventctx.active_rules; } #pragma clang diagnostic pop // -Waddress-of-packed-member @@ -6013,13 +5988,13 @@ statfunc bool should_submit_flow_event(net_event_context_t *neteventctx) u32 evt_id = NET_FLOW_BASE; - // Again, if any policy matched, submit the flow base event so other flow - // events can be derived in userland and their policies matched in userland. - event_config_t *evt_config = bpf_map_lookup_elem(&events_map, &evt_id); + // Again, if any rule matched, submit the flow base event so other flow + // events can be derived in userland and their rules matched in userland. + event_config_t *evt_config = bpf_map_lookup_elem(&events_config_map, &evt_id); if (evt_config == NULL) return 0; - u64 should = evt_config->submit_for_policies & neteventctx->eventctx.matched_policies; + u64 should = evt_config->submit_for_rules & neteventctx->eventctx.active_rules; // Cache the result so next time we don't need to check again. if (should) @@ -6118,7 +6093,7 @@ statfunc u32 cgroup_skb_submit(void *map, struct __sk_buff *ctx, // Check if a flag is set in the retval. #define retval_hasflag(flag) (neteventctx->eventctx.retval & flag) == flag -// Keep track of a flow event if they are enabled and if any policy matched. +// Keep track of a flow event if they are enabled and if any rule matched. // Submit the flow base event so userland can derive the flow events. 
statfunc u32 cgroup_skb_submit_flow(struct __sk_buff *ctx, net_event_context_t *neteventctx, @@ -6297,7 +6272,7 @@ int BPF_KRETPROBE(trace_ret_sock_alloc_file) // runs every time a socket is created (return) program_data_t p = {}; - if (!init_program_data(&p, ctx, NO_EVENT_SUBMIT)) + if (!init_program_data(&p, ctx, POLICY_SCOPES)) return 0; if (!evaluate_scope_filters(&p)) @@ -6442,7 +6417,7 @@ int BPF_KPROBE(trace_security_socket_recvmsg) return 0; program_data_t p = {}; - if (!init_program_data(&p, ctx, NO_EVENT_SUBMIT)) + if (!init_program_data(&p, ctx, POLICY_SCOPES)) return 0; if (!evaluate_scope_filters(&p)) @@ -6471,7 +6446,7 @@ int BPF_KPROBE(trace_security_socket_sendmsg) return 0; program_data_t p = {}; - if (!init_program_data(&p, ctx, NO_EVENT_SUBMIT)) + if (!init_program_data(&p, ctx, POLICY_SCOPES)) return 0; if (!evaluate_scope_filters(&p)) @@ -6539,7 +6514,7 @@ int BPF_KPROBE(cgroup_bpf_run_filter_skb) program_data_t p = {}; p.scratch_idx = 1; p.event = e; - if (!init_program_data(&p, ctx, NO_EVENT_SUBMIT)) + if (!init_program_data(&p, ctx, POLICY_SCOPES)) return 0; bool mightbecloned = false; // cloned sock structs come from accept() @@ -6608,8 +6583,8 @@ int BPF_KPROBE(cgroup_bpf_run_filter_skb) eventctx->eventid = NET_PACKET_IP; // will be changed in skb program eventctx->stack_id = 0; // no stack trace eventctx->processor_id = p.event->context.processor_id; // copy from current ctx - eventctx->policies_version = netctx->policies_version; // pick policies_version from net ctx - eventctx->matched_policies = netctx->matched_policies; // pick matched_policies from net ctx + eventctx->rules_version = netctx->rules_version; // pick rules_version from net ctx + eventctx->active_rules = netctx->active_rules; // pick active_rules from net ctx eventctx->syscall = NO_SYSCALL; // ingress has no orig syscall if (type == BPF_CGROUP_INET_EGRESS) eventctx->syscall = netctx->syscall; // egress does have an orig syscall @@ -7594,16 +7569,11 @@ int tracepoint__exec_test(struct bpf_raw_tracepoint_args *ctx) // Submit all test events int ret = 0; program_data_t p = {}; - if (!init_program_data(&p, ctx, NO_EVENT_SUBMIT)) + if (!init_program_data(&p, ctx, EXEC_TEST)) return 0; - if (!evaluate_scope_filters(&p)) - return 0; - - if (reset_event(p.event, EXEC_TEST)) { - if (evaluate_scope_filters(&p)) - ret |= events_perf_submit(&p, 0); - } + if (evaluate_scope_filters(&p)) + ret |= events_perf_submit(&p, 0); if (reset_event(p.event, TEST_MISSING_KSYMBOLS)) { if (evaluate_scope_filters(&p)) diff --git a/pkg/ebpf/c/types.h b/pkg/ebpf/c/types.h index aad9729a93e3..2b6e496d5d3b 100644 --- a/pkg/ebpf/c/types.h +++ b/pkg/ebpf/c/types.h @@ -34,8 +34,8 @@ typedef struct event_context { s64 retval; u32 stack_id; u16 processor_id; // ID of the processor that processed the event - u16 policies_version; - u64 matched_policies; + u16 rules_version; + u64 active_rules; } event_context_t; #define EVENT_ID_LIST_NET \ @@ -138,6 +138,7 @@ typedef struct event_context { X(OPEN_FILE_MOUNT, ) \ X(SECURITY_SB_UMOUNT, ) \ X(SECURITY_TASK_PRCTL, ) \ + X(POLICY_SCOPES, ) \ // ... 
 #define EVENT_ID_LIST_LAST \
@@ -348,21 +349,21 @@ typedef struct ksym_name {
     char str[MAX_KSYM_NAME_SIZE];
 } ksym_name_t;
 
-typedef struct policy_key {
+typedef struct filter_version_key {
     u16 version;
     u16 __pad;
     u32 event_id;
-} policy_key_t;
+} filter_version_key_t;
 
 typedef struct equality {
-    // bitmap indicating which policies have a filter that uses the '=' operator (0 means '!=')
-    u64 equals_in_policies;
-    // bitmap indicating which policies have a filter that utilize the provided key
-    u64 key_used_in_policies;
+    // bitmap indicating which rules have a filter that uses the '=' operator (0 means '!=')
+    u64 equals_in_rules;
+    // bitmap indicating which rules have a filter that uses the provided key
+    u64 key_used_in_rules;
 } eq_t;
 
-typedef struct policies_config {
-    // bitmap indicating which policies have the filter enabled
+typedef struct scope_filters_config {
+    // bitmap indicating which rules have the filter enabled
     u64 uid_filter_enabled;
     u64 pid_filter_enabled;
    u64 mnt_ns_filter_enabled;
@@ -373,9 +374,7 @@
     u64 cont_filter_enabled;
     u64 new_cont_filter_enabled;
     u64 new_pid_filter_enabled;
-    u64 proc_tree_filter_enabled;
     u64 bin_path_filter_enabled;
-    u64 follow_filter_enabled;
     // bitmap indicating whether to match a rule if the key is missing from its filter map
     u64 uid_filter_match_if_key_missing;
     u64 pid_filter_match_if_key_missing;
@@ -387,25 +386,13 @@
     u64 cont_filter_match_if_key_missing;
     u64 new_cont_filter_match_if_key_missing;
     u64 new_pid_filter_match_if_key_missing;
-    u64 proc_tree_filter_match_if_key_missing;
     u64 bin_path_filter_match_if_key_missing;
-    // bitmap with policies that have at least one filter enabled
-    u64 enabled_policies;
-
-    // global min max
-    u64 uid_max;
-    u64 uid_min;
-    u64 pid_max;
-    u64 pid_min;
-} policies_config_t;
+} scope_filters_config_t;
 
 typedef struct config_entry {
     u32 tracee_pid;
     u32 options;
     u32 cgroup_v1_hid;
-    u16 padding; // free for further use
-    u16 policies_version;
-    policies_config_t policies_config;
 } config_entry_t;
 
 typedef struct string_filter_config {
@@ -423,8 +410,12 @@
 } data_filter_config_t;
 
 typedef struct event_config {
-    u64 submit_for_policies;
+    u16 rules_version;
+    u8 has_overflow;
+    u8 _padding[5];
+    u64 submit_for_rules;
     u64 field_types;
+    scope_filters_config_t scope_filters;
     data_filter_config_t data_filter;
 } event_config_t;
 
@@ -454,7 +445,6 @@ typedef struct event_data {
     args_buffer_t args_buf;
     struct task_struct *task;
     event_config_t config;
-    policies_config_t policies_config;
 } event_data_t;
 
 // A control plane signal - sent to indicate some critical event which should be processed
diff --git a/pkg/ebpf/config.go b/pkg/ebpf/config.go
index b423b330c166..6889bd1b1aef 100644
--- a/pkg/ebpf/config.go
+++ b/pkg/ebpf/config.go
@@ -6,7 +6,6 @@ import (
 	bpf "github.com/aquasecurity/libbpfgo"
 
 	"github.com/aquasecurity/tracee/pkg/errfmt"
-	"github.com/aquasecurity/tracee/pkg/policy"
 )
 
 const (
@@ -18,12 +17,9 @@ const (
 // Order of fields is important, as it is used as a value for
 // the ConfigMap BPF map.
 type Config struct {
-	TraceePid       uint32
-	Options         uint32
-	CgroupV1Hid     uint32
-	_               uint16 // padding free for further use
-	PoliciesVersion uint16
-	PoliciesConfig  policy.PoliciesConfig
+	TraceePid   uint32
+	Options     uint32
+	CgroupV1Hid uint32
 }
 
 // UpdateBPF updates the ConfigMap BPF map with the current config.
diff --git a/pkg/ebpf/event_parameters.go b/pkg/ebpf/event_parameters.go index d8ab21a546e7..a565e2de8bcb 100644 --- a/pkg/ebpf/event_parameters.go +++ b/pkg/ebpf/event_parameters.go @@ -38,15 +38,12 @@ func (t *Tracee) handleEventParameters() error { // confusion as it abuses the filter system. // TODO: in the future, a dedicated event parameter system should be added. eventParams := make([]map[string]filters.Filter[*filters.StringFilter], 0) - for iterator := t.policyManager.CreateAllIterator(); iterator.HasNext(); { - policy := iterator.Next() - if rule, ok := policy.Rules[eventID]; ok { - policyParams := rule.DataFilter.GetFieldFilters() - if len(policyParams) == 0 { - continue - } - eventParams = append(eventParams, policyParams) + for _, rule := range t.policyManager.GetRules(eventID) { + policyParams := rule.Data.DataFilter.GetFieldFilters() + if len(policyParams) == 0 { + continue } + eventParams = append(eventParams, policyParams) } if len(eventParams) == 0 { // No parameters for this event diff --git a/pkg/ebpf/events_pipeline.go b/pkg/ebpf/events_pipeline.go index eb1edd06402e..43df267e5aa6 100644 --- a/pkg/ebpf/events_pipeline.go +++ b/pkg/ebpf/events_pipeline.go @@ -13,6 +13,7 @@ import ( "github.com/aquasecurity/tracee/pkg/errfmt" "github.com/aquasecurity/tracee/pkg/events" "github.com/aquasecurity/tracee/pkg/logger" + "github.com/aquasecurity/tracee/pkg/policy" "github.com/aquasecurity/tracee/pkg/signatures/engine" traceetime "github.com/aquasecurity/tracee/pkg/time" "github.com/aquasecurity/tracee/pkg/utils" @@ -296,9 +297,14 @@ func (t *Tracee) decodeEvents(ctx context.Context, sourceChan chan []byte) (<-ch } evt.EventID = int(eCtx.EventID) evt.EventName = evtName - evt.PoliciesVersion = eCtx.PoliciesVersion - evt.MatchedPoliciesKernel = eCtx.MatchedPolicies - evt.MatchedPoliciesUser = 0 + evt.RulesVersion = eCtx.RulesVersion + // Convert eBPF single bitmap to bitmap array + if eCtx.MatchedRules != 0 { + evt.MatchedRulesKernel = []uint64{eCtx.MatchedRules} + } else { + evt.MatchedRulesKernel = []uint64{} + } + evt.MatchedRulesUser = []uint64{} evt.MatchedPolicies = []string{} evt.ArgsNum = int(argnum) evt.ReturnValue = int(eCtx.Retval) @@ -318,20 +324,12 @@ func (t *Tracee) decodeEvents(ctx context.Context, sourceChan chan []byte) (<-ch } evt.ParentEntityId = utils.HashTaskID(eCtx.HostPpid, traceetime.BootToEpochNS(eCtx.ParentStartTime)) - // If there aren't any policies that need filtering in userland, tracee **may** skip - // this event, as long as there aren't any derivatives or signatures that depend on it. - // Some base events (derivative and signatures) might not have set related policy bit, - // thus the need to continue with those within the pipeline. - if t.matchPolicies(evt) == 0 { - _, hasDerivation := t.eventDerivations[eventId] - reqBySig := t.policyManager.IsRequiredBySignature(eventId) - - if !hasDerivation && !reqBySig { - _ = t.stats.EventsFiltered.Increment() - t.eventsPool.Put(evt) - decoderPool.Put(ebpfMsgDecoder) - continue - } + // TODO(unrelated): move this to process stage (why did it moved here in the first place?) + if !t.matchRules(evt) { + _ = t.stats.EventsFiltered.Increment() + t.eventsPool.Put(evt) + decoderPool.Put(ebpfMsgDecoder) + continue } select { @@ -348,42 +346,30 @@ func (t *Tracee) decodeEvents(ctx context.Context, sourceChan chan []byte) (<-ch return out, errc } -// matchPolicies does the userland filtering (policy matching) for events. It iterates through all -// existing policies, that were set by the kernel in the event bitmap. 
Some of those policies might -// not match the event after userland filters are applied. In those cases, the policy bit is cleared -// (so the event is "filtered" for that policy). This may be called in different stages of the +// matchRules does the userland filtering (rule matching) for events. It iterates through all +// existing rules, that were set by the kernel in the event bitmap. Some of those rules might +// not match the event after userland filters are applied. In those cases, the rule bit is cleared +// (so the event is "filtered" for that rule). This may be called in different stages of the // pipeline (decode, derive, engine). -func (t *Tracee) matchPolicies(event *trace.Event) uint64 { +func (t *Tracee) matchRules(event *trace.Event) bool { eventID := events.ID(event.EventID) - bitmap := event.MatchedPoliciesKernel - // Short circuit if there are no policies in userland that need filtering. - if !t.policyManager.FilterableInUserland() { - event.MatchedPoliciesUser = bitmap // store untouched bitmap to be used in sink stage - return bitmap - } + // match scope filters for overflow rules that were not matched by the bpf code + t.matchOverflowRules(event) + + // Create a copy of the kernel matched rules bitmap array to work with + bitmap := make([]uint64, len(event.MatchedRulesKernel)) + copy(bitmap, event.MatchedRulesKernel) // Cache frequently accessed event fields eventUID := uint32(event.UserID) eventPID := uint32(event.HostProcessID) eventRetVal := int64(event.ReturnValue) - // range through each userland filterable policy - for it := t.policyManager.CreateUserlandIterator(); it.HasNext(); { - p := it.Next() - // Policy ID is the bit offset in the bitmap. - bitOffset := uint(p.ID) - - if !utils.HasBit(bitmap, bitOffset) { // event does not match this policy - continue - } - - // The event might have this policy bit set, but the policy might not have this - // event ID. This happens whenever the event submitted by the kernel is going to - // derive an event that this policy is interested in. In this case, don't do - // anything and let the derivation stage handle this event. - rule, ok := p.Rules[eventID] - if !ok { + // range through each userland filterable rule + for _, rule := range t.policyManager.GetUserlandRules(eventID) { + // Use proper bit index and bit offset for rules with ID > 64 + if !utils.HasBitInArray(bitmap, rule.ID) { // event does not match this rule continue } @@ -392,29 +378,33 @@ func (t *Tracee) matchPolicies(event *trace.Event) uint64 { // // 1. UID/PID range checks (very fast) - if p.UIDFilter.Enabled() { - if !p.UIDFilter.InMinMaxRange(eventUID) { - utils.ClearBit(&bitmap, bitOffset) + if rule.Policy.UIDFilter.Enabled() { + if !rule.Policy.UIDFilter.InMinMaxRange(eventUID) { + utils.ClearBitInArray(&bitmap, rule.ID) continue } } - if p.PIDFilter.Enabled() { - if !p.PIDFilter.InMinMaxRange(eventPID) { - utils.ClearBit(&bitmap, bitOffset) + if rule.Policy.PIDFilter.Enabled() { + // + // The same happens for the global PID min/max range. Clear the rule bit if + // the event PID is not in THIS rule PID min/max range. + // + if !rule.Policy.PIDFilter.InMinMaxRange(eventPID) { + utils.ClearBitInArray(&bitmap, rule.ID) continue } } // 2. event return value filters (fast) - if !rule.RetFilter.Filter(eventRetVal) { - utils.ClearBit(&bitmap, bitOffset) + if !rule.Data.RetFilter.Filter(eventRetVal) { + utils.ClearBitInArray(&bitmap, rule.ID) continue } // 3. 
event scope filters (medium cost) - if !rule.ScopeFilter.Filter(*event) { - utils.ClearBit(&bitmap, bitOffset) + if !rule.Data.ScopeFilter.Filter(*event) { + utils.ClearBitInArray(&bitmap, rule.ID) continue } @@ -424,20 +414,198 @@ func (t *Tracee) matchPolicies(event *trace.Event) uint64 { // events.PrintMemDump bypass was added due to issue #2546 // because it uses usermode applied filters as parameters for the event, // which occurs after filtering - if eventID != events.PrintMemDump && !rule.DataFilter.Filter(event.Args) { - utils.ClearBit(&bitmap, bitOffset) + if eventID != events.PrintMemDump && !rule.Data.DataFilter.Filter(event.Args) { + utils.ClearBitInArray(&bitmap, rule.ID) continue } - // Early exit optimization: if bitmap becomes 0, no need to continue - if bitmap == 0 { + // Early exit optimization: if bitmap array becomes empty, no need to continue + if utils.IsBitmapArrayEmpty(bitmap) { break } } - event.MatchedPoliciesUser = bitmap // store filtered bitmap to be used in sink stage + event.MatchedRulesUser = bitmap // store filtered bitmap to be used in sink stage + + return !utils.IsBitmapArrayEmpty(bitmap) +} + +// matchOverflowRules applies scope filters for overflow rules using the same logic as eBPF filtering +func (t *Tracee) matchOverflowRules(event *trace.Event) { + // Skip if event doesn't have overflow + if !t.policyManager.HasOverflowRules(events.ID(event.EventID)) { + return + } + + // Create filter version key + vKey := policy.FilterVersionKey{ + Version: event.RulesVersion, + EventID: uint32(event.EventID), + } + + // Get filter maps + fMaps := t.policyManager.GetFilterMaps() + if fMaps == nil { + return + } + + // Get extended scope filter configs to check which filters are enabled for overflow rules + extendedConfig, ok := fMaps.ExtendedScopeFilterConfigs[events.ID(event.EventID)] + if !ok { + return + } + + // Following eBPF logic: start with all overflow rules enabled (~0ULL equivalent) + // We work only on overflow bitmaps (index 1 and above) + overflowStartIndex := 1 + maxBitmapIndex := len(event.MatchedRulesKernel) + + // Determine how many overflow bitmaps we need to work with + for _, enabledBitmaps := range [][]uint64{ + extendedConfig.CommFilterEnabled, + extendedConfig.UIDFilterEnabled, + extendedConfig.PIDFilterEnabled, + extendedConfig.MntNsFilterEnabled, + extendedConfig.PidNsFilterEnabled, + extendedConfig.UtsNsFilterEnabled, + extendedConfig.CgroupIdFilterEnabled, + extendedConfig.ContFilterEnabled, + } { + if len(enabledBitmaps) > maxBitmapIndex { + maxBitmapIndex = len(enabledBitmaps) + } + } + + // Ensure MatchedRulesKernel has enough space for overflow rules + for len(event.MatchedRulesKernel) < maxBitmapIndex { + event.MatchedRulesKernel = append(event.MatchedRulesKernel, 0) + } + + // Initialize overflow bitmaps to all rules enabled (equivalent to res = ~0ULL in eBPF) + for i := overflowStartIndex; i < maxBitmapIndex; i++ { + event.MatchedRulesKernel[i] = ^uint64(0) // All bits set + } - return bitmap + // Apply each scope filter using the same logic as eBPF: res &= equality_filter_matches(...) 
| mask + t.applyOverflowScopeFilter(event, fMaps.CommFilters[vKey], event.ProcessName, extendedConfig.CommFilterEnabled, extendedConfig.CommFilterMatchIfKeyMissing) + t.applyOverflowScopeFilter(event, fMaps.UIDFilters[vKey], uint64(event.UserID), extendedConfig.UIDFilterEnabled, extendedConfig.UIDFilterMatchIfKeyMissing) + t.applyOverflowScopeFilter(event, fMaps.PIDFilters[vKey], uint64(event.HostProcessID), extendedConfig.PIDFilterEnabled, extendedConfig.PIDFilterMatchIfKeyMissing) + t.applyOverflowScopeFilter(event, fMaps.MntNsFilters[vKey], uint64(event.MountNS), extendedConfig.MntNsFilterEnabled, extendedConfig.MntNsFilterMatchIfKeyMissing) + t.applyOverflowScopeFilter(event, fMaps.PidNsFilters[vKey], uint64(event.PIDNS), extendedConfig.PidNsFilterEnabled, extendedConfig.PidNsFilterMatchIfKeyMissing) + t.applyOverflowScopeFilter(event, fMaps.UTSFilters[vKey], event.HostName, extendedConfig.UtsNsFilterEnabled, extendedConfig.UtsNsFilterMatchIfKeyMissing) + t.applyOverflowScopeFilter(event, fMaps.CgroupFilters[vKey], uint64(event.CgroupID), extendedConfig.CgroupIdFilterEnabled, extendedConfig.CgroupIdFilterMatchIfKeyMissing) + + // Container filter: only apply if ContainerID is not empty + if event.ContainerID != "" { + t.applyOverflowScopeFilter(event, fMaps.ContainerFilters[vKey], event.ContainerID, extendedConfig.ContFilterEnabled, extendedConfig.ContFilterMatchIfKeyMissing) + } else { + // If no ContainerID, apply the mask logic for missing key behavior + t.applyOverflowScopeFilterMissingKey(event, extendedConfig.ContFilterEnabled, extendedConfig.ContFilterMatchIfKeyMissing) + } +} + +// applyOverflowScopeFilter implements the same logic as equality_filter_matches in eBPF +// res &= equality_filter_matches(match_if_key_missing, filter_map, &key) | mask +func (t *Tracee) applyOverflowScopeFilter(event *trace.Event, filterMap interface{}, key interface{}, filterEnabled []uint64, matchIfKeyMissing []uint64) { + overflowStartIndex := 1 + + for i := overflowStartIndex; i < len(event.MatchedRulesKernel); i++ { + // Get the mask for rules that don't have this filter enabled (equivalent to ~filter_enabled in eBPF) + mask := ^uint64(0) // Default: all rules pass if filter not enabled + if i < len(filterEnabled) { + mask = ^filterEnabled[i] + } + + // Get match_if_key_missing bitmap for this overflow bitmap + var matchIfMissing uint64 + if i < len(matchIfKeyMissing) { + matchIfMissing = matchIfKeyMissing[i] + } + + // Implement equality_filter_matches logic + equalsInRules := t.getEqualsInRulesForOverflow(filterMap, key, i) + keyUsedInRules := t.getKeyUsedInRulesForOverflow(filterMap, key, i) + + // eBPF logic: equals_in_rules | (match_if_key_missing & ~key_used_in_rules) + filterMatches := equalsInRules | (matchIfMissing & ^keyUsedInRules) + + // Apply filter: res &= equality_filter_matches(...) 
| mask + event.MatchedRulesKernel[i] &= filterMatches | mask + } +} + +// applyOverflowScopeFilterMissingKey handles the case when a key is missing (e.g., empty ContainerID) +func (t *Tracee) applyOverflowScopeFilterMissingKey(event *trace.Event, filterEnabled []uint64, matchIfKeyMissing []uint64) { + overflowStartIndex := 1 + + for i := overflowStartIndex; i < len(event.MatchedRulesKernel); i++ { + // Get the mask for rules that don't have this filter enabled + mask := ^uint64(0) + if i < len(filterEnabled) { + mask = ^filterEnabled[i] + } + + // Get match_if_key_missing bitmap + var matchIfMissing uint64 + if i < len(matchIfKeyMissing) { + matchIfMissing = matchIfKeyMissing[i] + } + + // When key is missing: equals_in_rules = 0, key_used_in_rules = 0 + // So result is: 0 | (match_if_key_missing & ~0) = match_if_key_missing + filterMatches := matchIfMissing + + // Apply filter: res &= filterMatches | mask + event.MatchedRulesKernel[i] &= filterMatches | mask + } +} + +// getEqualsInRulesForOverflow extracts the equals_in_rules bitmap for overflow rules from filter maps +func (t *Tracee) getEqualsInRulesForOverflow(filterMap interface{}, key interface{}, bitmapIndex int) uint64 { + if filterMap == nil { + return 0 + } + + switch fm := filterMap.(type) { + case map[uint64][]policy.RuleBitmap: + if uint64Key, ok := key.(uint64); ok { + if bitmaps, exists := fm[uint64Key]; exists && bitmapIndex < len(bitmaps) { + return bitmaps[bitmapIndex].EqualsInRules + } + } + case map[string][]policy.RuleBitmap: + if stringKey, ok := key.(string); ok { + if bitmaps, exists := fm[stringKey]; exists && bitmapIndex < len(bitmaps) { + return bitmaps[bitmapIndex].EqualsInRules + } + } + } + + return 0 +} + +// getKeyUsedInRulesForOverflow extracts the key_used_in_rules bitmap for overflow rules from filter maps +func (t *Tracee) getKeyUsedInRulesForOverflow(filterMap interface{}, key interface{}, bitmapIndex int) uint64 { + if filterMap == nil { + return 0 + } + + switch fm := filterMap.(type) { + case map[uint64][]policy.RuleBitmap: + if uint64Key, ok := key.(uint64); ok { + if bitmaps, exists := fm[uint64Key]; exists && bitmapIndex < len(bitmaps) { + return bitmaps[bitmapIndex].KeyUsedInRules + } + } + case map[string][]policy.RuleBitmap: + if stringKey, ok := key.(string); ok { + if bitmaps, exists := fm[stringKey]; exists && bitmapIndex < len(bitmaps) { + return bitmaps[bitmapIndex].KeyUsedInRules + } + } + } + + return 0 } func parseContextFlags(containerId string, flags uint32) trace.ContextFlags { @@ -457,7 +625,7 @@ func parseContextFlags(containerId string, flags uint32) trace.ContextFlags { // processEvents is the event processing pipeline stage. For each received event, it goes // through all event processors and check if there is any internal processing needed for -// that event type. It also clears policy bits for out-of-order container related events +// that event type. It also clears rule bits for out-of-order container related events // (after the processing logic). This stage also starts some logic that will be used by // the processing logic in subsequent events. 
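Note (reviewer sketch, not part of the patch): the overflow matching above mirrors the eBPF equality_filter_matches logic word by word. The short Go program below works through the combination rule res &= (equals_in_rules | (match_if_key_missing & ~key_used_in_rules)) | ~filter_enabled on a single 64-rule word, with made-up bitmap values, so the effect of the mask and the match-if-missing term is easy to see.

package main

import "fmt"

func main() {
	var (
		filterEnabled     uint64 = 0b0111 // rules 0-2 use this scope filter, rule 3 does not
		equalsInRules     uint64 = 0b0001 // the event's key value matches rule 0's filter
		keyUsedInRules    uint64 = 0b0011 // rules 0 and 1 reference this key at all
		matchIfKeyMissing uint64 = 0b0100 // rule 2 wants a match when the key is absent
	)

	// equality_filter_matches: explicit match, or "match if missing" for rules not using the key.
	filterMatches := equalsInRules | (matchIfKeyMissing &^ keyUsedInRules)

	// Rules that do not enable this filter always pass it.
	mask := ^filterEnabled

	res := ^uint64(0) // start with all overflow rules enabled
	res &= filterMatches | mask

	fmt.Printf("%04b\n", res&0b1111) // 1101: rules 0, 2 and 3 survive, rule 1 is filtered out
}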
func (t *Tracee) processEvents(ctx context.Context, in <-chan *trace.Event) ( @@ -488,11 +656,12 @@ func (t *Tracee) processEvents(ctx context.Context, in <-chan *trace.Event) ( continue } - // Get a bitmap with all policies containing container filters - policiesWithContainerFilter := t.policyManager.WithContainerFilterEnabled() + // Get a bitmap with all rules containing container filters + eventId := events.ID(event.EventID) + containerFilteredRules := t.policyManager.GetContainerFilteredRulesBitmap(eventId) - // Filter out events that don't have a container ID from all the policies that - // have container filters. This will guarantee that any of those policies + // Filter out events that don't have a container ID from all the rules that + // have container filters. This will guarantee that any of those rules // won't get matched by this event. This situation might happen if the events // from a recently created container appear BEFORE the initial cgroup_mkdir of // that container root directory. This could be solved by sorting the events @@ -500,9 +669,7 @@ func (t *Tracee) processEvents(ctx context.Context, in <-chan *trace.Event) ( // enabled, so, in those cases, ignore the event IF the event is not a // cgroup_mkdir or cgroup_rmdir. - if policiesWithContainerFilter > 0 && event.Container.ID == "" { - eventId := events.ID(event.EventID) - + if len(containerFilteredRules) > 0 && containerFilteredRules[0] > 0 && event.Container.ID == "" { // never skip cgroup_{mkdir,rmdir}: container_{create,remove} events need it if eventId == events.CgroupMkdir || eventId == events.CgroupRmdir { goto sendEvent @@ -511,11 +678,15 @@ func (t *Tracee) processEvents(ctx context.Context, in <-chan *trace.Event) ( logger.Debugw("False container positive", "event.Timestamp", event.Timestamp, "eventId", eventId) - // remove event from the policies with container filters - utils.ClearBits(&event.MatchedPoliciesKernel, policiesWithContainerFilter) - utils.ClearBits(&event.MatchedPoliciesUser, policiesWithContainerFilter) + // remove event from rules with container filters + if len(event.MatchedRulesKernel) > 0 { + utils.ClearBits(&event.MatchedRulesKernel[0], containerFilteredRules[0]) + } + if len(event.MatchedRulesUser) > 0 { + utils.ClearBits(&event.MatchedRulesUser[0], containerFilteredRules[0]) + } - if event.MatchedPoliciesKernel == 0 { + if utils.IsBitmapArrayEmpty(event.MatchedRulesKernel) { t.eventsPool.Put(event) continue } @@ -559,6 +730,10 @@ func (t *Tracee) deriveEvents(ctx context.Context, in <-chan *trace.Event) ( // Send original event down the pipeline out <- event + // Capture base event info for derived event processing + baseEventID := event.EventID + baseEventMatchedRules := event.MatchedRulesUser + for _, err := range errors { t.handleError(err) } @@ -573,6 +748,15 @@ func (t *Tracee) deriveEvents(ctx context.Context, in <-chan *trace.Event) ( // Let's keep an eye on that moving from experimental for these and similar cases in tracee. 
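Note (reviewer sketch, not part of the patch): the false-container-positive handling above now clears bits only in word 0 of the bitmap arrays. The snippet below, using made-up values and a local stand-in for utils.ClearBits, shows what that clearing does to an event that matched container-filtered rules but arrived without a container ID.

package main

import "fmt"

// clearBits mirrors the assumed behavior of utils.ClearBits: clear every bit of
// bitmap that is set in mask.
func clearBits(bitmap *uint64, mask uint64) { *bitmap &^= mask }

func main() {
	matchedKernel := []uint64{0b1011}     // rules 0, 1 and 3 matched in the kernel
	containerFiltered := []uint64{0b0011} // rules 0 and 1 require a container ID

	// The event arrived without a container ID, so it cannot satisfy rules 0 and 1.
	clearBits(&matchedKernel[0], containerFiltered[0])

	fmt.Printf("%04b\n", matchedKernel[0]) // 1000: only rule 3 still matches
}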
event := &derivatives[i] + // Get matched rules for derived event based on base event matches + event.MatchedRulesUser = t.policyManager.GetDerivedEventMatchedRules( + events.ID(event.EventID), // derived event ID + events.ID(baseEventID), // base event ID + baseEventMatchedRules, // base event matched rules bitmap + ) + // We need to update the kernel matched rules since it is used in matchedRules function + event.MatchedRulesKernel = event.MatchedRulesUser + // Skip events that dont work with filtering due to missing types // being handled (https://github.com/aquasecurity/tracee/issues/2486) switch events.ID(derivatives[i].EventID) { @@ -581,7 +765,7 @@ func (t *Tracee) deriveEvents(ctx context.Context, in <-chan *trace.Event) ( case events.PrintMemDump: default: // Derived events might need filtering as well - if t.matchPolicies(event) == 0 { + if !t.matchRules(event) { _ = t.stats.EventsFiltered.Increment() continue } @@ -614,24 +798,20 @@ func (t *Tracee) sinkEvents(ctx context.Context, in <-chan *trace.Event) <-chan continue // might happen during initialization (ctrl+c seg faults) } - // Is the event enabled for the policies or globally? - if !t.policyManager.IsEnabled(event.MatchedPoliciesUser, events.ID(event.EventID)) { + if !t.policyManager.IsEventEnabled(events.ID(event.EventID)) { // TODO: create metrics from dropped events t.eventsPool.Put(event) continue } - // Only emit events requested by the user and matched by at least one policy. + // Only emit events requested by the user and matched by at least one rule. id := events.ID(event.EventID) - event.MatchedPoliciesUser = t.policyManager.MatchEvent(id, event.MatchedPoliciesUser) - if event.MatchedPoliciesUser == 0 { + event.MatchedPolicies = t.policyManager.GetMatchedRulesInfo(id, event.MatchedRulesUser) + if len(event.MatchedPolicies) == 0 { t.eventsPool.Put(event) continue } - // Populate the event with the names of the matched policies. - event.MatchedPolicies = t.policyManager.MatchedNames(event.MatchedPoliciesUser) - // Parse arguments for output formatting if enabled. 
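Note (hypothetical reviewer sketch, not the pkg/policy implementation): in the sink stage the old MatchEvent plus MatchedNames pair is replaced by a single GetMatchedRulesInfo call that returns the matched policy names directly. Conceptually that means walking the matched-rule bitmap array and resolving each set bit to the owning policy's name, roughly as below; ruleNames is an assumed lookup table indexed by rule ID, and the real method also honors the user's emit selection.

package main

import "fmt"

func matchedPolicyNames(ruleNames []string, matched []uint64) []string {
	names := []string{}
	for word := range matched {
		for off := uint(0); off < 64; off++ {
			if matched[word]&(1<<off) == 0 {
				continue
			}
			if id := word*64 + int(off); id < len(ruleNames) {
				names = append(names, ruleNames[id])
			}
		}
	}
	return names
}

func main() {
	ruleNames := []string{"policy-a", "policy-b", "policy-c"}
	fmt.Println(matchedPolicyNames(ruleNames, []uint64{0b101})) // [policy-a policy-c]
}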
if t.config.Output.ParseArguments { err := t.parseArguments(event) diff --git a/pkg/ebpf/events_pipeline_bench_test.go b/pkg/ebpf/events_pipeline_bench_test.go index 4cdf005ea8c9..ab66e1ab0b11 100644 --- a/pkg/ebpf/events_pipeline_bench_test.go +++ b/pkg/ebpf/events_pipeline_bench_test.go @@ -94,9 +94,9 @@ func BenchmarkGetEventFromPool(b *testing.B) { evt.Kubernetes = kubernetesData evt.EventID = int(ctx.EventID) evt.EventName = eventDefinition.GetName() - evt.PoliciesVersion = ctx.PoliciesVersion - evt.MatchedPoliciesKernel = ctx.MatchedPolicies - evt.MatchedPoliciesUser = 0 + evt.RulesVersion = ctx.RulesVersion + evt.MatchedRulesKernel = []uint64{ctx.MatchedRules} + evt.MatchedRulesUser = []uint64{0} evt.MatchedPolicies = []string{} evt.ArgsNum = int(argnum) evt.ReturnValue = int(ctx.Retval) @@ -234,37 +234,37 @@ func BenchmarkNewEventObject(b *testing.B) { ctx := <-decodeChan evt := trace.Event{ - Timestamp: int(ctx.Ts), - ThreadStartTime: int(ctx.StartTime), - ProcessorID: int(ctx.ProcessorId), - ProcessID: int(ctx.Pid), - ThreadID: int(ctx.Tid), - ParentProcessID: int(ctx.Ppid), - HostProcessID: int(ctx.HostPid), - HostThreadID: int(ctx.HostTid), - HostParentProcessID: int(ctx.HostPpid), - UserID: int(ctx.Uid), - MountNS: int(ctx.MntID), - PIDNS: int(ctx.PidID), - ProcessName: string(utils.TrimTrailingNUL(ctx.Comm[:])), - HostName: string(utils.TrimTrailingNUL(ctx.UtsName[:])), - CgroupID: uint(ctx.CgroupID), - ContainerID: containerData.ID, - Container: containerData, - Kubernetes: kubernetesData, - EventID: int(ctx.EventID), - EventName: eventDefinition.GetName(), - PoliciesVersion: ctx.PoliciesVersion, - MatchedPoliciesKernel: ctx.MatchedPolicies, - ArgsNum: int(argnum), - ReturnValue: int(ctx.Retval), - Args: args, - StackAddresses: stackAddresses, - ContextFlags: flags, - Syscall: syscall, - ThreadEntityId: utils.HashTaskID(ctx.HostTid, ctx.StartTime), - ProcessEntityId: utils.HashTaskID(ctx.HostPid, ctx.LeaderStartTime), - ParentEntityId: utils.HashTaskID(ctx.HostPpid, ctx.ParentStartTime), + Timestamp: int(ctx.Ts), + ThreadStartTime: int(ctx.StartTime), + ProcessorID: int(ctx.ProcessorId), + ProcessID: int(ctx.Pid), + ThreadID: int(ctx.Tid), + ParentProcessID: int(ctx.Ppid), + HostProcessID: int(ctx.HostPid), + HostThreadID: int(ctx.HostTid), + HostParentProcessID: int(ctx.HostPpid), + UserID: int(ctx.Uid), + MountNS: int(ctx.MntID), + PIDNS: int(ctx.PidID), + ProcessName: string(utils.TrimTrailingNUL(ctx.Comm[:])), + HostName: string(utils.TrimTrailingNUL(ctx.UtsName[:])), + CgroupID: uint(ctx.CgroupID), + ContainerID: containerData.ID, + Container: containerData, + Kubernetes: kubernetesData, + EventID: int(ctx.EventID), + EventName: eventDefinition.GetName(), + RulesVersion: ctx.RulesVersion, + MatchedRulesKernel: []uint64{ctx.MatchedRules}, + ArgsNum: int(argnum), + ReturnValue: int(ctx.Retval), + Args: args, + StackAddresses: stackAddresses, + ContextFlags: flags, + Syscall: syscall, + ThreadEntityId: utils.HashTaskID(ctx.HostTid, ctx.StartTime), + ProcessEntityId: utils.HashTaskID(ctx.HostPid, ctx.LeaderStartTime), + ParentEntityId: utils.HashTaskID(ctx.HostPpid, ctx.ParentStartTime), } processChan <- &evt diff --git a/pkg/ebpf/hidden_kernel_module.go b/pkg/ebpf/hidden_kernel_module.go index 15a62fcfe0d7..87ebd980dfa3 100644 --- a/pkg/ebpf/hidden_kernel_module.go +++ b/pkg/ebpf/hidden_kernel_module.go @@ -28,7 +28,7 @@ func (t *Tracee) lkmSeekerRoutine(ctx gocontext.Context) { logger.Debugw("Starting lkmSeekerRoutine goroutine") defer logger.Debugw("Stopped 
lkmSeekerRoutine goroutine") - if !t.policyManager.IsEventToEmit(events.HiddenKernelModule) { + if !t.policyManager.IsEventSelected(events.HiddenKernelModule) { return } diff --git a/pkg/ebpf/hooked_syscall_table.go b/pkg/ebpf/hooked_syscall_table.go index ae2a1ff94d57..03045adf57eb 100644 --- a/pkg/ebpf/hooked_syscall_table.go +++ b/pkg/ebpf/hooked_syscall_table.go @@ -26,7 +26,7 @@ func (t *Tracee) hookedSyscallTableRoutine(ctx gocontext.Context) { logger.Debugw("Starting hookedSyscallTable goroutine") defer logger.Debugw("Stopped hookedSyscallTable goroutine") - if !t.policyManager.IsEventToSubmit(events.HookedSyscall) { + if !t.policyManager.IsEventSelected(events.HookedSyscall) { return } diff --git a/pkg/ebpf/ksymbols.go b/pkg/ebpf/ksymbols.go index b82f4b4d1b0c..4fa2bbb8207a 100644 --- a/pkg/ebpf/ksymbols.go +++ b/pkg/ebpf/ksymbols.go @@ -36,7 +36,7 @@ func (t *Tracee) UpdateKallsyms() error { var allReqSymbols []string - for _, evtID := range t.policyManager.EventsSelected() { + for _, evtID := range t.policyManager.GetSelectedEvents() { for _, symDep := range evtDefSymDeps(evtID) { allReqSymbols = append(allReqSymbols, symDep.GetSymbolName()) } diff --git a/pkg/ebpf/perf_count.go b/pkg/ebpf/perf_count.go index 838e964b4d6b..6a3942c6b757 100644 --- a/pkg/ebpf/perf_count.go +++ b/pkg/ebpf/perf_count.go @@ -29,7 +29,7 @@ func (t *Tracee) countPerfEventSubmissions(ctx context.Context) { } evtStatZero := eventStatsValues{} - for _, id := range t.policyManager.EventsToSubmit() { + for _, id := range t.policyManager.GetSelectedEvents() { if id >= events.MaxCommonID { continue } diff --git a/pkg/ebpf/signature_engine.go b/pkg/ebpf/signature_engine.go index dca41bc32652..e14734068a8e 100644 --- a/pkg/ebpf/signature_engine.go +++ b/pkg/ebpf/signature_engine.go @@ -67,7 +67,7 @@ func (t *Tracee) engineEvents(ctx context.Context, in <-chan *trace.Event) (<-ch id := events.ID(event.EventID) // if the event is NOT marked as submit, it is not sent to the rules engine - if !t.policyManager.IsEventToSubmit(id) { + if !t.policyManager.IsEventSelected(id) { return } @@ -119,7 +119,7 @@ func (t *Tracee) engineEvents(ctx context.Context, in <-chan *trace.Event) (<-ch continue } - if t.matchPolicies(event) == 0 { + if !t.matchRules(event) { _ = t.stats.EventsFiltered.Increment() continue } diff --git a/pkg/ebpf/tracee.go b/pkg/ebpf/tracee.go index 4a48f9129700..a985c5ff18e8 100644 --- a/pkg/ebpf/tracee.go +++ b/pkg/ebpf/tracee.go @@ -120,7 +120,7 @@ type Tracee struct { // Streams streamsManager *streams.StreamsManager // policyManager manages policy state - policyManager *policy.Manager + policyManager *policy.PolicyManager // The dependencies of events used by Tracee eventsDependencies *dependencies.Manager // A reference to a environment.KernelSymbolTable that might change at runtime. @@ -597,8 +597,8 @@ func (t *Tracee) initTailCall(tailCall events.TailCall) error { // event, represented through its ID, we declare to which other events it can be // derived and the corresponding function to derive into that Event. 
func (t *Tracee) initDerivationTable() error { - shouldSubmit := func(id events.ID) func() bool { - return func() bool { return t.policyManager.IsEventToSubmit(id) } + shouldEmit := func(id events.ID) func() bool { + return func() bool { return t.policyManager.ShouldEmitEvent(id) } } symbolsCollisions := derive.SymbolsCollision(t.contSymbolsLoader, t.policyManager) @@ -611,56 +611,56 @@ func (t *Tracee) initDerivationTable() error { t.eventDerivations = derive.Table{ events.CgroupMkdir: { events.ContainerCreate: { - Enabled: shouldSubmit(events.ContainerCreate), + Enabled: shouldEmit(events.ContainerCreate), DeriveFunction: derive.ContainerCreate(t.containers), }, }, events.CgroupRmdir: { events.ContainerRemove: { - Enabled: shouldSubmit(events.ContainerRemove), + Enabled: shouldEmit(events.ContainerRemove), DeriveFunction: derive.ContainerRemove(t.containers), }, }, events.SyscallTableCheck: { events.HookedSyscall: { - Enabled: shouldSubmit(events.SyscallTableCheck), + Enabled: shouldEmit(events.SyscallTableCheck), DeriveFunction: derive.DetectHookedSyscall(t.getKernelSymbols()), }, }, events.PrintNetSeqOps: { events.HookedSeqOps: { - Enabled: shouldSubmit(events.HookedSeqOps), + Enabled: shouldEmit(events.HookedSeqOps), DeriveFunction: derive.HookedSeqOps(t.getKernelSymbols()), }, }, events.HiddenKernelModuleSeeker: { events.HiddenKernelModule: { - Enabled: shouldSubmit(events.HiddenKernelModuleSeeker), + Enabled: shouldEmit(events.HiddenKernelModule), DeriveFunction: derive.HiddenKernelModule(), }, }, events.SharedObjectLoaded: { events.SymbolsLoaded: { - Enabled: shouldSubmit(events.SymbolsLoaded), + Enabled: shouldEmit(events.SymbolsLoaded), DeriveFunction: derive.SymbolsLoaded( t.contSymbolsLoader, t.policyManager, ), }, events.SymbolsCollision: { - Enabled: shouldSubmit(events.SymbolsCollision), + Enabled: shouldEmit(events.SymbolsCollision), DeriveFunction: symbolsCollisions, }, }, events.SchedProcessExec: { events.SymbolsCollision: { - Enabled: shouldSubmit(events.SymbolsCollision), + Enabled: shouldEmit(events.SymbolsCollision), DeriveFunction: symbolsCollisions, }, }, events.SecuritySocketConnect: { events.NetTCPConnect: { - Enabled: shouldSubmit(events.NetTCPConnect), + Enabled: shouldEmit(events.NetTCPConnect), DeriveFunction: derive.NetTCPConnect( t.dnsCache, ), @@ -668,13 +668,13 @@ func (t *Tracee) initDerivationTable() error { }, events.ExecuteFinished: { events.ProcessExecuteFailed: { - Enabled: shouldSubmit(events.ProcessExecuteFailed), + Enabled: shouldEmit(events.ProcessExecuteFailed), DeriveFunction: executeFailedGen.ProcessExecuteFailed(), }, }, events.ProcessExecuteFailedInternal: { events.ProcessExecuteFailed: { - Enabled: shouldSubmit(events.ProcessExecuteFailed), + Enabled: shouldEmit(events.ProcessExecuteFailed), DeriveFunction: executeFailedGen.ProcessExecuteFailed(), }, }, @@ -683,63 +683,63 @@ func (t *Tracee) initDerivationTable() error { // events.NetPacketIPBase: { events.NetPacketIPv4: { - Enabled: shouldSubmit(events.NetPacketIPv4), + Enabled: shouldEmit(events.NetPacketIPv4), DeriveFunction: derive.NetPacketIPv4(), }, events.NetPacketIPv6: { - Enabled: shouldSubmit(events.NetPacketIPv6), + Enabled: shouldEmit(events.NetPacketIPv6), DeriveFunction: derive.NetPacketIPv6(), }, }, events.NetPacketTCPBase: { events.NetPacketTCP: { - Enabled: shouldSubmit(events.NetPacketTCP), + Enabled: shouldEmit(events.NetPacketTCP), DeriveFunction: derive.NetPacketTCP(), }, }, events.NetPacketUDPBase: { events.NetPacketUDP: { - Enabled: shouldSubmit(events.NetPacketUDP), 
+ Enabled: shouldEmit(events.NetPacketUDP), DeriveFunction: derive.NetPacketUDP(), }, }, events.NetPacketICMPBase: { events.NetPacketICMP: { - Enabled: shouldSubmit(events.NetPacketICMP), + Enabled: shouldEmit(events.NetPacketICMP), DeriveFunction: derive.NetPacketICMP(), }, }, events.NetPacketICMPv6Base: { events.NetPacketICMPv6: { - Enabled: shouldSubmit(events.NetPacketICMPv6), + Enabled: shouldEmit(events.NetPacketICMPv6), DeriveFunction: derive.NetPacketICMPv6(), }, }, events.NetPacketDNSBase: { events.NetPacketDNS: { - Enabled: shouldSubmit(events.NetPacketDNS), + Enabled: shouldEmit(events.NetPacketDNS), DeriveFunction: derive.NetPacketDNS(), }, events.NetPacketDNSRequest: { - Enabled: shouldSubmit(events.NetPacketDNSRequest), + Enabled: shouldEmit(events.NetPacketDNSRequest), DeriveFunction: derive.NetPacketDNSRequest(), }, events.NetPacketDNSResponse: { - Enabled: shouldSubmit(events.NetPacketDNSResponse), + Enabled: shouldEmit(events.NetPacketDNSResponse), DeriveFunction: derive.NetPacketDNSResponse(), }, }, events.NetPacketHTTPBase: { events.NetPacketHTTP: { - Enabled: shouldSubmit(events.NetPacketHTTP), + Enabled: shouldEmit(events.NetPacketHTTP), DeriveFunction: derive.NetPacketHTTP(), }, events.NetPacketHTTPRequest: { - Enabled: shouldSubmit(events.NetPacketHTTPRequest), + Enabled: shouldEmit(events.NetPacketHTTPRequest), DeriveFunction: derive.NetPacketHTTPRequest(), }, events.NetPacketHTTPResponse: { - Enabled: shouldSubmit(events.NetPacketHTTPResponse), + Enabled: shouldEmit(events.NetPacketHTTPResponse), DeriveFunction: derive.NetPacketHTTPResponse(), }, }, @@ -748,13 +748,13 @@ func (t *Tracee) initDerivationTable() error { // events.NetPacketFlow: { events.NetFlowTCPBegin: { - Enabled: shouldSubmit(events.NetFlowTCPBegin), + Enabled: shouldEmit(events.NetFlowTCPBegin), DeriveFunction: derive.NetFlowTCPBegin( t.dnsCache, ), }, events.NetFlowTCPEnd: { - Enabled: shouldSubmit(events.NetFlowTCPEnd), + Enabled: shouldEmit(events.NetFlowTCPEnd), DeriveFunction: derive.NetFlowTCPEnd( t.dnsCache, ), @@ -818,15 +818,12 @@ func (t *Tracee) getOptionsConfig() uint32 { return cOptVal } -// newConfig returns a new Config instance based on the current Tracee state and -// the given policies config and version. -func (t *Tracee) newConfig(cfg *policy.PoliciesConfig) *Config { +// newConfig returns a new Config instance based on the current Tracee state +func (t *Tracee) newConfig() *Config { return &Config{ - TraceePid: uint32(os.Getpid()), - Options: t.getOptionsConfig(), - CgroupV1Hid: uint32(t.cgroups.GetDefaultCgroupHierarchyID()), - PoliciesVersion: 1, // version will be removed soon - PoliciesConfig: *cfg, + TraceePid: uint32(os.Getpid()), + Options: t.getOptionsConfig(), + CgroupV1Hid: uint32(t.cgroups.GetDefaultCgroupHierarchyID()), } } @@ -834,7 +831,7 @@ func (t *Tracee) initKsymTableRequiredSyms() error { // Get all required symbols needed in the table // 1. all event ksym dependencies // 2. specific cases (hooked_seq_ops, hooked_symbols, print_mem_dump) - for _, id := range t.policyManager.EventsSelected() { + for _, id := range t.policyManager.GetSelectedEvents() { if !events.Core.IsDefined(id) { return errfmt.Errorf("event %d is not defined", id) } @@ -888,14 +885,8 @@ func (t *Tracee) initKsymTableRequiredSyms() error { } } if t.policyManager.IsEventSelected(events.PrintMemDump) { - for it := t.policyManager.CreateAllIterator(); it.HasNext(); { - p := it.Next() - // This might break in the future if PrintMemDump will become a dependency of another event. 
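Note (reviewer sketch, not part of the patch): the derivation table keeps storing a func() bool (shouldEmit) rather than evaluating the emit decision once at table-build time. The toy program below, with an assumed emitter interface, shows why the closure form matters: the decision is re-queried on every derivation, so runtime policy changes are picked up without rebuilding the table.

package main

import "fmt"

type emitter interface {
	ShouldEmitEvent(id int) bool
}

type staticEmitter map[int]bool

func (s staticEmitter) ShouldEmitEvent(id int) bool { return s[id] }

func shouldEmit(pm emitter, id int) func() bool {
	return func() bool { return pm.ShouldEmitEvent(id) }
}

func main() {
	pm := staticEmitter{42: false}
	enabled := shouldEmit(pm, 42)

	fmt.Println(enabled()) // false
	pm[42] = true          // policy state changes at runtime
	fmt.Println(enabled()) // true: the closure re-queries the manager
}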
- _, isSelected := p.Rules[events.PrintMemDump] - if !isSelected { - continue - } - printMemDumpFilters := p.Rules[events.PrintMemDump].DataFilter.GetFieldFilters() + for _, rule := range t.policyManager.GetRules(events.PrintMemDump) { + printMemDumpFilters := rule.Data.DataFilter.GetFieldFilters() if len(printMemDumpFilters) == 0 { continue } @@ -995,7 +986,7 @@ func (t *Tracee) validateKallsymsDependencies() { return nil }) - for _, eventId := range t.policyManager.EventsSelected() { + for _, eventId := range t.policyManager.GetSelectedEvents() { if !validateEvent(eventId) { // Cancel the event, its dependencies and its dependent events err := t.eventsDependencies.RemoveEvent(eventId) @@ -1024,7 +1015,7 @@ func (t *Tracee) setProgramsAutoload() { return progHandles } - for _, eventId := range t.policyManager.EventsSelected() { + for _, eventId := range t.policyManager.GetSelectedEvents() { progHandles := evtDefProgHandlesDeps(eventId) for _, progHandle := range progHandles { err := t.defaultProbes.Autoload(progHandle, true) @@ -1126,12 +1117,22 @@ func (t *Tracee) populateBPFMaps() error { } } - // Initialize config and filter maps - err = t.populateFilterMaps(false) + // Populates the eBPF filter maps with the given policies + err = t.policyManager.UpdateBPF( + t.bpfModule, + t.containers, + t.eventDecodeTypes, + ) if err != nil { return errfmt.WrapError(err) } + // Create new config and update eBPF map + cfg := t.newConfig() + if err := cfg.UpdateBPF(t.bpfModule); err != nil { + return errfmt.WrapError(err) + } + // Populate containers map with existing containers err = t.containers.PopulateBpfMap(t.bpfModule) if err != nil { @@ -1187,8 +1188,8 @@ func (t *Tracee) populateBPFMaps() error { } // Initialize tail call dependencies - eventsToSubmit := t.policyManager.EventsToSubmit() - tailCalls := events.Core.GetTailCalls(eventsToSubmit) + selectedEvents := t.policyManager.GetSelectedEvents() + tailCalls := events.Core.GetTailCalls(selectedEvents) for _, tailCall := range tailCalls { err := t.initTailCall(tailCall) if err != nil { @@ -1199,29 +1200,6 @@ func (t *Tracee) populateBPFMaps() error { return nil } -// populateFilterMaps populates the eBPF maps with the given policies -func (t *Tracee) populateFilterMaps(updateProcTree bool) error { - polCfg, err := t.policyManager.UpdateBPF( - t.bpfModule, - t.containers, - t.eventDecodeTypes, - true, - updateProcTree, - ) - if err != nil { - return errfmt.WrapError(err) - } - - // Create new config with updated policies and update eBPF map - - cfg := t.newConfig(polCfg) - if err := cfg.UpdateBPF(t.bpfModule); err != nil { - return errfmt.WrapError(err) - } - - return nil -} - // attachEvent attaches the probes of the given event to their respective eBPF programs. // Calling attachment of probes if a supported behavior, and will do nothing // if it has been attached already. @@ -1292,7 +1270,7 @@ func (t *Tracee) attachProbes() error { }) // Attach probes to their respective eBPF programs or cancel events if a required probe is missing. 
- for _, eventID := range t.policyManager.EventsSelected() { + for _, eventID := range t.policyManager.GetSelectedEvents() { err := t.attachEvent(eventID) if err != nil { err := t.eventsDependencies.RemoveEvent(eventID) @@ -1368,12 +1346,6 @@ func (t *Tracee) initBPF() error { return errfmt.WrapError(err) } - // returned PoliciesConfig is not used here, therefore it's discarded - _, err = t.policyManager.UpdateBPF(t.bpfModule, t.containers, t.eventDecodeTypes, false, true) - if err != nil { - return errfmt.WrapError(err) - } - // Initialize perf buffers and needed channels t.eventsChannel = make(chan []byte, 1000) @@ -1652,7 +1624,7 @@ func (t *Tracee) getSelfLoadedPrograms(kprobesOnly bool) map[string]int { // The symbol is do_init_module: kprobe with the program trace_do_init_module, kretprobe with the program trace_ret_do_init_module uniqueHooksMap := map[probeMapKey]struct{}{} - for _, tr := range t.policyManager.EventsSelected() { + for _, tr := range t.policyManager.GetSelectedEvents() { definition := events.Core.GetDefinitionByID(tr) if definition.NotValid() { continue @@ -1711,45 +1683,45 @@ func (t *Tracee) getSelfLoadedPrograms(kprobesOnly bool) map[string]int { // userland process itself, and not from the kernel. These events usually serve as informational // events for the signatures engine/logic. func (t *Tracee) invokeInitEvents(out chan *trace.Event) { - var matchedPolicies uint64 + var matchedRules []uint64 - setMatchedPolicies := func(event *trace.Event, matchedPolicies uint64) { - event.PoliciesVersion = 1 // version will be removed soon - event.MatchedPoliciesKernel = matchedPolicies - event.MatchedPoliciesUser = matchedPolicies - event.MatchedPolicies = t.policyManager.MatchedNames(matchedPolicies) + setMatchedRules := func(event *trace.Event, matchedRules []uint64) { + event.RulesVersion = 1 // version will be removed soon + event.MatchedRulesKernel = matchedRules + event.MatchedRulesUser = matchedRules + event.MatchedPolicies = t.policyManager.GetMatchedRulesInfo(events.ID(event.EventID), event.MatchedRulesUser) } - policiesMatch := func(id events.ID) uint64 { - return t.policyManager.MatchEventInAnyPolicy(id) + rulesMatch := func(id events.ID) []uint64 { + return t.policyManager.GetAllMatchedRulesBitmap(id) } // Initial namespace events - matchedPolicies = policiesMatch(events.TraceeInfo) - if matchedPolicies > 0 { + matchedRules = rulesMatch(events.TraceeInfo) + if !utils.IsBitmapArrayEmpty(matchedRules) { traceeDataEvent := events.TraceeInfoEvent(t.bootTime, t.startTime) - setMatchedPolicies(&traceeDataEvent, matchedPolicies) + setMatchedRules(&traceeDataEvent, matchedRules) out <- &traceeDataEvent _ = t.stats.EventCount.Increment() } - matchedPolicies = policiesMatch(events.InitNamespaces) - if matchedPolicies > 0 { + matchedRules = rulesMatch(events.InitNamespaces) + if !utils.IsBitmapArrayEmpty(matchedRules) { systemInfoEvent := events.InitNamespacesEvent() - setMatchedPolicies(&systemInfoEvent, matchedPolicies) + setMatchedRules(&systemInfoEvent, matchedRules) out <- &systemInfoEvent _ = t.stats.EventCount.Increment() } // Initial existing containers events (1 event per container) - matchedPolicies = policiesMatch(events.ExistingContainer) - if matchedPolicies > 0 { + matchedRules = rulesMatch(events.ExistingContainer) + if !utils.IsBitmapArrayEmpty(matchedRules) { existingContainerEvents := events.ExistingContainersEvents(t.containers, t.config.NoContainersEnrich) for i := range existingContainerEvents { event := &(existingContainerEvents[i]) - 
setMatchedPolicies(event, matchedPolicies) + setMatchedRules(event, matchedRules) out <- event _ = t.stats.EventCount.Increment() } @@ -1757,10 +1729,10 @@ func (t *Tracee) invokeInitEvents(out chan *trace.Event) { // Ftrace hook event - matchedPolicies = policiesMatch(events.FtraceHook) - if matchedPolicies > 0 { + matchedRules = rulesMatch(events.FtraceHook) + if !utils.IsBitmapArrayEmpty(matchedRules) { ftraceBaseEvent := events.GetFtraceBaseEvent() - setMatchedPolicies(ftraceBaseEvent, matchedPolicies) + setMatchedRules(ftraceBaseEvent, matchedRules) logger.Debugw("started ftraceHook goroutine") // TODO: Ideally, this should be inside the goroutine and be computed before each run, @@ -1777,7 +1749,7 @@ func (t *Tracee) invokeInitEvents(out chan *trace.Event) { // netEnabled returns true if any base network event is to be traced func (t *Tracee) netEnabled() bool { - for _, id := range t.policyManager.EventsSelected() { + for _, id := range t.policyManager.GetSelectedEvents() { if id >= events.NetPacketBase && id <= events.MaxNetID { return true } @@ -1828,19 +1800,13 @@ func (t *Tracee) triggerMemDump(event trace.Event) []error { var errs []error - for it := t.policyManager.CreateAllIterator(); it.HasNext(); { - p := it.Next() - // This might break in the future if PrintMemDump will become a dependency of another event. - _, isSelected := p.Rules[events.PrintMemDump] - if !isSelected { - continue - } - printMemDumpFilters := p.Rules[events.PrintMemDump].DataFilter.GetFieldFilters() + for _, rule := range t.policyManager.GetRules(events.PrintMemDump) { + printMemDumpFilters := rule.Data.DataFilter.GetFieldFilters() if len(printMemDumpFilters) == 0 { - errs = append(errs, errfmt.Errorf("policy %d: no address or symbols were provided to print_mem_dump event. "+ + errs = append(errs, errfmt.Errorf("policy %s: no address or symbols were provided to print_mem_dump event. 
"+ "please provide it via -e print_mem_dump.data.address="+ ", -e print_mem_dump.data.symbol_name=: or "+ - "-e print_mem_dump.data.symbol_name= if specifying a system owned symbol", p.ID)) + "-e print_mem_dump.data.symbol_name= if specifying a system owned symbol", rule.Policy.Name)) continue } @@ -1855,7 +1821,7 @@ func (t *Tracee) triggerMemDump(event trace.Event) []error { field := lengthFilter.Equal()[0] length, err = strconv.ParseUint(field, 10, 64) if err != nil { - errs = append(errs, errfmt.Errorf("policy %d: invalid length provided to print_mem_dump event: %v", p.ID, err)) + errs = append(errs, errfmt.Errorf("policy %s: invalid length provided to print_mem_dump event: %v", rule.Policy.Name, err)) continue } @@ -1866,7 +1832,7 @@ func (t *Tracee) triggerMemDump(event trace.Event) []error { for _, field := range addressFilter.Equal() { address, err := strconv.ParseUint(field, 16, 64) if err != nil { - errs[p.ID] = errfmt.Errorf("policy %d: invalid address provided to print_mem_dump event: %v", p.ID, err) + errs = append(errs, errfmt.Errorf("policy %s: invalid address provided to print_mem_dump event: %v", rule.Policy.Name, err)) continue } @@ -1889,14 +1855,14 @@ func (t *Tracee) triggerMemDump(event trace.Event) []error { owner = symbolSlice[0] name = symbolSlice[1] } else { - errs = append(errs, errfmt.Errorf("policy %d: invalid symbols provided to print_mem_dump event: %s - more than one ':' provided", p.ID, field)) + errs = append(errs, errfmt.Errorf("policy %s: invalid symbols provided to print_mem_dump event: %s - more than one ':' provided", rule.Policy.Name, field)) continue } symbol, err := t.getKernelSymbols().GetSymbolByOwnerAndName(owner, name) if err != nil { if owner != "system" { - errs = append(errs, errfmt.Errorf("policy %d: invalid symbols provided to print_mem_dump event: %s - %v", p.ID, field, err)) + errs = append(errs, errfmt.Errorf("policy %s: invalid symbols provided to print_mem_dump event: %s - %v", rule.Policy.Name, field, err)) continue } @@ -1923,7 +1889,7 @@ func (t *Tracee) triggerMemDump(event trace.Event) []error { values[i] = v } attemptedSymbols := fmt.Sprintf("{%s,%s,%s,%s}%s", values...) - errs = append(errs, errfmt.Errorf("policy %d: invalid symbols provided to print_mem_dump event: %s", p.ID, attemptedSymbols)) + errs = append(errs, errfmt.Errorf("policy %s: invalid symbols provided to print_mem_dump event: %s", rule.Policy.Name, attemptedSymbols)) continue } @@ -1958,28 +1924,18 @@ func (t *Tracee) triggerMemDumpCall(address uint64, length uint64, eventHandle u // SubscribeAll returns a stream subscribed to all policies func (t *Tracee) SubscribeAll() *streams.Stream { - return t.subscribe(policy.PolicyAll) + return t.subscribe(nil) } // Subscribe returns a stream subscribed to selected policies -func (t *Tracee) Subscribe(policyNames []string) (*streams.Stream, error) { - var policyMask uint64 - - for _, policyName := range policyNames { - p, err := t.policyManager.LookupByName(policyName) - if err != nil { - return nil, err - } - utils.SetBit(&policyMask, uint(p.ID)) - } - - return t.subscribe(policyMask), nil +func (t *Tracee) Subscribe(policyNames []string) *streams.Stream { + return t.subscribe(policyNames) } -func (t *Tracee) subscribe(policyMask uint64) *streams.Stream { +func (t *Tracee) subscribe(policyNames []string) *streams.Stream { // TODO: the channel size matches the pipeline channel size, // but we should make it configurable in the future. 
- return t.streamsManager.Subscribe(policyMask, t.config.PipelineChannelSize) + return t.streamsManager.Subscribe(policyNames, t.config.PipelineChannelSize) } // Unsubscribe unsubscribes stream @@ -2060,47 +2016,3 @@ func (t *Tracee) DisableEvent(eventName string) error { return nil } - -// EnableRule enables a rule in the specified policies -func (t *Tracee) EnableRule(policyNames []string, ruleId string) error { - eventID, found := events.Core.GetDefinitionIDByName(ruleId) - if !found { - return errfmt.Errorf("error rule not found: %s", ruleId) - } - - for _, policyName := range policyNames { - p, err := t.policyManager.LookupByName(policyName) - if err != nil { - return err - } - - err = t.policyManager.EnableRule(p.ID, eventID) - if err != nil { - return err - } - } - - return nil -} - -// DisableRule disables a rule in the specified policies -func (t *Tracee) DisableRule(policyNames []string, ruleId string) error { - eventID, found := events.Core.GetDefinitionIDByName(ruleId) - if !found { - return errfmt.Errorf("error rule not found: %s", ruleId) - } - - for _, policyName := range policyNames { - p, err := t.policyManager.LookupByName(policyName) - if err != nil { - return err - } - - err = t.policyManager.DisableRule(p.ID, eventID) - if err != nil { - return err - } - } - - return nil -} diff --git a/pkg/events/core.go b/pkg/events/core.go index 9a27447b5251..01b6d99fcb06 100644 --- a/pkg/events/core.go +++ b/pkg/events/core.go @@ -121,6 +121,7 @@ const ( OpenFileMount SecuritySbUmount SecurityTaskPrctl + PolicyScopes // TODO: make this an internal event MaxCommonID ) diff --git a/pkg/events/derive/derive.go b/pkg/events/derive/derive.go index 101e4b98a7fc..c6101f9cbf0b 100644 --- a/pkg/events/derive/derive.go +++ b/pkg/events/derive/derive.go @@ -134,7 +134,7 @@ func buildDerivedEvent(baseEvent *trace.Event, skeleton deriveBase, argsValues [ return trace.Event{}, unexpectedArgCountError(skeleton.Name, len(skeleton.Fields), len(argsValues)) } de := *baseEvent // shallow copy - de.PoliciesVersion = baseEvent.PoliciesVersion + de.RulesVersion = baseEvent.RulesVersion de.EventID = skeleton.ID de.EventName = skeleton.Name de.ReturnValue = 0 diff --git a/pkg/events/derive/symbols_collision.go b/pkg/events/derive/symbols_collision.go index 2e54e8e3ca13..6abf22118338 100644 --- a/pkg/events/derive/symbols_collision.go +++ b/pkg/events/derive/symbols_collision.go @@ -28,17 +28,14 @@ import ( func SymbolsCollision( soLoader sharedobjs.DynamicSymbolsLoader, - pManager *policy.Manager, + pManager *policy.PolicyManager, ) DeriveFunction { symbolsCollisionFilters := map[string]filters.Filter[*filters.StringFilter]{} // pick white and black lists from the filters (TODO: change this) - for it := pManager.CreateAllIterator(); it.HasNext(); { - p := it.Next() - if rule, ok := p.Rules[events.SymbolsCollision]; ok { - f := rule.DataFilter.GetFieldFilters() - maps.Copy(symbolsCollisionFilters, f) - } + for _, rule := range pManager.GetRules(events.SymbolsCollision) { + f := rule.Data.DataFilter.GetFieldFilters() + maps.Copy(symbolsCollisionFilters, f) } symbolsWhitelist := []string{} diff --git a/pkg/events/derive/symbols_loaded.go b/pkg/events/derive/symbols_loaded.go index 075d157fa690..1a93ecd27286 100644 --- a/pkg/events/derive/symbols_loaded.go +++ b/pkg/events/derive/symbols_loaded.go @@ -20,16 +20,13 @@ import ( func SymbolsLoaded( soLoader sharedobjs.DynamicSymbolsLoader, - pManager *policy.Manager, + pManager *policy.PolicyManager, ) DeriveFunction { symbolsLoadedFilters := 
map[string]filters.Filter[*filters.StringFilter]{} - for it := pManager.CreateAllIterator(); it.HasNext(); { - p := it.Next() - if rule, ok := p.Rules[events.SymbolsLoaded]; ok { - f := rule.DataFilter.GetFieldFilters() - maps.Copy(symbolsLoadedFilters, f) - } + for _, rule := range pManager.GetRules(events.SymbolsLoaded) { + f := rule.Data.DataFilter.GetFieldFilters() + maps.Copy(symbolsLoadedFilters, f) } loadWatchedSymbols := []string{} diff --git a/pkg/events/findings/findings.go b/pkg/events/findings/findings.go index d8989a9a2009..275e7febc377 100644 --- a/pkg/events/findings/findings.go +++ b/pkg/events/findings/findings.go @@ -31,40 +31,40 @@ func newEvent(id int, f *detect.Finding, e trace.Event) *trace.Event { metadata := getMetadataFromSignatureMetadata(f.SigMetadata) return &trace.Event{ - EventID: id, - EventName: f.SigMetadata.EventName, - Timestamp: e.Timestamp, - ThreadStartTime: e.ThreadStartTime, - ProcessorID: e.ProcessorID, - ProcessID: e.ProcessID, - CgroupID: e.CgroupID, - ThreadID: e.ThreadID, - ParentProcessID: e.ParentProcessID, - HostProcessID: e.HostProcessID, - HostThreadID: e.HostThreadID, - HostParentProcessID: e.HostParentProcessID, - UserID: e.UserID, - MountNS: e.MountNS, - PIDNS: e.PIDNS, - ProcessName: e.ProcessName, - Executable: e.Executable, - HostName: e.HostName, - ContainerID: e.ContainerID, - Container: e.Container, - Kubernetes: e.Kubernetes, - ReturnValue: e.ReturnValue, - Syscall: e.Syscall, - StackAddresses: e.StackAddresses, - ContextFlags: e.ContextFlags, - ThreadEntityId: e.ThreadEntityId, - ProcessEntityId: e.ProcessEntityId, - ParentEntityId: e.ParentEntityId, - PoliciesVersion: e.PoliciesVersion, - MatchedPoliciesKernel: e.MatchedPoliciesKernel, - MatchedPoliciesUser: e.MatchedPoliciesUser, - ArgsNum: len(arguments), - Args: arguments, - Metadata: metadata, + EventID: id, + EventName: f.SigMetadata.EventName, + Timestamp: e.Timestamp, + ThreadStartTime: e.ThreadStartTime, + ProcessorID: e.ProcessorID, + ProcessID: e.ProcessID, + CgroupID: e.CgroupID, + ThreadID: e.ThreadID, + ParentProcessID: e.ParentProcessID, + HostProcessID: e.HostProcessID, + HostThreadID: e.HostThreadID, + HostParentProcessID: e.HostParentProcessID, + UserID: e.UserID, + MountNS: e.MountNS, + PIDNS: e.PIDNS, + ProcessName: e.ProcessName, + Executable: e.Executable, + HostName: e.HostName, + ContainerID: e.ContainerID, + Container: e.Container, + Kubernetes: e.Kubernetes, + ReturnValue: e.ReturnValue, + Syscall: e.Syscall, + StackAddresses: e.StackAddresses, + ContextFlags: e.ContextFlags, + ThreadEntityId: e.ThreadEntityId, + ProcessEntityId: e.ProcessEntityId, + ParentEntityId: e.ParentEntityId, + RulesVersion: e.RulesVersion, + MatchedRulesKernel: e.MatchedRulesKernel, + MatchedRulesUser: e.MatchedRulesUser, + ArgsNum: len(arguments), + Args: arguments, + Metadata: metadata, } } diff --git a/pkg/events/findings/findings_test.go b/pkg/events/findings/findings_test.go index 8af7188052d6..500f2d9e6612 100644 --- a/pkg/events/findings/findings_test.go +++ b/pkg/events/findings/findings_test.go @@ -42,10 +42,10 @@ func TestFindingToEvent(t *testing.T) { PodNamespace: "namespace", PodUID: "uid", }, - ReturnValue: 10, - MatchedPoliciesKernel: 1, - MatchedPoliciesUser: 1, - ArgsNum: 3, + ReturnValue: 10, + MatchedRulesKernel: []uint64{1}, + MatchedRulesUser: []uint64{1}, + ArgsNum: 3, Args: []trace.Argument{ { ArgMeta: trace.ArgMeta{ @@ -188,10 +188,10 @@ func createFakeEventAndFinding() detect.Finding { PodNamespace: "namespace", PodUID: "uid", }, - ReturnValue: 10, - 
MatchedPoliciesKernel: 1, - MatchedPoliciesUser: 1, - ArgsNum: 1, + ReturnValue: 10, + MatchedRulesKernel: []uint64{1}, + MatchedRulesUser: []uint64{1}, + ArgsNum: 1, Args: []trace.Argument{ { ArgMeta: trace.ArgMeta{ diff --git a/pkg/events/trigger/context.go b/pkg/events/trigger/context.go index de7abfcdaf94..75aa9d98aa69 100644 --- a/pkg/events/trigger/context.go +++ b/pkg/events/trigger/context.go @@ -84,9 +84,9 @@ func (store *context) Apply(event trace.Event) (trace.Event, error) { invoking.EventID = event.EventID invoking.ReturnValue = 0 invoking.Args = make([]trace.Argument, len(event.Args)) - invoking.PoliciesVersion = event.PoliciesVersion - invoking.MatchedPoliciesKernel = event.MatchedPoliciesKernel - invoking.MatchedPoliciesUser = event.MatchedPoliciesUser + invoking.RulesVersion = event.RulesVersion + invoking.MatchedRulesKernel = event.MatchedRulesKernel + invoking.MatchedRulesUser = event.MatchedRulesUser copied := copy(invoking.Args, event.Args) if copied != len(event.Args) { return trace.Event{}, errfmt.Errorf("failed to apply event's args") diff --git a/pkg/policy/ebpf.go b/pkg/policy/ebpf.go index f022170aba27..2145b9d5d831 100644 --- a/pkg/policy/ebpf.go +++ b/pkg/policy/ebpf.go @@ -1,11 +1,8 @@ package policy import ( - "bytes" "encoding/binary" "fmt" - "os" - "strconv" "unsafe" bpf "github.com/aquasecurity/libbpfgo" @@ -15,342 +12,210 @@ import ( "github.com/aquasecurity/tracee/pkg/events" "github.com/aquasecurity/tracee/pkg/events/data" "github.com/aquasecurity/tracee/pkg/filters" - "github.com/aquasecurity/tracee/pkg/logger" + "github.com/aquasecurity/tracee/pkg/utils" "github.com/aquasecurity/tracee/pkg/utils/proc" ) const ( - // outer maps - UIDFilterMapVersion = "uid_filter_version" - PIDFilterMapVersion = "pid_filter_version" - MntNSFilterMapVersion = "mnt_ns_filter_version" - PidNSFilterMapVersion = "pid_ns_filter_version" - UTSFilterMapVersion = "uts_ns_filter_version" - CommFilterMapVersion = "comm_filter_version" - DataFilterPrefixMapVersion = "data_filter_prefix_version" - DataFilterSuffixMapVersion = "data_filter_suffix_version" - DataFilterExactMapVersion = "data_filter_exact_version" - CgroupIdFilterVersion = "cgroup_id_filter_version" - ProcessTreeFilterMapVersion = "process_tree_map_version" - BinaryFilterMapVersion = "binary_filter_version" - PoliciesConfigVersion = "policies_config_version" - - // inner maps - UIDFilterMap = "uid_filter" - PIDFilterMap = "pid_filter" - MntNSFilterMap = "mnt_ns_filter" - PidNSFilterMap = "pid_ns_filter" - UTSFilterMap = "uts_ns_filter" - CommFilterMap = "comm_filter" - DataFilterPrefixMap = "data_filter_prefix" - DataFilterSuffixMap = "data_filter_suffix" - DataFilterExactMap = "data_filter_exact" - CgroupIdFilterMap = "cgroup_id_filter" - ProcessTreeFilterMap = "process_tree_map" - BinaryFilterMap = "binary_filter" - PoliciesConfigMap = "policies_config_map" - - ProcInfoMap = "proc_info_map" + // Filter version map names + UIDFilterMapVersion = "uid_filter_version" + PIDFilterMapVersion = "pid_filter_version" + MntNSFilterMapVersion = "mnt_ns_filter_version" + PidNSFilterMapVersion = "pid_ns_filter_version" + UTSFilterMapVersion = "uts_ns_filter_version" + CommFilterMapVersion = "comm_filter_version" + DataFilterPrefixMapVersion = "data_filter_prefix_version" + DataFilterSuffixMapVersion = "data_filter_suffix_version" + DataFilterExactMapVersion = "data_filter_exact_version" + CgroupIdFilterVersion = "cgroup_id_filter_version" + BinaryFilterMapVersion = "binary_filter_version" + + // Filter map names + UIDFilterMap = 
"uid_filter" + PIDFilterMap = "pid_filter" + MntNSFilterMap = "mnt_ns_filter" + PidNSFilterMap = "pid_ns_filter" + UTSFilterMap = "uts_ns_filter" + CommFilterMap = "comm_filter" + CgroupIdFilterMap = "cgroup_id_filter" + BinaryFilterMap = "binary_filter" + DataFilterPrefixMap = "data_filter_prefix" + DataFilterSuffixMap = "data_filter_suffix" + DataFilterExactMap = "data_filter_exact" + + // Special maps + ProcInfoMap = "proc_info_map" + EventsConfigMap = "events_config_map" + + // Sizes and limits + maxBpfStrFilterSize = 256 // should be at least as big as the bpf map value size + maxBpfBinPathSize = 256 // maximum binary path size supported by BPF (MAX_BIN_PATH_SIZE) + bpfBinFilterSize = 264 // the key size of the BPF binary filter map entry + maxBpfDataFilterStrSize = 256 // maximum str size supported by Data filter in BPF (MAX_DATA_FILTER_STR_SIZE) + bpfDataFilterStrSize = 260 // path size + 4 bytes prefix len ) -// createNewInnerMapEventId creates a new map for the given map name, version and event id. -func createNewInnerMapEventId(m *bpf.Module, mapName string, mapVersion uint16, eventId events.ID) (*bpf.BPFMapLow, string, error) { - // use the map prototype to create a new map with the same properties - prototypeMap, err := m.GetMap(mapName) - if err != nil { - return nil, "", errfmt.WrapError(err) - } - - info, err := bpf.GetMapInfoByFD(prototypeMap.FileDescriptor()) +// updateBPF updates the BPF maps with the policies filters. +// createNewMaps indicates whether new maps should be created or not. +func (pm *PolicyManager) updateBPF( + bpfModule *bpf.Module, + cts *containers.Manager, + eventsFields map[events.ID][]data.DecodeAs, +) error { + fMaps, err := pm.computeFilterMaps(cts) if err != nil { - return nil, "", errfmt.WrapError(err) + return errfmt.WrapError(err) } + pm.fMaps = fMaps - btfFD, err := bpf.GetBTFFDByID(info.BTFID) - if err != nil { - return nil, "", errfmt.WrapError(err) + if err := pm.updateEventsConfigMap(bpfModule, eventsFields, fMaps.dataFilterConfigs); err != nil { + return errfmt.WrapError(err) } - opts := &bpf.BPFMapCreateOpts{ - BTFFD: uint32(btfFD), - BTFKeyTypeID: info.BTFKeyTypeID, - BTFValueTypeID: info.BTFValueTypeID, - BTFVmlinuxValueTypeID: info.BTFVmlinuxValueTypeID, - MapFlags: info.MapFlags, - MapExtra: info.MapExtra, - MapIfIndex: info.IfIndex, + // Update UInt RuleBitmaps filter maps + if err := pm.updateUIntFilterBPF(bpfModule, fMaps.uidFilters, UIDFilterMap, UIDFilterMapVersion); err != nil { + return errfmt.WrapError(err) } - - newInnerMapName := fmt.Sprintf("%s_%d_%d", mapName, mapVersion, uint32(eventId)) - - newInnerMap, err := bpf.CreateMap( - prototypeMap.Type(), - newInnerMapName, // new map name - prototypeMap.KeySize(), - prototypeMap.ValueSize(), - int(prototypeMap.MaxEntries()), - opts, - ) - if err != nil { - return nil, "", errfmt.WrapError(err) + if err := pm.updateUIntFilterBPF(bpfModule, fMaps.pidFilters, PIDFilterMap, PIDFilterMapVersion); err != nil { + return errfmt.WrapError(err) } - - return newInnerMap, newInnerMapName, nil -} - -// createNewInnerMap creates a new map for the given map name and version. 
-func createNewInnerMap(m *bpf.Module, mapName string, mapVersion uint16) (*bpf.BPFMapLow, error) { - // use the map prototype to create a new map with the same properties - prototypeMap, err := m.GetMap(mapName) - if err != nil { - return nil, errfmt.WrapError(err) + if err := pm.updateUIntFilterBPF(bpfModule, fMaps.mntNSFilters, MntNSFilterMap, MntNSFilterMapVersion); err != nil { + return errfmt.WrapError(err) } - - info, err := bpf.GetMapInfoByFD(prototypeMap.FileDescriptor()) - if err != nil { - return nil, errfmt.WrapError(err) + if err := pm.updateUIntFilterBPF(bpfModule, fMaps.pidNSFilters, PidNSFilterMap, PidNSFilterMapVersion); err != nil { + return errfmt.WrapError(err) } - - btfFD, err := bpf.GetBTFFDByID(info.BTFID) - if err != nil { - return nil, errfmt.WrapError(err) + if err := pm.updateUIntFilterBPF(bpfModule, fMaps.cgroupIdFilters, CgroupIdFilterMap, CgroupIdFilterVersion); err != nil { + return errfmt.WrapError(err) } - opts := &bpf.BPFMapCreateOpts{ - BTFFD: uint32(btfFD), - BTFKeyTypeID: info.BTFKeyTypeID, - BTFValueTypeID: info.BTFValueTypeID, - BTFVmlinuxValueTypeID: info.BTFVmlinuxValueTypeID, - MapFlags: info.MapFlags, - MapExtra: info.MapExtra, - MapIfIndex: info.IfIndex, + // Update String RuleBitmaps filter maps + if err := pm.updateStringFilterBPF(bpfModule, fMaps.utsFilters, UTSFilterMap, UTSFilterMapVersion); err != nil { + return errfmt.WrapError(err) } - newInnerMap, err := bpf.CreateMap( - prototypeMap.Type(), - fmt.Sprintf("%s_%d", mapName, mapVersion), // new map name - prototypeMap.KeySize(), - prototypeMap.ValueSize(), - int(prototypeMap.MaxEntries()), - opts, - ) - if err != nil { - return nil, errfmt.WrapError(err) + if err := pm.updateStringFilterBPF(bpfModule, fMaps.commFilters, CommFilterMap, CommFilterMapVersion); err != nil { + return errfmt.WrapError(err) } - return newInnerMap, nil -} - -// updateOuterMapWithEventId updates the outer map with the given map name, version and event id. -func updateOuterMapWithEventId(m *bpf.Module, mapName string, mapVersion uint16, eventId events.ID, innerMap *bpf.BPFMapLow) error { - outerMap, err := m.GetMap(mapName) - if err != nil { + // Update Binary RuleBitmaps filter map + if err := pm.updateBinaryFilterBPF(bpfModule, fMaps.binaryFilters, BinaryFilterMap, BinaryFilterMapVersion); err != nil { return errfmt.WrapError(err) } - - keyBytes := make([]byte, 8) - binary.LittleEndian.PutUint16(keyBytes, mapVersion) // version - binary.LittleEndian.PutUint16(keyBytes[2:], 0) // padding - binary.LittleEndian.PutUint32(keyBytes[4:], uint32(eventId)) // eventid - keyPointer := unsafe.Pointer(&keyBytes[0]) - - innerMapFD := uint32(innerMap.FileDescriptor()) - valuePointer := unsafe.Pointer(&innerMapFD) - - // update version filter map - // - key is the map version + event id - // - value is the related filter map FD. - if err := outerMap.Update(keyPointer, valuePointer); err != nil { + // Update ProcInfo map (required for binary filters) + if err := populateProcInfoMap(bpfModule, fMaps.binaryFilters); err != nil { return errfmt.WrapError(err) } - return nil -} - -// updateOuterMap updates the outer map with the given map name and version. 
-func updateOuterMap(m *bpf.Module, mapName string, mapVersion uint16, innerMap *bpf.BPFMapLow) error { - outerMap, err := m.GetMap(mapName) - if err != nil { + // Update Data Filters + if err := pm.updateStringDataFilterLPMBPF(bpfModule, fMaps.dataPrefixFilters, DataFilterPrefixMap, DataFilterPrefixMapVersion); err != nil { return errfmt.WrapError(err) } - - u16Key := mapVersion - keyPointer := unsafe.Pointer(&u16Key) - innerMapFD := uint32(innerMap.FileDescriptor()) - valuePointer := unsafe.Pointer(&innerMapFD) - - // update version filter map - // - key is the map version - // - value is the related filter map FD. - if err := outerMap.Update(keyPointer, valuePointer); err != nil { + if err := pm.updateStringDataFilterLPMBPF(bpfModule, fMaps.dataSuffixFilters, DataFilterSuffixMap, DataFilterSuffixMapVersion); err != nil { return errfmt.WrapError(err) } - - return nil -} - -// createNewDataFilterMapsVersion creates a new data filter maps based on filter equalities. -func (ps *policies) createNewDataFilterMapsVersion( - bpfModule *bpf.Module, - fEqs *filtersEqualities, -) error { - mapsNames := map[string]struct { - outerMapName string - equalities map[KernelDataFields]equality - }{ - DataFilterPrefixMap: {DataFilterPrefixMapVersion, fEqs.dataEqualitiesPrefix}, - DataFilterSuffixMap: {DataFilterSuffixMapVersion, fEqs.dataEqualitiesSuffix}, - DataFilterExactMap: {DataFilterExactMapVersion, fEqs.dataEqualitiesExact}, - } - - polsVersion := ps.version() - for innerMapName, mapEquality := range mapsNames { - outerMapName := mapEquality.outerMapName - // For each combination of version and event ID, a new inner map is created - // - // outerMap maps: - // 1. data_filter_prefix_version (u16, u32), data_filter_prefix - // 2. data_filter_suffix_version (u16, u32), data_filter_suffix - // 3. data_filter_exact_version (u16, u32), data_filter_exact - for key := range mapEquality.equalities { - innerMapNameTemp := fmt.Sprintf("%s_%d_%d", innerMapName, polsVersion, uint32(key.ID)) - if ps.bpfInnerMaps[innerMapNameTemp] != nil { - continue - } - - newInnerMap, newInnerMapName, err := createNewInnerMapEventId(bpfModule, innerMapName, polsVersion, key.ID) - if err != nil { - return errfmt.WrapError(err) - } - - if err := updateOuterMapWithEventId(bpfModule, outerMapName, polsVersion, key.ID, newInnerMap); err != nil { - return errfmt.WrapError(err) - } - - // store pointer to the new inner map version - ps.bpfInnerMaps[newInnerMapName] = newInnerMap - } + if err := pm.updateStringDataFilterBPF(bpfModule, fMaps.dataExactFilters, DataFilterExactMap, DataFilterExactMapVersion); err != nil { + return errfmt.WrapError(err) } return nil } -// createNewFilterMapsVersion creates a new version of the filter maps. -func (ps *policies) createNewFilterMapsVersion(bpfModule *bpf.Module) error { - mapsNames := map[string]string{ // inner map name: outer map name - UIDFilterMap: UIDFilterMapVersion, - PIDFilterMap: PIDFilterMapVersion, - MntNSFilterMap: MntNSFilterMapVersion, - PidNSFilterMap: PidNSFilterMapVersion, - UTSFilterMap: UTSFilterMapVersion, - CommFilterMap: CommFilterMapVersion, - CgroupIdFilterMap: CgroupIdFilterVersion, - ProcessTreeFilterMap: ProcessTreeFilterMapVersion, - BinaryFilterMap: BinaryFilterMapVersion, - } - - polsVersion := ps.version() - for innerMapName, outerMapName := range mapsNames { - // TODO: This only spawns new inner filter maps. Their termination must - // be tackled by the versioning mechanism. 
- newInnerMap, err := createNewInnerMap(bpfModule, innerMapName, polsVersion) - if err != nil { - return errfmt.WrapError(err) - } - - // outerMap maps: - // 1. uid_filter_version u16, uid_filter - // 2. pid_filter_version u16, pid_filter - // 3. mnt_ns_filter_version u16, mnt_ns_filter - // 4. pid_ns_filter_version u16, pid_ns_filter - // 5. cgroup_id_filter_version u16, cgroup_id_filter - // 6. uts_ns_filter_version u16, uts_ns_filter - // 7. comm_filter_version u16, comm_filter - // 8. process_tree_filter_version u16, process_tree_filter - // 9. binary_filter_version u16, binary_filter - if err := updateOuterMap(bpfModule, outerMapName, polsVersion, newInnerMap); err != nil { - return errfmt.WrapError(err) - } - - // store pointer to the new inner map version - ps.bpfInnerMaps[innerMapName] = newInnerMap - } +// eBPF data filter only supports first 64 rules for each key. +type stringFilterConfigBPF struct { + prefixEnabled uint64 + suffixEnabled uint64 + exactEnabled uint64 + prefixMatchIfKeyMissing uint64 + suffixMatchIfKeyMissing uint64 + exactMatchIfKeyMissing uint64 +} - return nil +type dataFilterConfigBPF struct { + string stringFilterConfigBPF } type eventConfig struct { - submitForPolicies uint64 - fieldTypes uint64 - dataFilter dataFilterConfig + rulesVersion uint16 + hasOverflow uint8 + padding [5]uint8 // free for further use + submitForRules uint64 + fieldTypes uint64 + scopeFilters scopeFiltersConfig + dataFilter dataFilterConfigBPF } -// createNewEventsMapVersion creates a new version of the events map. -func (ps *policies) createNewEventsMapVersion( +// updateEventsConfigMap updates the events config map with the given events fields and filter config. +func (pm *PolicyManager) updateEventsConfigMap( bpfModule *bpf.Module, - rules map[events.ID]*eventFlags, eventsFields map[events.ID][]data.DecodeAs, - eventsFilterCfg map[events.ID]stringFilterConfig, + dataFilterConfigs map[events.ID]dataFilterConfig, ) error { - polsVersion := ps.version() - innerMapName := "events_map" - outerMapName := "events_map_version" - - // TODO: This only spawns a new inner event map. Their termination must - // be tackled by the versioning mechanism. 
- newInnerMap, err := createNewInnerMap(bpfModule, innerMapName, polsVersion) + eventsConfigMap, err := bpfModule.GetMap(EventsConfigMap) if err != nil { return errfmt.WrapError(err) } - if err := updateOuterMap(bpfModule, outerMapName, polsVersion, newInnerMap); err != nil { - return errfmt.WrapError(err) - } - - // store pointer to the new inner map version - ps.bpfInnerMaps[innerMapName] = newInnerMap - - for id, ecfg := range rules { - stringFilter, exist := eventsFilterCfg[id] + for id, ecfg := range pm.rules { + filterConfig, exist := dataFilterConfigs[id] if !exist { - stringFilter = stringFilterConfig{} + filterConfig = dataFilterConfig{} + } + + // Extract the first bitmap from each field of stringFilterConfig + dataFilterCfg := dataFilterConfigBPF{} + if len(filterConfig.string.prefixEnabled) > 0 { + dataFilterCfg.string.prefixEnabled = filterConfig.string.prefixEnabled[0] + } + if len(filterConfig.string.suffixEnabled) > 0 { + dataFilterCfg.string.suffixEnabled = filterConfig.string.suffixEnabled[0] + } + if len(filterConfig.string.exactEnabled) > 0 { + dataFilterCfg.string.exactEnabled = filterConfig.string.exactEnabled[0] + } + if len(filterConfig.string.prefixMatchIfKeyMissing) > 0 { + dataFilterCfg.string.prefixMatchIfKeyMissing = filterConfig.string.prefixMatchIfKeyMissing[0] + } + if len(filterConfig.string.suffixMatchIfKeyMissing) > 0 { + dataFilterCfg.string.suffixMatchIfKeyMissing = filterConfig.string.suffixMatchIfKeyMissing[0] + } + if len(filterConfig.string.exactMatchIfKeyMissing) > 0 { + dataFilterCfg.string.exactMatchIfKeyMissing = filterConfig.string.exactMatchIfKeyMissing[0] } // encoded event's field types var fieldTypes uint64 fields := eventsFields[id] - - /* - Each event can have up to 8 argument data types stored. - These argument types are encoded in a 64-bit bitmap where: - - Each byte (8 bits) represents a single argument type. - - Therefore, a maximum of 8 argument types can be represented. 
- For example, consider an event with: - - Two integer arguments (argType = 1) - - One string argument (argType = 10) - The bitmap would be encoded as (in hex representation): - 00 00 00 00 00 0A 01 01 (0x00000000000A0101) - Breaking down the bitmap from right to left (least significant byte first): - | Byte Position | Argument Type | - |---------------|------------------------| - | 0 | 0x01 (int argument) | - | 1 | 0x01 (int argument) | - | 2 | 0x0A (string argument) | - | 3 - 7 | 0x00 (padding, unused) | - */ for n, fieldType := range fields { fieldTypes = fieldTypes | (uint64(fieldType) << (8 * n)) } + // Create submit bitmap based on rules count - n least significant bits set to 1 + submitForRules := uint64(0) + if ecfg.rulesCount >= 64 { + submitForRules = ^uint64(0) // All bits set to 1 + } else if ecfg.rulesCount > 0 { + submitForRules = (uint64(1) << ecfg.rulesCount) - 1 + } + + // Set hasOverflow flag + var overflowFlag uint8 + if ecfg.hasOverflow { + overflowFlag = 1 + } + + // TODO: this should be saved in poicy manager as well, next to fMaps + scopeFiltersConfig := pm.computeBPFScopeFiltersConfig(id) + eventConfig := eventConfig{ - // bitmap of policies that require this event to be submitted - submitForPolicies: ecfg.policiesSubmit, - fieldTypes: fieldTypes, - dataFilter: dataFilterConfig{ - string: stringFilter, - }, + rulesVersion: ecfg.rulesVersion, + hasOverflow: overflowFlag, + submitForRules: submitForRules, + fieldTypes: fieldTypes, + scopeFilters: scopeFiltersConfig, + dataFilter: dataFilterCfg, } - err := newInnerMap.Update(unsafe.Pointer(&id), unsafe.Pointer(&eventConfig)) + err := eventsConfigMap.Update(unsafe.Pointer(&id), unsafe.Pointer(&eventConfig)) if err != nil { return errfmt.WrapError(err) } @@ -359,324 +224,462 @@ func (ps *policies) createNewEventsMapVersion( return nil } -// updateUIntFilterBPF updates the BPF maps for the given uint equalities. -func (ps *policies) updateUIntFilterBPF(uintEqualities map[uint64]equality, innerMapName string) error { - // UInt equalities - // 1. uid_filter u32, eq_t - // 2. pid_filter u32, eq_t - // 3. mnt_ns_filter u32, eq_t - // 4. pid_ns_filter u32, eq_t - // 5. cgroup_id_filter u32, eq_t +// ScopeFiltersConfig mirrors the C struct scope_filters_config (scope_filters_config_t). +// Order of fields is important, as it is used as a value for the EventsConfigMap BPF map. 
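A minimal sketch (not part of the patch itself) of the two bitmaps built inline in updateEventsConfigMap above. submitForRules sets the n least-significant bits for the first n rules of an event, with rule IDs 64 and above covered only by the hasOverflow flag; fieldTypes packs one decode type per byte, least-significant byte first, as the removed comment above illustrated (two int fields of type 0x01 followed by a string field of type 0x0A encode as 0x00000000000A0101). The helper names below are illustrative only and assume the surrounding file's imports and types:

// Sketch of the inline computations in updateEventsConfigMap; these helpers do
// not exist in the patch.
func submitBitmapFor(rulesCount uint) uint64 {
	switch {
	case rulesCount >= 64:
		return ^uint64(0) // all 64 bits set; rules beyond 63 rely on hasOverflow
	case rulesCount == 0:
		return 0
	default:
		return (uint64(1) << rulesCount) - 1 // bits 0..rulesCount-1 set
	}
}

func encodeFieldTypes(fields []data.DecodeAs) uint64 {
	var fieldTypes uint64
	for n, fieldType := range fields {
		fieldTypes |= uint64(fieldType) << (8 * n) // one byte per field, LSB first
	}
	return fieldTypes
}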
+type scopeFiltersConfig struct { + UIDFilterEnabled uint64 + PIDFilterEnabled uint64 + MntNsFilterEnabled uint64 + PidNsFilterEnabled uint64 + UtsNsFilterEnabled uint64 + CommFilterEnabled uint64 + CgroupIdFilterEnabled uint64 + ContFilterEnabled uint64 + NewContFilterEnabled uint64 + NewPidFilterEnabled uint64 + BinPathFilterEnabled uint64 - for k, v := range uintEqualities { - u32Key := uint32(k) - keyPointer := unsafe.Pointer(&u32Key) + UIDFilterMatchIfKeyMissing uint64 + PIDFilterMatchIfKeyMissing uint64 + MntNsFilterMatchIfKeyMissing uint64 + PidNsFilterMatchIfKeyMissing uint64 + UtsNsFilterMatchIfKeyMissing uint64 + CommFilterMatchIfKeyMissing uint64 + CgroupIdFilterMatchIfKeyMissing uint64 + ContFilterMatchIfKeyMissing uint64 + NewContFilterMatchIfKeyMissing uint64 + NewPidFilterMatchIfKeyMissing uint64 + BinPathFilterMatchIfKeyMissing uint64 +} - eqVal := make([]byte, equalityValueSize) - valuePointer := unsafe.Pointer(&eqVal[0]) +// extendedScopeFiltersConfig supports overflow rules (ID > 64) using bitmap arrays +type extendedScopeFiltersConfig struct { + UIDFilterEnabled []uint64 + PIDFilterEnabled []uint64 + MntNsFilterEnabled []uint64 + PidNsFilterEnabled []uint64 + UtsNsFilterEnabled []uint64 + CommFilterEnabled []uint64 + CgroupIdFilterEnabled []uint64 + ContFilterEnabled []uint64 + NewContFilterEnabled []uint64 + NewPidFilterEnabled []uint64 + BinPathFilterEnabled []uint64 + + UIDFilterMatchIfKeyMissing []uint64 + PIDFilterMatchIfKeyMissing []uint64 + MntNsFilterMatchIfKeyMissing []uint64 + PidNsFilterMatchIfKeyMissing []uint64 + UtsNsFilterMatchIfKeyMissing []uint64 + CommFilterMatchIfKeyMissing []uint64 + CgroupIdFilterMatchIfKeyMissing []uint64 + ContFilterMatchIfKeyMissing []uint64 + NewContFilterMatchIfKeyMissing []uint64 + NewPidFilterMatchIfKeyMissing []uint64 + BinPathFilterMatchIfKeyMissing []uint64 +} - binary.LittleEndian.PutUint64(eqVal[0:8], v.equalsInPolicies) - binary.LittleEndian.PutUint64(eqVal[8:16], v.keyUsedInPolicies) +// computeScopeFiltersConfig computes scope filter configs for ALL rules (including overflow rules > 64) +// This is used by userspace to determine which filters are enabled for overflow rules +func (pm *PolicyManager) computeScopeFiltersConfig(eventID events.ID) extendedScopeFiltersConfig { + cfg := extendedScopeFiltersConfig{} - bpfMap, ok := ps.bpfInnerMaps[innerMapName] - if !ok { - return errfmt.Errorf("bpf map not found: %s", innerMapName) - } - if err := bpfMap.Update(keyPointer, valuePointer); err != nil { - return errfmt.WrapError(err) - } + eventRules, ok := pm.rules[eventID] + if !ok { + return cfg } - return nil -} - -const ( - maxBpfStrFilterSize = 256 // should be at least as big as the bpf map value size -) + // TODO: we need to consider both policies scope filters and events scope filters. + // This can be done by accessing rule.Data.ScopeFilters + // We first need to update this in filters computation. -// updateStringFilterBPF updates the BPF maps for the given string equalities. -func (ps *policies) updateStringFilterBPF(strEqualities map[string]equality, innerMapName string) error { - // String equalities - // 1. uts_ns_filter string_filter_t, eq_t - // 2. 
comm_filter string_filter_t, eq_t - - for k, v := range strEqualities { - byteStr := make([]byte, maxBpfStrFilterSize) - copy(byteStr, k) - keyPointer := unsafe.Pointer(&byteStr[0]) + // Loop through ALL rules for this event (including overflow rules) + for _, rule := range eventRules.Rules { + if rule.Policy == nil { + continue + } - eqVal := make([]byte, equalityValueSize) - valuePointer := unsafe.Pointer(&eqVal[0]) + ruleID := rule.ID - binary.LittleEndian.PutUint64(eqVal[0:8], v.equalsInPolicies) - binary.LittleEndian.PutUint64(eqVal[8:16], v.keyUsedInPolicies) + // Enabled filters bitmap array + if rule.Policy.UIDFilter.Enabled() { + utils.SetBitInArray(&cfg.UIDFilterEnabled, ruleID) + } + if rule.Policy.PIDFilter.Enabled() { + utils.SetBitInArray(&cfg.PIDFilterEnabled, ruleID) + } + if rule.Policy.MntNSFilter.Enabled() { + utils.SetBitInArray(&cfg.MntNsFilterEnabled, ruleID) + } + if rule.Policy.PidNSFilter.Enabled() { + utils.SetBitInArray(&cfg.PidNsFilterEnabled, ruleID) + } + if rule.Policy.UTSFilter.Enabled() { + utils.SetBitInArray(&cfg.UtsNsFilterEnabled, ruleID) + } + if rule.Policy.CommFilter.Enabled() { + utils.SetBitInArray(&cfg.CommFilterEnabled, ruleID) + } + if rule.Policy.ContIDFilter.Enabled() { + utils.SetBitInArray(&cfg.CgroupIdFilterEnabled, ruleID) + } + if rule.Policy.ContFilter.Enabled() { + utils.SetBitInArray(&cfg.ContFilterEnabled, ruleID) + } + if rule.Policy.NewContFilter.Enabled() { + utils.SetBitInArray(&cfg.NewContFilterEnabled, ruleID) + } + if rule.Policy.NewPidFilter.Enabled() { + utils.SetBitInArray(&cfg.NewPidFilterEnabled, ruleID) + } + if rule.Policy.BinaryFilter.Enabled() { + utils.SetBitInArray(&cfg.BinPathFilterEnabled, ruleID) + } - bpfMap, ok := ps.bpfInnerMaps[innerMapName] - if !ok { - return errfmt.Errorf("bpf map not found: %s", innerMapName) + // MatchIfKeyMissing filters bitmap array + if rule.Policy.UIDFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.UIDFilterMatchIfKeyMissing, ruleID) } - if err := bpfMap.Update(keyPointer, valuePointer); err != nil { - return errfmt.WrapError(err) + if rule.Policy.PIDFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.PIDFilterMatchIfKeyMissing, ruleID) + } + if rule.Policy.MntNSFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.MntNsFilterMatchIfKeyMissing, ruleID) + } + if rule.Policy.PidNSFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.PidNsFilterMatchIfKeyMissing, ruleID) + } + if rule.Policy.UTSFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.UtsNsFilterMatchIfKeyMissing, ruleID) + } + if rule.Policy.CommFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.CommFilterMatchIfKeyMissing, ruleID) + } + if rule.Policy.ContIDFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.CgroupIdFilterMatchIfKeyMissing, ruleID) + } + if rule.Policy.ContFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.ContFilterMatchIfKeyMissing, ruleID) + } + if rule.Policy.NewContFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.NewContFilterMatchIfKeyMissing, ruleID) + } + if rule.Policy.NewPidFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.NewPidFilterMatchIfKeyMissing, ruleID) + } + if rule.Policy.BinaryFilter.MatchIfKeyMissing() { + utils.SetBitInArray(&cfg.BinPathFilterMatchIfKeyMissing, ruleID) } } - return nil + return cfg } -// updateProcTreeFilterBPF updates the BPF maps for the given process tree equalities. -func (ps *policies) updateProcTreeFilterBPF(procTreeEqualities map[uint32]equality, innerMapName string) error { - // ProcessTree equality - // 1. 
process_tree_filter u32, eq_t - - updateBPF := func(pid uint32, v equality) (err error) { - u32Key := pid - keyPointer := unsafe.Pointer(&u32Key) - - eqVal := make([]byte, equalityValueSize) - valuePointer := unsafe.Pointer(&eqVal[0]) - - binary.LittleEndian.PutUint64(eqVal[0:8], v.equalsInPolicies) - binary.LittleEndian.PutUint64(eqVal[8:16], v.keyUsedInPolicies) - - bpfMap, ok := ps.bpfInnerMaps[innerMapName] - if !ok { - return errfmt.Errorf("bpf map not found: %s", innerMapName) - } - if err := bpfMap.Update(keyPointer, valuePointer); err != nil { - return errfmt.WrapError(err) - } - - return nil +// computeBPFScopeFiltersConfig computes the scope filters config for eBPF (rules 0-63 only) +// by extracting the first 64 bits from the full scope config +func (pm *PolicyManager) computeBPFScopeFiltersConfig(eventID events.ID) scopeFiltersConfig { + extendedCfg := pm.computeScopeFiltersConfig(eventID) + + // Extract first 64 bits (index 0) from each bitmap array for eBPF + cfg := scopeFiltersConfig{ + UIDFilterEnabled: getFirstBitmap(extendedCfg.UIDFilterEnabled), + PIDFilterEnabled: getFirstBitmap(extendedCfg.PIDFilterEnabled), + MntNsFilterEnabled: getFirstBitmap(extendedCfg.MntNsFilterEnabled), + PidNsFilterEnabled: getFirstBitmap(extendedCfg.PidNsFilterEnabled), + UtsNsFilterEnabled: getFirstBitmap(extendedCfg.UtsNsFilterEnabled), + CommFilterEnabled: getFirstBitmap(extendedCfg.CommFilterEnabled), + CgroupIdFilterEnabled: getFirstBitmap(extendedCfg.CgroupIdFilterEnabled), + ContFilterEnabled: getFirstBitmap(extendedCfg.ContFilterEnabled), + NewContFilterEnabled: getFirstBitmap(extendedCfg.NewContFilterEnabled), + NewPidFilterEnabled: getFirstBitmap(extendedCfg.NewPidFilterEnabled), + BinPathFilterEnabled: getFirstBitmap(extendedCfg.BinPathFilterEnabled), + + UIDFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.UIDFilterMatchIfKeyMissing), + PIDFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.PIDFilterMatchIfKeyMissing), + MntNsFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.MntNsFilterMatchIfKeyMissing), + PidNsFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.PidNsFilterMatchIfKeyMissing), + UtsNsFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.UtsNsFilterMatchIfKeyMissing), + CommFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.CommFilterMatchIfKeyMissing), + CgroupIdFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.CgroupIdFilterMatchIfKeyMissing), + ContFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.ContFilterMatchIfKeyMissing), + NewContFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.NewContFilterMatchIfKeyMissing), + NewPidFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.NewPidFilterMatchIfKeyMissing), + BinPathFilterMatchIfKeyMissing: getFirstBitmap(extendedCfg.BinPathFilterMatchIfKeyMissing), } - // First, update BPF for provided pids equalities - for pid, eq := range procTreeEqualities { - if err := updateBPF(pid, eq); err != nil { - return err - } - } + return cfg +} - procDir, err := os.Open("/proc") - if err != nil { - return errfmt.Errorf("could not open proc dir: %v", err) +// getFirstBitmap extracts the first uint64 from a bitmap array (rules 0-63) +// Returns 0 if the array is empty +func getFirstBitmap(bitmapArray []uint64) uint64 { + if len(bitmapArray) == 0 { + return 0 } - defer func() { - if err := procDir.Close(); err != nil { - logger.Errorw("Closing file", "error", err) - } - }() + return bitmapArray[0] +} - entries, err := procDir.Readdirnames(-1) - if err != nil { - return errfmt.Errorf("could not read proc dir: %v", err) - } +// 
updateUIntFilterBPF updates the BPF maps for the given uint filter map. +func (pm *PolicyManager) updateUIntFilterBPF( + bpfModule *bpf.Module, + filterMap map[filterVersionKey]map[uint64][]ruleBitmap, + innerMapName string, + outerMapName string, +) error { + for vKey, innerMap := range filterMap { + // Skip if no rules exist for this version/event + if len(innerMap) == 0 { + continue + } - // Then, update BPF for all processes that are children of the provided pids - for _, entry := range entries { - pid, err := strconv.ParseUint(entry, 10, 32) + // Get or create inner map + bpfMap, _, err := pm.createAndUpdateInnerMap(bpfModule, innerMapName, outerMapName, vKey) if err != nil { - continue + return fmt.Errorf("creating/getting inner map for version %d event %d: %w", + vKey.Version, vKey.EventID, err) } - var updateBPFIfParentMatches func(uint32) - updateBPFIfParentMatches = func(curPid uint32) { - stat, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", curPid)) - if err != nil { - return - } - // see https://man7.org/linux/man-pages/man5/proc.5.html for how to read /proc/pid/stat - splitStat := bytes.SplitN(stat, []byte{' '}, 5) - if len(splitStat) != 5 { - return - } - ppid, err := strconv.Atoi(string(splitStat[3])) - if err != nil { - return - } - if ppid == 1 { - return + for key, bitmaps := range innerMap { + // Check if there are bitmaps for this key + if len(bitmaps) == 0 { + continue } - // if the parent pid is in the provided pids, update BPF with its child pid - if eq, ok := procTreeEqualities[uint32(ppid)]; ok { - _ = updateBPF(uint32(pid), eq) - return - } + // Update only the first bitmap (first 64 rules) + bitmap := bitmaps[0] - updateBPFIfParentMatches(uint32(ppid)) - } + // Convert the uint64 key to []byte + keyBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(keyBytes, uint32(key)) + keyPointer := unsafe.Pointer(&keyBytes[0]) + + // Convert the ruleBitmap to []byte + bitmapBytes := make([]byte, ruleBitmapSize) + binary.LittleEndian.PutUint64(bitmapBytes[0:8], bitmap.equalsInRules) + binary.LittleEndian.PutUint64(bitmapBytes[8:16], bitmap.keyUsedInRules) + valuePointer := unsafe.Pointer(&bitmapBytes[0]) - updateBPFIfParentMatches(uint32(pid)) + // Update the BPF map + if err := bpfMap.Update(keyPointer, valuePointer); err != nil { + return errfmt.WrapError(err) + } + } } return nil } -const ( - maxBpfBinPathSize = 256 // maximum binary path size supported by BPF (MAX_BIN_PATH_SIZE) - bpfBinFilterSize = 264 // the key size of the BPF binary filter map entry +// updateStringFilterBPF updates the BPF maps for the given string filter map. +func (pm *PolicyManager) updateStringFilterBPF( + bpfModule *bpf.Module, + filterMap map[filterVersionKey]map[string][]ruleBitmap, + innerMapName string, + outerMapName string, +) error { + for vKey, innerMap := range filterMap { + // Skip if no rules exist for this version/event + if len(innerMap) == 0 { + continue + } - maxBpfDataFilterStrSize = 256 // maximum str size supported by Data filter in BPF (MAX_DATA_FILTER_STR_SIZE) - bpfDataFilterStrSize = 260 // path size + 4 bytes prefix len -) + // Get or create inner map + bpfMap, _, err := pm.createAndUpdateInnerMap(bpfModule, innerMapName, outerMapName, vKey) + if err != nil { + return fmt.Errorf("creating/getting inner map for version %d event %d: %w", + vKey.Version, vKey.EventID, err) + } -// updateBinaryFilterBPF updates the BPF maps for the given binary equalities. 
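A minimal sketch (not part of the patch itself) of the per-key value that updateUIntFilterBPF and updateStringFilterBPF above, and the binary/data variants below, all write. Only bitmaps[0] of each rule-bitmap slice is written, i.e. rules 0-63; higher rule IDs are evaluated through the extended bitmaps in userspace. This assumes ruleBitmapSize is 16, which is not defined in this hunk but matches the two 8-byte writes; the helper name is illustrative:

// Sketch of the value layout the update*FilterBPF helpers build inline.
func encodeRuleBitmap(bm ruleBitmap) []byte {
	val := make([]byte, ruleBitmapSize)                         // assumed 16 bytes
	binary.LittleEndian.PutUint64(val[0:8], bm.equalsInRules)   // bit n set: rule n expects equality on this key
	binary.LittleEndian.PutUint64(val[8:16], bm.keyUsedInRules) // bit n set: rule n filters on this key at all
	return val
}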
-func (ps *policies) updateBinaryFilterBPF(binEqualities map[filters.NSBinary]equality, innerMapName string) error { - // BinaryNS equality - // 1. binary_filter binary_t, eq_t + for key, bitmaps := range innerMap { + // Check if there are bitmaps for this key + if len(bitmaps) == 0 { + continue + } - for k, v := range binEqualities { - if len(k.Path) > maxBpfBinPathSize { - return filters.InvalidValue(k.Path) - } - binBytes := make([]byte, bpfBinFilterSize) - if k.MntNS == 0 { - // if no mount namespace given, bpf map key is only the path - copy(binBytes, k.Path) - } else { - // otherwise, key is composed of the mount namespace and the path - binary.LittleEndian.PutUint32(binBytes, k.MntNS) - copy(binBytes[4:], k.Path) - } - keyPointer := unsafe.Pointer(&binBytes[0]) + // Update only the first bitmap (first 64 rules) + bitmap := bitmaps[0] - eqVal := make([]byte, equalityValueSize) - valuePointer := unsafe.Pointer(&eqVal[0]) + byteStr := make([]byte, maxBpfStrFilterSize) + copy(byteStr, key) + keyPointer := unsafe.Pointer(&byteStr[0]) - binary.LittleEndian.PutUint64(eqVal[0:8], v.equalsInPolicies) - binary.LittleEndian.PutUint64(eqVal[8:16], v.keyUsedInPolicies) + bitmapBytes := make([]byte, ruleBitmapSize) + binary.LittleEndian.PutUint64(bitmapBytes[0:8], bitmap.equalsInRules) + binary.LittleEndian.PutUint64(bitmapBytes[8:16], bitmap.keyUsedInRules) + valuePointer := unsafe.Pointer(&bitmapBytes[0]) - bpfMap, ok := ps.bpfInnerMaps[innerMapName] - if !ok { - return errfmt.Errorf("bpf map not found: %s", innerMapName) - } - if err := bpfMap.Update(keyPointer, valuePointer); err != nil { - return errfmt.WrapError(err) + // Update the BPF map + if err := bpfMap.Update(keyPointer, valuePointer); err != nil { + return errfmt.WrapError(err) + } } } return nil } -// updateStringDataFilterLPMBPF updates the BPF maps for the given kernel data LPM equalities. -func (ps *policies) updateStringDataFilterLPMBPF(dataEqualities map[KernelDataFields]equality, innerMapName string) error { - // KernelDataFields equality - // 1. data_filter_prefix data_filter_lpm_key_t, eq_t - // 2. data_filter_suffix data_filter_lpm_key_t, eq_t +// updateBinaryFilterBPF updates the BPF maps for the given binary filter map. +func (pm *PolicyManager) updateBinaryFilterBPF( + bpfModule *bpf.Module, + filterMap map[filterVersionKey]map[filters.NSBinary][]ruleBitmap, + innerMapName string, + outerMapName string, +) error { + for vKey, innerMap := range filterMap { + // Skip if no rules exist for this version/event + if len(innerMap) == 0 { + continue + } - for k, v := range dataEqualities { - // Ensure the string length is within the maximum allowed limit, - // excluding the NULL terminator. - if len(k.String) > maxBpfDataFilterStrSize-1 { - return filters.InvalidValueMax(k.String, maxBpfDataFilterStrSize-1) + // Get or create inner map + bpfMap, _, err := pm.createAndUpdateInnerMap(bpfModule, innerMapName, outerMapName, vKey) + if err != nil { + return fmt.Errorf("creating/getting inner map for version %d event %d: %w", + vKey.Version, vKey.EventID, err) } - binBytes := make([]byte, bpfDataFilterStrSize) - // key is composed of: prefixlen and a string - // multiplication by 8 - convert prefix length from bytes to bits - // for LPM Trie compatibility. 
- prefixlen := len(k.String) * 8 - binary.LittleEndian.PutUint32(binBytes, uint32(prefixlen)) // prefixlen - copy(binBytes[4:], k.String) // string + for key, bitmaps := range innerMap { + // Check if there are bitmaps for this key + if len(bitmaps) == 0 { + continue + } - keyPointer := unsafe.Pointer(&binBytes[0]) + // Update only the first bitmap (first 64 rules) + bitmap := bitmaps[0] - eqVal := make([]byte, equalityValueSize) - valuePointer := unsafe.Pointer(&eqVal[0]) + if len(key.Path) > maxBpfBinPathSize { + return filters.InvalidValue(key.Path) + } - binary.LittleEndian.PutUint64(eqVal[0:8], v.equalsInPolicies) - binary.LittleEndian.PutUint64(eqVal[8:16], v.keyUsedInPolicies) + binBytes := make([]byte, bpfBinFilterSize) + if key.MntNS == 0 { + // if no mount namespace given, bpf map key is only the path + copy(binBytes, key.Path) + } else { + // otherwise, key is composed of the mount namespace and the path + binary.LittleEndian.PutUint32(binBytes, key.MntNS) + copy(binBytes[4:], key.Path) + } + keyPointer := unsafe.Pointer(&binBytes[0]) - innerMapName := fmt.Sprintf("%s_%d_%d", innerMapName, ps.version(), uint32(k.ID)) + bitmapBytes := make([]byte, ruleBitmapSize) + binary.LittleEndian.PutUint64(bitmapBytes[0:8], bitmap.equalsInRules) + binary.LittleEndian.PutUint64(bitmapBytes[8:16], bitmap.keyUsedInRules) + valuePointer := unsafe.Pointer(&bitmapBytes[0]) - bpfMap, ok := ps.bpfInnerMaps[innerMapName] - if !ok { - return errfmt.Errorf("bpf map not found: %s", innerMapName) - } - if err := bpfMap.Update(keyPointer, valuePointer); err != nil { - return errfmt.WrapError(err) + // Update the BPF map + if err := bpfMap.Update(keyPointer, valuePointer); err != nil { + return errfmt.WrapError(err) + } } } return nil } -// updateStringDataFilterBPF updates the BPF maps for the given kernel data equalities. -func (ps *policies) updateStringDataFilterBPF(dataEqualities map[KernelDataFields]equality, innerMapName string) error { - // KernelDataFields equality - // 1. data_filter_exact data_filter_key_t, eq_t - - for k, v := range dataEqualities { - // Ensure the string length is within the maximum allowed limit, - // excluding the NULL terminator. - if len(k.String) > maxBpfDataFilterStrSize-1 { - return filters.InvalidValueMax(k.String, maxBpfDataFilterStrSize-1) +// updateStringDataFilterLPMBPF updates the BPF maps for the given kernel data LPM filter map. 
+func (pm *PolicyManager) updateStringDataFilterLPMBPF( + bpfModule *bpf.Module, + filterMap map[filterVersionKey]map[string][]ruleBitmap, + innerMapName string, + outerMapName string, +) error { + for vKey, innerMap := range filterMap { + // Skip if no rules exist for this version/event + if len(innerMap) == 0 { + continue } - binBytes := make([]byte, maxBpfDataFilterStrSize) - // key is composed of a string - copy(binBytes, k.String) // string - - keyPointer := unsafe.Pointer(&binBytes[0]) + // Get or create inner map + bpfMap, _, err := pm.createAndUpdateInnerMap(bpfModule, innerMapName, outerMapName, vKey) + if err != nil { + return fmt.Errorf("creating/getting inner map for version %d event %d: %w", + vKey.Version, vKey.EventID, err) + } - eqVal := make([]byte, equalityValueSize) - valuePointer := unsafe.Pointer(&eqVal[0]) + for key, bitmaps := range innerMap { + // Check if there are bitmaps for this key + if len(bitmaps) == 0 { + continue + } - binary.LittleEndian.PutUint64(eqVal[0:8], v.equalsInPolicies) - binary.LittleEndian.PutUint64(eqVal[8:16], v.keyUsedInPolicies) + // Update only the first bitmap (first 64 rules) + bitmap := bitmaps[0] - innerMapName := fmt.Sprintf("%s_%d_%d", innerMapName, ps.version(), uint32(k.ID)) + // Ensure the string length is within the maximum allowed limit, + // excluding the NULL terminator. + if len(key) > maxBpfDataFilterStrSize-1 { + return filters.InvalidValueMax(key, maxBpfDataFilterStrSize-1) + } - bpfMap, ok := ps.bpfInnerMaps[innerMapName] - if !ok { - return errfmt.Errorf("bpf map not found: %s", innerMapName) - } - if err := bpfMap.Update(keyPointer, valuePointer); err != nil { - return errfmt.WrapError(err) + // key is composed of: prefixlen and a string + // multiply by 8 to convert prefix length from bytes to bits for LPM Trie + keyBytes := make([]byte, bpfDataFilterStrSize) + prefixlen := len(key) * 8 + binary.LittleEndian.PutUint32(keyBytes, uint32(prefixlen)) + copy(keyBytes[4:], key) + keyPointer := unsafe.Pointer(&keyBytes[0]) + + bitmapBytes := make([]byte, ruleBitmapSize) + binary.LittleEndian.PutUint64(bitmapBytes[0:8], bitmap.equalsInRules) + binary.LittleEndian.PutUint64(bitmapBytes[8:16], bitmap.keyUsedInRules) + valuePointer := unsafe.Pointer(&bitmapBytes[0]) + + // Update the BPF map + if err := bpfMap.Update(keyPointer, valuePointer); err != nil { + return errfmt.WrapError(err) + } } } return nil } -type procInfo struct { - newProc bool - followPolicies uint64 - mntNS uint32 - binaryBytes [maxBpfBinPathSize]byte - binNoMnt uint32 -} +// updateStringDataFilterBPF updates the BPF maps for the given kernel data filter map. +func (pm *PolicyManager) updateStringDataFilterBPF( + bpfModule *bpf.Module, + filterMap map[filterVersionKey]map[string][]ruleBitmap, + innerMapName string, + outerMapName string, +) error { + for vKey, innerMap := range filterMap { + // Skip if no rules exist for this version/event + if len(innerMap) == 0 { + continue + } -// populateProcInfoMap populates the ProcInfoMap with the binaries to track. -// TODO: Should ProcInfoMap be cleared when a Policies new version is created? -// Or should it be versioned too? 
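A minimal sketch (not part of the patch itself) of the two key layouts used above. The LPM trie maps (prefix/suffix data filters) prepend a 4-byte match length expressed in bits, which is why the string length is multiplied by 8; the binary filter map uses a fixed 264-byte key of a 4-byte mount namespace followed by the path (or the path alone when MntNS is 0), zero-padded. Helper names are illustrative only and callers are assumed to have validated string lengths as the real functions do:

// Key layouts written by updateStringDataFilterLPMBPF and updateBinaryFilterBPF.
func lpmDataFilterKey(s string) []byte {
	key := make([]byte, bpfDataFilterStrSize)            // 260 = 4-byte prefix len + 256-byte string
	binary.LittleEndian.PutUint32(key, uint32(len(s)*8)) // match length in bits, e.g. "/etc/" -> 40
	copy(key[4:], s)
	return key
}

func binaryFilterKey(bin filters.NSBinary) []byte {
	key := make([]byte, bpfBinFilterSize) // 264
	if bin.MntNS == 0 {
		copy(key, bin.Path) // path-only key
		return key
	}
	binary.LittleEndian.PutUint32(key, bin.MntNS)
	copy(key[4:], bin.Path)
	return key
}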
-func populateProcInfoMap(bpfModule *bpf.Module, binEqualities map[filters.NSBinary]equality) error { - procInfoMap, err := bpfModule.GetMap(ProcInfoMap) - if err != nil { - return errfmt.WrapError(err) - } + // Get or create inner map + bpfMap, _, err := pm.createAndUpdateInnerMap(bpfModule, innerMapName, outerMapName, vKey) + if err != nil { + return fmt.Errorf("creating/getting inner map for version %d event %d: %w", + vKey.Version, vKey.EventID, err) + } - binsProcs, err := proc.GetAllBinaryProcs() - if err != nil { - return errfmt.WrapError(err) - } + for key, bitmaps := range innerMap { + // Check if there are bitmaps for this key + if len(bitmaps) == 0 { + continue + } - for bin := range binEqualities { - procs := binsProcs[bin.Path] - for _, p := range procs { - binBytes := make([]byte, maxBpfBinPathSize) - copy(binBytes, bin.Path) - binBytesCopy := (*[maxBpfBinPathSize]byte)(binBytes) - // TODO: Default values for newProc and followPolicies are 0 are safe only in - // init phase. As Policies are updated at runtime, this is not true anymore. - procInfo := procInfo{ - newProc: false, - followPolicies: 0, - mntNS: bin.MntNS, - binaryBytes: *binBytesCopy, - binNoMnt: 0, // always 0, see bin_no_mnt in tracee.bpf.c + // Update only the first bitmap (first 64 rules) + bitmap := bitmaps[0] + + // Ensure the string length is within the maximum allowed limit, + // excluding the NULL terminator + if len(key) > maxBpfDataFilterStrSize-1 { + return filters.InvalidValueMax(key, maxBpfDataFilterStrSize-1) } - if err := procInfoMap.Update(unsafe.Pointer(&p), unsafe.Pointer(&procInfo)); err != nil { + + keyBytes := make([]byte, maxBpfDataFilterStrSize) + copy(keyBytes, key) // string + keyPointer := unsafe.Pointer(&keyBytes[0]) + + bitmapBytes := make([]byte, ruleBitmapSize) + binary.LittleEndian.PutUint64(bitmapBytes[0:8], bitmap.equalsInRules) + binary.LittleEndian.PutUint64(bitmapBytes[8:16], bitmap.keyUsedInRules) + valuePointer := unsafe.Pointer(&bitmapBytes[0]) + + // Update the BPF map + if err := bpfMap.Update(keyPointer, valuePointer); err != nil { return errfmt.WrapError(err) } } @@ -685,300 +688,148 @@ func populateProcInfoMap(bpfModule *bpf.Module, binEqualities map[filters.NSBina return nil } -// updateBPF updates the BPF maps with the policies filters. -// createNewMaps indicates whether new maps should be created or not. -// updateProcTree indicates whether the process tree map should be updated or not. -func (ps *policies) updateBPF( +// createAndUpdateInnerMap creates a new inner map and updates the outer map with it. +// It returns the created map, its name and any error encountered. 
+func (pm *PolicyManager) createAndUpdateInnerMap( bpfModule *bpf.Module, - cts *containers.Manager, - rules map[events.ID]*eventFlags, - eventsFields map[events.ID][]data.DecodeAs, - createNewMaps bool, - updateProcTree bool, -) (*PoliciesConfig, error) { - fEqs := &filtersEqualities{ - uidEqualities: make(map[uint64]equality), - pidEqualities: make(map[uint64]equality), - mntNSEqualities: make(map[uint64]equality), - pidNSEqualities: make(map[uint64]equality), - cgroupIdEqualities: make(map[uint64]equality), - utsEqualities: make(map[string]equality), - commEqualities: make(map[string]equality), - dataEqualitiesPrefix: make(map[KernelDataFields]equality), - dataEqualitiesSuffix: make(map[KernelDataFields]equality), - dataEqualitiesExact: make(map[KernelDataFields]equality), - binaryEqualities: make(map[filters.NSBinary]equality), - } - - fEvtCfg := make(map[events.ID]stringFilterConfig) - - if err := ps.computeFilterEqualities(fEqs, cts); err != nil { - return nil, errfmt.WrapError(err) - } - - if err := ps.computeDataFilterEqualities(fEqs, fEvtCfg); err != nil { - return nil, errfmt.WrapError(err) - } - - if createNewMaps { - // Create new events map version - if err := ps.createNewEventsMapVersion(bpfModule, rules, eventsFields, fEvtCfg); err != nil { - return nil, errfmt.WrapError(err) - } - - // Create new filter maps version - if err := ps.createNewFilterMapsVersion(bpfModule); err != nil { - return nil, errfmt.WrapError(err) - } - - // Create new filter maps version based on version and event id - // TODO: Currently used only for data filters but should be extended to support other types - if err := ps.createNewDataFilterMapsVersion(bpfModule, fEqs); err != nil { - return nil, errfmt.WrapError(err) - } + innerMapName string, + outerMapName string, + vKey filterVersionKey, +) (*bpf.BPFMapLow, string, error) { + // Check if map already exists + newInnerMapName := fmt.Sprintf("%s_%d_%d", innerMapName, vKey.Version, vKey.EventID) + if pm.bpfInnerMaps[newInnerMapName] != nil { + return pm.bpfInnerMaps[newInnerMapName], newInnerMapName, nil + } + + // Create new inner map + newInnerMap, newInnerMapName, err := createNewInnerMapEventId(bpfModule, innerMapName, vKey.Version, vKey.EventID) + if err != nil { + return nil, "", errfmt.WrapError(err) } - // Update UInt equalities filter maps - if err := ps.updateUIntFilterBPF(fEqs.uidEqualities, UIDFilterMap); err != nil { - return nil, errfmt.WrapError(err) - } - if err := ps.updateUIntFilterBPF(fEqs.pidEqualities, PIDFilterMap); err != nil { - return nil, errfmt.WrapError(err) - } - if err := ps.updateUIntFilterBPF(fEqs.mntNSEqualities, MntNSFilterMap); err != nil { - return nil, errfmt.WrapError(err) - } - if err := ps.updateUIntFilterBPF(fEqs.pidNSEqualities, PidNSFilterMap); err != nil { - return nil, errfmt.WrapError(err) - } - if err := ps.updateUIntFilterBPF(fEqs.cgroupIdEqualities, CgroupIdFilterMap); err != nil { - return nil, errfmt.WrapError(err) + // Update outer map + if err := updateOuterMapWithEventId(bpfModule, outerMapName, vKey, newInnerMap); err != nil { + return nil, "", errfmt.WrapError(err) } - // Update String equalities filter maps - if err := ps.updateStringFilterBPF(fEqs.utsEqualities, UTSFilterMap); err != nil { - return nil, errfmt.WrapError(err) - } - if err := ps.updateStringFilterBPF(fEqs.commEqualities, CommFilterMap); err != nil { - return nil, errfmt.WrapError(err) - } + // Store map reference + pm.bpfInnerMaps[newInnerMapName] = newInnerMap - // Data Filter - Prefix match - if err := 
ps.updateStringDataFilterLPMBPF(fEqs.dataEqualitiesPrefix, DataFilterPrefixMap); err != nil { - return nil, errfmt.WrapError(err) - } + return newInnerMap, newInnerMapName, nil +} - // Data Filter - Suffix match - if err := ps.updateStringDataFilterLPMBPF(fEqs.dataEqualitiesSuffix, DataFilterSuffixMap); err != nil { - return nil, errfmt.WrapError(err) +// createNewInnerMapEventId creates a new map for the given map name, version and event id. +func createNewInnerMapEventId(m *bpf.Module, mapName string, mapVersion uint16, eventId uint32) (*bpf.BPFMapLow, string, error) { + // use the map prototype to create a new map with the same properties + prototypeMap, err := m.GetMap(mapName) + if err != nil { + return nil, "", errfmt.WrapError(err) } - // Data Filter - Exact match - if err := ps.updateStringDataFilterBPF(fEqs.dataEqualitiesExact, DataFilterExactMap); err != nil { - return nil, errfmt.WrapError(err) + info, err := bpf.GetMapInfoByFD(prototypeMap.FileDescriptor()) + if err != nil { + return nil, "", errfmt.WrapError(err) } - if updateProcTree { - // ProcessTreeFilter equalities - procTreeEqualities := make(map[uint32]equality) - ps.computeProcTreeEqualities(procTreeEqualities) - - // Update ProcessTree equalities filter map - if err := ps.updateProcTreeFilterBPF(procTreeEqualities, ProcessTreeFilterMap); err != nil { - return nil, errfmt.WrapError(err) - } + btfFD, err := bpf.GetBTFFDByID(info.BTFID) + if err != nil { + return nil, "", errfmt.WrapError(err) } - // Update Binary equalities filter map - if err := ps.updateBinaryFilterBPF(fEqs.binaryEqualities, BinaryFilterMap); err != nil { - return nil, errfmt.WrapError(err) - } - // Update ProcInfo map (required for binary filters) - if err := populateProcInfoMap(bpfModule, fEqs.binaryEqualities); err != nil { - return nil, errfmt.WrapError(err) + opts := &bpf.BPFMapCreateOpts{ + BTFFD: uint32(btfFD), + BTFKeyTypeID: info.BTFKeyTypeID, + BTFValueTypeID: info.BTFValueTypeID, + BTFVmlinuxValueTypeID: info.BTFVmlinuxValueTypeID, + MapFlags: info.MapFlags, + MapExtra: info.MapExtra, + MapIfIndex: info.IfIndex, } - if createNewMaps { - // Create the policies config map version - // - // This must be done after the filter maps have been updated, as the - // policies config map contains the filter config computed from the - // policies filters. - if err := ps.createNewPoliciesConfigMap(bpfModule); err != nil { - return nil, errfmt.WrapError(err) - } - } + newInnerMapName := fmt.Sprintf("%s_%d_%d", mapName, mapVersion, eventId) - // Update policies config map version - pCfg := ps.computePoliciesConfig() - if err := pCfg.UpdateBPF(ps.bpfInnerMaps[PoliciesConfigMap]); err != nil { - return nil, errfmt.WrapError(err) + newInnerMap, err := bpf.CreateMap( + prototypeMap.Type(), + newInnerMapName, // new map name + prototypeMap.KeySize(), + prototypeMap.ValueSize(), + int(prototypeMap.MaxEntries()), + opts, + ) + if err != nil { + return nil, "", errfmt.WrapError(err) } - return pCfg, nil + return newInnerMap, newInnerMapName, nil } -// createNewPoliciesConfigMap creates a new version of the policies config map -func (ps *policies) createNewPoliciesConfigMap(bpfModule *bpf.Module) error { - version := ps.version() - newInnerMap, err := createNewInnerMap(bpfModule, PoliciesConfigMap, version) +// updateOuterMapWithEventId updates the outer map with the given map name, version and event id. 
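A brief sketch (not part of the patch itself) of how inner maps are identified and reused: createNewInnerMapEventId derives the name from the prototype map, the filter version and the event ID, and createAndUpdateInnerMap caches the result in pm.bpfInnerMaps so repeated calls for the same version/event return the existing map. The helper name and the example event ID 101 below are illustrative only:

// Illustrative naming scheme used for per-version, per-event inner maps,
// e.g. "data_filter_prefix_1_101" for version 1 and event ID 101.
func innerMapKeyName(base string, vKey filterVersionKey) string {
	return fmt.Sprintf("%s_%d_%d", base, vKey.Version, vKey.EventID)
}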
+func updateOuterMapWithEventId(m *bpf.Module, mapName string, fvKey filterVersionKey, innerMap *bpf.BPFMapLow) error { + outerMap, err := m.GetMap(mapName) if err != nil { return errfmt.WrapError(err) } - // policies_config_version u16, policies_config_map - if err := updateOuterMap(bpfModule, PoliciesConfigVersion, version, newInnerMap); err != nil { + keyPointer := unsafe.Pointer(&fvKey) + + innerMapFD := uint32(innerMap.FileDescriptor()) + valuePointer := unsafe.Pointer(&innerMapFD) + + // update version filter map + // - key is the map version + event id + // - value is the related filter map FD. + if err := outerMap.Update(keyPointer, valuePointer); err != nil { return errfmt.WrapError(err) } - ps.bpfInnerMaps[PoliciesConfigMap] = newInnerMap - return nil } -// PoliciesConfig mirrors the C struct policies_config (policies_config_t). -// Order of fields is important, as it is used as a value for -// the PoliciesConfigMap BPF map. -type PoliciesConfig struct { - UIDFilterEnabled uint64 - PIDFilterEnabled uint64 - MntNsFilterEnabled uint64 - PidNsFilterEnabled uint64 - UtsNsFilterEnabled uint64 - CommFilterEnabled uint64 - CgroupIdFilterEnabled uint64 - ContFilterEnabled uint64 - NewContFilterEnabled uint64 - NewPidFilterEnabled uint64 - ProcTreeFilterEnabled uint64 - BinPathFilterEnabled uint64 - FollowFilterEnabled uint64 - - UIDFilterMatchIfKeyMissing uint64 - PIDFilterMatchIfKeyMissing uint64 - MntNsFilterMatchIfKeyMissing uint64 - PidNsFilterMatchIfKeyMissing uint64 - UtsNsFilterMatchIfKeyMissing uint64 - CommFilterMatchIfKeyMissing uint64 - CgroupIdFilterMatchIfKeyMissing uint64 - ContFilterMatchIfKeyMissing uint64 - NewContFilterMatchIfKeyMissing uint64 - NewPidFilterMatchIfKeyMissing uint64 - ProcTreeFilterMatchIfKeyMissing uint64 - BinPathFilterMatchIfKeyMissing uint64 - - EnabledPolicies uint64 - - UidMax uint64 - UidMin uint64 - PidMax uint64 - PidMin uint64 +type procInfo struct { + newProc bool + followPolicies uint64 + mntNS uint32 + binaryBytes [maxBpfBinPathSize]byte + binNoMnt uint32 } -func (pc *PoliciesConfig) UpdateBPF(bpfConfigMap *bpf.BPFMapLow) error { - if bpfConfigMap == nil { - return errfmt.Errorf("bpfConfigMap is nil") +// populateProcInfoMap populates the ProcInfoMap with the binaries to track. +// TODO: Should ProcInfoMap be cleared when a Policies new version is created? +// Or should it be versioned too? +func populateProcInfoMap(bpfModule *bpf.Module, filterMap map[filterVersionKey]map[filters.NSBinary][]ruleBitmap) error { + procInfoMap, err := bpfModule.GetMap(ProcInfoMap) + if err != nil { + return errfmt.WrapError(err) } - cZero := 0 - if err := bpfConfigMap.Update(unsafe.Pointer(&cZero), unsafe.Pointer(pc)); err != nil { + binsProcs, err := proc.GetAllBinaryProcs() + if err != nil { return errfmt.WrapError(err) } - return nil -} - -// computePoliciesConfig computes the policies config from the policies. 
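A minimal sketch (not part of the patch itself) of the layout that updateOuterMapWithEventId now relies on: the key is passed as unsafe.Pointer(&fvKey), so filterVersionKey must match the 8-byte outer-map key that the removed helper wrote by hand (u16 version, u16 padding, u32 event ID). filterVersionKey is defined elsewhere in the package; the struct below only illustrates the assumed layout and is not the actual declaration:

// Layout assumed by unsafe.Pointer(&fvKey) above; illustrative only.
type filterVersionKeyLayout struct {
	Version uint16 // filter maps version
	_       uint16 // padding, keeps EventID 4-byte aligned (written as 0 by the old helper)
	EventID uint32
}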
-func (ps *policies) computePoliciesConfig() *PoliciesConfig { - cfg := &PoliciesConfig{} - - for _, p := range ps.allFromMap() { - offset := p.ID - - // bitmap indicating which policies have filters enabled - if p.UIDFilter.Enabled() { - cfg.UIDFilterEnabled |= 1 << offset - } - if p.PIDFilter.Enabled() { - cfg.PIDFilterEnabled |= 1 << offset - } - if p.MntNSFilter.Enabled() { - cfg.MntNsFilterEnabled |= 1 << offset - } - if p.PidNSFilter.Enabled() { - cfg.PidNsFilterEnabled |= 1 << offset - } - if p.UTSFilter.Enabled() { - cfg.UtsNsFilterEnabled |= 1 << offset - } - if p.CommFilter.Enabled() { - cfg.CommFilterEnabled |= 1 << offset - } - if p.ContIDFilter.Enabled() { - cfg.CgroupIdFilterEnabled |= 1 << offset - } - if p.ContFilter.Enabled() { - cfg.ContFilterEnabled |= 1 << offset - } - if p.NewContFilter.Enabled() { - cfg.NewContFilterEnabled |= 1 << offset - } - if p.NewPidFilter.Enabled() { - cfg.NewPidFilterEnabled |= 1 << offset - } - if p.ProcessTreeFilter.Enabled() { - cfg.ProcTreeFilterEnabled |= 1 << offset - } - if p.BinaryFilter.Enabled() { - cfg.BinPathFilterEnabled |= 1 << offset - } - if p.Follow { - cfg.FollowFilterEnabled |= 1 << offset - } - // bitmap indicating whether to match a rule if the key is missing from its filter map - if p.UIDFilter.MatchIfKeyMissing() { - cfg.UIDFilterMatchIfKeyMissing |= 1 << offset - } - if p.PIDFilter.MatchIfKeyMissing() { - cfg.PIDFilterMatchIfKeyMissing |= 1 << offset - } - if p.MntNSFilter.MatchIfKeyMissing() { - cfg.MntNsFilterMatchIfKeyMissing |= 1 << offset - } - if p.PidNSFilter.MatchIfKeyMissing() { - cfg.PidNsFilterMatchIfKeyMissing |= 1 << offset - } - if p.UTSFilter.MatchIfKeyMissing() { - cfg.UtsNsFilterMatchIfKeyMissing |= 1 << offset - } - if p.CommFilter.MatchIfKeyMissing() { - cfg.CommFilterMatchIfKeyMissing |= 1 << offset - } - if p.ContIDFilter.MatchIfKeyMissing() { - cfg.CgroupIdFilterMatchIfKeyMissing |= 1 << offset - } - if p.ContFilter.MatchIfKeyMissing() { - cfg.ContFilterMatchIfKeyMissing |= 1 << offset - } - if p.NewContFilter.MatchIfKeyMissing() { - cfg.NewContFilterMatchIfKeyMissing |= 1 << offset - } - if p.NewPidFilter.MatchIfKeyMissing() { - cfg.NewPidFilterMatchIfKeyMissing |= 1 << offset - } - if p.ProcessTreeFilter.MatchIfKeyMissing() { - cfg.ProcTreeFilterMatchIfKeyMissing |= 1 << offset - } - if p.BinaryFilter.MatchIfKeyMissing() { - cfg.BinPathFilterMatchIfKeyMissing |= 1 << offset + for _, innerMap := range filterMap { + for bin := range innerMap { + procs := binsProcs[bin.Path] + for _, p := range procs { + binBytes := make([]byte, maxBpfBinPathSize) + copy(binBytes, bin.Path) + binBytesCopy := (*[maxBpfBinPathSize]byte)(binBytes) + // TODO: Default values for newProc and followPolicies are 0 are safe only in + // init phase. As Policies are updated at runtime, this is not true anymore. 
+ procInfo := procInfo{ + newProc: false, + followPolicies: 0, + mntNS: bin.MntNS, + binaryBytes: *binBytesCopy, + binNoMnt: 0, // always 0, see bin_no_mnt in tracee.bpf.c + } + if err := procInfoMap.Update(unsafe.Pointer(&p), unsafe.Pointer(&procInfo)); err != nil { + return errfmt.WrapError(err) + } + } } - cfg.EnabledPolicies |= 1 << offset } - cfg.UidMax = ps.uidFilterMax - cfg.UidMin = ps.uidFilterMin - cfg.PidMax = ps.pidFilterMax - cfg.PidMin = ps.pidFilterMin - - return cfg + return nil } diff --git a/pkg/policy/equality.go b/pkg/policy/equality.go deleted file mode 100644 index c3dbb16427f1..000000000000 --- a/pkg/policy/equality.go +++ /dev/null @@ -1,245 +0,0 @@ -package policy - -import ( - "strings" - - "github.com/aquasecurity/tracee/pkg/containers" - "github.com/aquasecurity/tracee/pkg/filters" - "github.com/aquasecurity/tracee/pkg/logger" - "github.com/aquasecurity/tracee/pkg/utils" -) - -// equality mirrors the C struct equality (eq_t). -// Check it for more info. -type equality struct { - equalsInPolicies uint64 - keyUsedInPolicies uint64 -} - -const ( - // 8 bytes for equalsInPolicies and 8 bytes for keyUsedInPolicies - equalityValueSize = 16 -) - -// filtersEqualities stores the equalities for each filter in the policies -type filtersEqualities struct { - uidEqualities map[uint64]equality - pidEqualities map[uint64]equality - mntNSEqualities map[uint64]equality - pidNSEqualities map[uint64]equality - cgroupIdEqualities map[uint64]equality - utsEqualities map[string]equality - commEqualities map[string]equality - dataEqualitiesPrefix map[KernelDataFields]equality - dataEqualitiesSuffix map[KernelDataFields]equality - dataEqualitiesExact map[KernelDataFields]equality - binaryEqualities map[filters.NSBinary]equality -} - -// equalityType represents the type of equality. -type equalityType int - -const ( - notEqual equalityType = iota - equal -) - -// equalUpdater updates the equality with the given policyID. -type equalityUpdater func(eq *equality, policyID uint) - -// notEqualUpdate updates the equality as not equal with the given policyID. -func notEqualUpdate(eq *equality, policyID uint) { - // NotEqual == 0, so clear n bitmap bit - utils.ClearBit(&eq.equalsInPolicies, policyID) - utils.SetBit(&eq.keyUsedInPolicies, policyID) -} - -// equalUpdate updates the equality as equal with the given policyID. -func equalUpdate(eq *equality, policyID uint) { - // Equal == 1, so set n bitmap bit - utils.SetBit(&eq.equalsInPolicies, policyID) - utils.SetBit(&eq.keyUsedInPolicies, policyID) -} - -// updateEqualities updates the equalities map with the given filter equalities -// for the given equality type and policy ID. -func updateEqualities[T comparable]( - equalitiesMap map[T]equality, - filterEqualities map[T]struct{}, - eqType equalityType, - policyID uint, -) { - var update equalityUpdater - - switch eqType { - case notEqual: - update = notEqualUpdate - case equal: - update = equalUpdate - default: - logger.Errorw("Invalid equality type", "type", eqType) - return - } - - for k := range filterEqualities { - eq, ok := equalitiesMap[k] - if !ok { - eq = equality{} // initialize if not exists - } - update(&eq, policyID) // update the equality - equalitiesMap[k] = eq // update the map - } -} - -// updateAffixEqualities updates the equalities map with the given filter equalities -// for the specified equality type and policy ID. It handles corner cases where paths -// in the prefix/suffix filter are substrings of existing paths in the equalities map. 
-// In cases where one prefix/suffix path overlaps with another, their equality bitmaps -// are combined, addressing the corner case. This ensures that a single lookup retrieves -// the longest matching path, with equality bitmaps merged from overlapping policies. -func updateAffixEqualities[T comparable]( - equalitiesMap map[T]equality, - filterEqualities map[T]struct{}, - eqType equalityType, - policyID uint, -) { - var update equalityUpdater - - switch eqType { - case notEqual: - update = notEqualUpdate - case equal: - update = equalUpdate - default: - logger.Errorw("Invalid equality type", "type", eqType) - return - } - - for newK := range filterEqualities { - newEq, exists := equalitiesMap[newK] - if !exists { - newEq = equality{} // initialize if not exists - } - - newKD, isKernelData := any(newK).(KernelDataFields) - - var longestMatch KernelDataFields - var longestMatchEq equality - - if isKernelData { - for existingK, existingEq := range equalitiesMap { - existingKD, isExistingKernelData := any(existingK).(KernelDataFields) - // skip if event ID is different - if !isExistingKernelData || existingKD.ID != newKD.ID { - continue - } - - // check if exists a substrings of existing paths in the equalities map - if strings.HasPrefix(existingKD.String, newKD.String) { - // Directly update the equality if the new path is a prefix - update(&existingEq, policyID) - equalitiesMap[existingK] = existingEq - } else if strings.HasPrefix(newKD.String, existingKD.String) { - // Cache the longest match - if len(existingKD.String) > len(longestMatch.String) { - longestMatch = existingKD - longestMatchEq = existingEq - } - } - } - - // If a match was found, use the longest matching equality - if len(longestMatch.String) > 0 { - newEq = longestMatchEq - } - } - - update(&newEq, policyID) // update the equality - equalitiesMap[newK] = newEq // update the map - } -} - -// computeFilterEqualities computes the equalities for each filter type in the policies -// updating the provided filtersEqualities struct. 
-func (ps *policies) computeFilterEqualities( - fEqs *filtersEqualities, - cts *containers.Manager, -) error { - for _, p := range ps.allFromMap() { - policyID := uint(p.ID) - - // NOTE: Equal has precedence over NotEqual, so NotEqual must be updated first - - // UIDFilters - uidEqualities := p.UIDFilter.Equalities() - updateEqualities(fEqs.uidEqualities, uidEqualities.NotEqual, notEqual, policyID) - updateEqualities(fEqs.uidEqualities, uidEqualities.Equal, equal, policyID) - - // PIDFilters - pidEqualities := p.PIDFilter.Equalities() - updateEqualities(fEqs.pidEqualities, pidEqualities.NotEqual, notEqual, policyID) - updateEqualities(fEqs.pidEqualities, pidEqualities.Equal, equal, policyID) - - // MntNSFilters - mntNSEqualities := p.MntNSFilter.Equalities() - updateEqualities(fEqs.mntNSEqualities, mntNSEqualities.NotEqual, notEqual, policyID) - updateEqualities(fEqs.mntNSEqualities, mntNSEqualities.Equal, equal, policyID) - - // PidNSFilters - pidNSEqualities := p.PidNSFilter.Equalities() - updateEqualities(fEqs.pidNSEqualities, pidNSEqualities.NotEqual, notEqual, policyID) - updateEqualities(fEqs.pidNSEqualities, pidNSEqualities.Equal, equal, policyID) - - // ContIDFilters - contIDEqualities := p.ContIDFilter.Equalities() - for contID := range contIDEqualities.ExactNotEqual { - cgroupIDs, err := cts.FindContainerCgroupID32LSB(contID) - if err != nil { - return err - } - - eq := fEqs.cgroupIdEqualities[uint64(cgroupIDs[0])] - notEqualUpdate(&eq, policyID) - fEqs.cgroupIdEqualities[uint64(cgroupIDs[0])] = eq - } - for contID := range contIDEqualities.ExactEqual { - cgroupIDs, err := cts.FindContainerCgroupID32LSB(contID) - if err != nil { - return err - } - - eq := fEqs.cgroupIdEqualities[uint64(cgroupIDs[0])] - equalUpdate(&eq, policyID) - fEqs.cgroupIdEqualities[uint64(cgroupIDs[0])] = eq - } - - // UTSFilters - utsEqualities := p.UTSFilter.Equalities() - updateEqualities(fEqs.utsEqualities, utsEqualities.ExactNotEqual, notEqual, policyID) - updateEqualities(fEqs.utsEqualities, utsEqualities.ExactEqual, equal, policyID) - - // CommFilters - commEqualities := p.CommFilter.Equalities() - updateEqualities(fEqs.commEqualities, commEqualities.ExactNotEqual, notEqual, policyID) - updateEqualities(fEqs.commEqualities, commEqualities.ExactEqual, equal, policyID) - - // BinaryFilters - binaryEqualities := p.BinaryFilter.Equalities() - updateEqualities(fEqs.binaryEqualities, binaryEqualities.NotEqual, notEqual, policyID) - updateEqualities(fEqs.binaryEqualities, binaryEqualities.Equal, equal, policyID) - } - - return nil -} - -// computeProcTreeEqualities computes the equalities for the process tree filter -// in the policies updating the provided eqs map. 
-func (ps *policies) computeProcTreeEqualities(eqs map[uint32]equality) { - for _, p := range ps.allFromMap() { - policyID := uint(p.ID) - - procTreeEqualities := p.ProcessTreeFilter.Equalities() - updateEqualities(eqs, procTreeEqualities.NotEqual, notEqual, policyID) - updateEqualities(eqs, procTreeEqualities.Equal, equal, policyID) - } -} diff --git a/pkg/policy/equality_data.go b/pkg/policy/equality_data.go deleted file mode 100644 index 079f9975ca69..000000000000 --- a/pkg/policy/equality_data.go +++ /dev/null @@ -1,161 +0,0 @@ -package policy - -import ( - "github.com/aquasecurity/tracee/pkg/events" - "github.com/aquasecurity/tracee/pkg/filters" - "github.com/aquasecurity/tracee/pkg/utils" -) - -type dataFilterConfig struct { - string stringFilterConfig - // other types of filters -} - -type stringFilterConfig struct { - prefixEnabled uint64 - suffixEnabled uint64 - exactEnabled uint64 - prefixMatchIfKeyMissing uint64 - suffixMatchIfKeyMissing uint64 - exactMatchIfKeyMissing uint64 -} - -type KernelDataFields struct { - ID events.ID - String string -} - -func (d *stringFilterConfig) EnableExact(policyID int) { - d.exactEnabled |= 1 << policyID -} - -func (d *stringFilterConfig) EnablePrefix(policyID int) { - d.prefixEnabled |= 1 << policyID -} - -func (d *stringFilterConfig) EnableSuffix(policyID int) { - d.suffixEnabled |= 1 << policyID -} - -func (d *stringFilterConfig) EnablePrefixMatchIfKeyMissing(policyID int) { - d.prefixMatchIfKeyMissing |= 1 << policyID -} - -func (d *stringFilterConfig) EnableSuffixMatchIfKeyMissing(policyID int) { - d.suffixMatchIfKeyMissing |= 1 << policyID -} - -func (d *stringFilterConfig) EnableExactMatchIfKeyMissing(policyID int) { - d.exactMatchIfKeyMissing |= 1 << policyID -} - -func combineEventBitmap(eventsMap map[events.ID]stringFilterConfig, eventID events.ID, strCfgFilter *stringFilterConfig) { - existingFilter, exists := eventsMap[eventID] - if !exists { - eventsMap[eventID] = stringFilterConfig{ - prefixEnabled: strCfgFilter.prefixEnabled, - suffixEnabled: strCfgFilter.suffixEnabled, - exactEnabled: strCfgFilter.exactEnabled, - prefixMatchIfKeyMissing: strCfgFilter.prefixMatchIfKeyMissing, - suffixMatchIfKeyMissing: strCfgFilter.suffixMatchIfKeyMissing, - exactMatchIfKeyMissing: strCfgFilter.exactMatchIfKeyMissing, - } - return - } - - existingFilter.prefixEnabled |= strCfgFilter.prefixEnabled - existingFilter.suffixEnabled |= strCfgFilter.suffixEnabled - existingFilter.exactEnabled |= strCfgFilter.exactEnabled - existingFilter.prefixMatchIfKeyMissing |= strCfgFilter.prefixMatchIfKeyMissing - existingFilter.suffixMatchIfKeyMissing |= strCfgFilter.suffixMatchIfKeyMissing - existingFilter.exactMatchIfKeyMissing |= strCfgFilter.exactMatchIfKeyMissing - - eventsMap[eventID] = existingFilter -} - -// computeDataFilterEqualities computes the equalities for the kernel data filter -// in the policies updating the provided eqs map. 
-func (ps *policies) computeDataFilterEqualities(fEqs *filtersEqualities, eventsConfig map[events.ID]stringFilterConfig) error { - for _, p := range ps.allFromMap() { - // Reinitialize variables at the start of each iteration - combinedEqualities := make(map[KernelDataFields]struct{}) - combinedNotEqualities := make(map[KernelDataFields]struct{}) - combinedPrefixEqualities := make(map[KernelDataFields]struct{}) - combinedNotPrefixEqualities := make(map[KernelDataFields]struct{}) - combinedSuffixEqualities := make(map[KernelDataFields]struct{}) - combinedNotSuffixEqualities := make(map[KernelDataFields]struct{}) - - policyID := p.ID - for eventID, rule := range p.Rules { - strCfgFilter := &stringFilterConfig{} - equalities, err := rule.DataFilter.Equalities() - if err != nil { - continue - } - ps.handleExactMatches(policyID, eventID, strCfgFilter, equalities, combinedEqualities, combinedNotEqualities) - ps.handlePrefixMatches(policyID, eventID, strCfgFilter, equalities, combinedPrefixEqualities, combinedNotPrefixEqualities) - ps.handleSuffixMatches(policyID, eventID, strCfgFilter, equalities, combinedSuffixEqualities, combinedNotSuffixEqualities) - - // Combine the event bitmap across all policies - combineEventBitmap(eventsConfig, eventID, strCfgFilter) - } - - // Exact match equalities - updateEqualities(fEqs.dataEqualitiesExact, combinedNotEqualities, notEqual, uint(policyID)) - updateEqualities(fEqs.dataEqualitiesExact, combinedEqualities, equal, uint(policyID)) - - // Prefix match equalities - updateAffixEqualities(fEqs.dataEqualitiesPrefix, combinedNotPrefixEqualities, notEqual, uint(policyID)) - updateAffixEqualities(fEqs.dataEqualitiesPrefix, combinedPrefixEqualities, equal, uint(policyID)) - - // Suffix match equalities - updateAffixEqualities(fEqs.dataEqualitiesSuffix, combinedNotSuffixEqualities, notEqual, uint(policyID)) - updateAffixEqualities(fEqs.dataEqualitiesSuffix, combinedSuffixEqualities, equal, uint(policyID)) - } - - return nil -} - -func (ps *policies) handleExactMatches(policyId int, eventID events.ID, filter *stringFilterConfig, equalities filters.StringFilterEqualities, combinedEqualities, combinedNotEqualities map[KernelDataFields]struct{}) { - for k := range equalities.ExactEqual { - combinedEqualities[KernelDataFields{eventID, k}] = struct{}{} - - filter.EnableExact(policyId) - } - for k := range equalities.ExactNotEqual { - combinedNotEqualities[KernelDataFields{eventID, k}] = struct{}{} - - filter.EnableExact(policyId) - filter.EnableExactMatchIfKeyMissing(policyId) - } -} - -func (ps *policies) handlePrefixMatches(policyId int, eventID events.ID, filter *stringFilterConfig, equalities filters.StringFilterEqualities, combinedPrefixEqualities, combinedNotPrefixEqualities map[KernelDataFields]struct{}) { - for k := range equalities.PrefixEqual { - combinedPrefixEqualities[KernelDataFields{eventID, k}] = struct{}{} - - filter.EnablePrefix(policyId) - } - for k := range equalities.PrefixNotEqual { - combinedNotPrefixEqualities[KernelDataFields{eventID, k}] = struct{}{} - - filter.EnablePrefix(policyId) - filter.EnablePrefixMatchIfKeyMissing(policyId) - } -} - -func (ps *policies) handleSuffixMatches(policyId int, eventID events.ID, filter *stringFilterConfig, equalities filters.StringFilterEqualities, combinedSuffixEqualities, combinedNotSuffixEqualities map[KernelDataFields]struct{}) { - for k := range equalities.SuffixEqual { - reversed := utils.ReverseString(k) - combinedSuffixEqualities[KernelDataFields{eventID, reversed}] = struct{}{} - - 
filter.EnableSuffix(policyId) - } - for k := range equalities.SuffixNotEqual { - reversed := utils.ReverseString(k) - combinedNotSuffixEqualities[KernelDataFields{eventID, reversed}] = struct{}{} - - filter.EnableSuffix(policyId) - filter.EnableSuffixMatchIfKeyMissing(policyId) - } -} diff --git a/pkg/policy/errors.go b/pkg/policy/errors.go index a4ef98912325..4201712653a8 100644 --- a/pkg/policy/errors.go +++ b/pkg/policy/errors.go @@ -1,30 +1,37 @@ package policy import ( - "errors" "fmt" ) -func PolicyNilError() error { - return errors.New("policy cannot be nil") +type policyError struct { + msg string } -func PoliciesMaxExceededError() error { - return fmt.Errorf("policies maximum exceeded [%d]", PolicyMax) +func (e *policyError) Error() string { + return e.msg } -func PoliciesOutOfRangeError(idx int) error { - return fmt.Errorf("policies index [%d] out-of-range [0-%d]", idx, PolicyMax-1) +func (e *policyError) Is(target error) bool { + t, ok := target.(*policyError) + if !ok { + return false + } + return e.msg == t.msg } -func PolicyAlreadyExistsError(name string, idx int) error { - return fmt.Errorf("policy [%s] already exists at index [%d]", name, idx) +func PolicyNilError() error { + return &policyError{msg: "policy cannot be nil"} } -func PolicyNotFoundByIDError(idx int) error { - return fmt.Errorf("policy not found at index [%d]", idx) +func PolicyAlreadyExistsError(name string) error { + return &policyError{msg: fmt.Sprintf("policy [%s] already exists", name)} } func PolicyNotFoundByNameError(name string) error { - return fmt.Errorf("policy [%s] not found", name) + return &policyError{msg: fmt.Sprintf("policy [%s] not found", name)} +} + +func SelectEventError(eventName string) error { + return &policyError{msg: fmt.Sprintf("failed to select event %s", eventName)} } diff --git a/pkg/policy/event_flags.go b/pkg/policy/event_flags.go deleted file mode 100644 index a9d24fadc7ab..000000000000 --- a/pkg/policy/event_flags.go +++ /dev/null @@ -1,99 +0,0 @@ -package policy - -import "github.com/aquasecurity/tracee/pkg/utils" - -// eventFlags is a struct that holds the flags of an event. -type eventFlags struct { - // policiesSubmit is a bitmask with the policies that require the event, - // if matched, to be submitted to the userland from the ebpf program. - // It is computed on policies updates. - policiesSubmit uint64 - - // policiesEmit is a bitmask with the policies that require the event, - // if matched, to be emitted in the pipeline sink stage. - // It is computed on policies updates. - policiesEmit uint64 - - // requiredBySignature indicates if the event is required by a signature event. - requiredBySignature bool - - // enabled indicates if the event is enabled. - // It is *NOT* computed on policies updates, so its value remains the same - // until changed via the API. 
- enabled bool -} - -// -// constructor -// - -type eventFlagsOption func(*eventFlags) - -func eventFlagsWithSubmit(submit uint64) eventFlagsOption { - return func(es *eventFlags) { - es.policiesSubmit = submit - } -} - -func eventFlagsWithEmit(emit uint64) eventFlagsOption { - return func(es *eventFlags) { - es.policiesEmit = emit - } -} - -func eventFlagsWithRequiredBySignature(required bool) eventFlagsOption { - return func(es *eventFlags) { - es.requiredBySignature = required - } -} - -func eventFlagsWithEnabled(enabled bool) eventFlagsOption { - return func(es *eventFlags) { - es.enabled = enabled - } -} - -func newEventFlags(options ...eventFlagsOption) *eventFlags { - // default values - ef := &eventFlags{ - policiesSubmit: 0, - policiesEmit: 0, - requiredBySignature: false, - enabled: false, - } - - // apply options - for _, option := range options { - option(ef) - } - - return ef -} - -// -// methods -// - -func (ef *eventFlags) enableSubmission(policyId int) { - utils.SetBit(&ef.policiesSubmit, uint(policyId)) -} - -func (ef *eventFlags) enableEmission(policyId int) { - utils.SetBit(&ef.policiesEmit, uint(policyId)) -} - -func (ef *eventFlags) disableSubmission(policyId int) { - utils.ClearBit(&ef.policiesSubmit, uint(policyId)) -} - -func (ef *eventFlags) disableEmission(policyId int) { - utils.ClearBit(&ef.policiesEmit, uint(policyId)) -} - -func (ef *eventFlags) enableEvent() { - ef.enabled = true -} - -func (ef *eventFlags) disableEvent() { - ef.enabled = false -} diff --git a/pkg/policy/event_flags_test.go b/pkg/policy/event_flags_test.go deleted file mode 100644 index 2da7d69e5125..000000000000 --- a/pkg/policy/event_flags_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package policy - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/aquasecurity/tracee/pkg/utils" -) - -// TestNewEventFlags tests the newEventFlags function. -func TestNewEventFlags(t *testing.T) { - t.Parallel() - - ef := newEventFlags() - emit := uint64(0) - submit := uint64(0) - - assert.Equal(t, emit, ef.policiesSubmit) - assert.Equal(t, submit, ef.policiesEmit) - assert.False(t, ef.enabled) - - submit = uint64(1 << 0) - emit = uint64(1<<1 | 1<<2) - efWithOptions := newEventFlags( - eventFlagsWithSubmit(submit), - eventFlagsWithEmit(emit), - eventFlagsWithEnabled(true), - ) - assert.Equal(t, submit, efWithOptions.policiesSubmit) - assert.Equal(t, emit, efWithOptions.policiesEmit) - assert.True(t, efWithOptions.enabled) -} - -// TestEnableSubmission tests the enableSubmission function. -func TestEnableSubmission(t *testing.T) { - t.Parallel() - - ef := newEventFlags() - ef.enableSubmission(1) - assert.True(t, utils.HasBit(ef.policiesSubmit, 1)) -} - -// TestEnableEmission tests the enableEmission function. -func TestEnableEmission(t *testing.T) { - t.Parallel() - - ef := newEventFlags() - ef.enableEmission(1) - assert.True(t, utils.HasBit(ef.policiesEmit, 1)) - - ef.enableEmission(-1) -} - -// TestDisableSubmission tests the disableSubmission function. -func TestDisableSubmission(t *testing.T) { - t.Parallel() - - ef := newEventFlags() - ef.enableSubmission(42) - ef.disableSubmission(42) - assert.False(t, utils.HasBit(ef.policiesSubmit, 42)) -} - -// TestDisableEmission tests the disableEmission function. -func TestDisableEmission(t *testing.T) { - t.Parallel() - - ef := newEventFlags() - ef.enableEmission(42) - ef.disableEmission(42) - assert.False(t, utils.HasBit(ef.policiesEmit, 42)) -} - -// TestEnableEvent tests the enableEvent function. 
-func TestEnableEvent(t *testing.T) {
-	t.Parallel()
-
-	ef := newEventFlags(eventFlagsWithEnabled(false))
-	ef.enableEvent()
-	assert.True(t, ef.enabled)
-}
-
-// TestDisableEvent tests the disableEvent function.
-func TestDisableEvent(t *testing.T) {
-	t.Parallel()
-
-	ef := newEventFlags(eventFlagsWithEnabled(true))
-	ef.disableEvent()
-	assert.False(t, ef.enabled)
-}
diff --git a/pkg/policy/filter_maps.go b/pkg/policy/filter_maps.go
new file mode 100644
index 000000000000..42457d027725
--- /dev/null
+++ b/pkg/policy/filter_maps.go
@@ -0,0 +1,495 @@
+package policy
+
+import (
+	"strings"
+
+	"github.com/aquasecurity/tracee/pkg/containers"
+	"github.com/aquasecurity/tracee/pkg/errfmt"
+	"github.com/aquasecurity/tracee/pkg/events"
+	"github.com/aquasecurity/tracee/pkg/filters"
+	"github.com/aquasecurity/tracee/pkg/utils"
+)
+
+// ruleBitmap mirrors the C struct equality (eq_t)
+// it stores information about which rules a filter value applies to.
+// equalsInRules: A bitmap representing whether a value is equal to the filter value.
+// keyUsedInRules: A bitmap representing whether a value's key is used in the rule.
+type ruleBitmap struct {
+	equalsInRules  uint64
+	keyUsedInRules uint64
+}
+
+// RuleBitmap is the exported version of ruleBitmap for external access
+type RuleBitmap struct {
+	EqualsInRules  uint64
+	KeyUsedInRules uint64
+}
+
+const (
+	ruleBitmapSize = 16 // 8 bytes for equalsInRules and 8 bytes for keyUsedInRules
+)
+
+// filterVersionKey matches C's filter_version_key_t struct
+type filterVersionKey struct {
+	Version uint16
+	Pad     uint16
+	EventID uint32
+}
+
+// FilterVersionKey is the exported version of filterVersionKey for external use
+type FilterVersionKey = filterVersionKey
+
+// filterMaps contains maps that mirror the corresponding eBPF filter maps.
+// Each field corresponds to a specific eBPF map used for filtering events in kernel space.
+// The computed values in these maps are used to update their eBPF counterparts.
+// The outer map key is a combination of event ID and rules version (filterVersionKey),
+// while the inner map key varies by filter type (e.g., uint64, string) and the value is a ruleBitmap.
+type filterMaps struct { + uidFilters map[filterVersionKey]map[uint64][]ruleBitmap + pidFilters map[filterVersionKey]map[uint64][]ruleBitmap + mntNSFilters map[filterVersionKey]map[uint64][]ruleBitmap + pidNSFilters map[filterVersionKey]map[uint64][]ruleBitmap + cgroupIdFilters map[filterVersionKey]map[uint64][]ruleBitmap + utsFilters map[filterVersionKey]map[string][]ruleBitmap + commFilters map[filterVersionKey]map[string][]ruleBitmap + containerFilters map[filterVersionKey]map[string][]ruleBitmap + dataPrefixFilters map[filterVersionKey]map[string][]ruleBitmap + dataSuffixFilters map[filterVersionKey]map[string][]ruleBitmap + dataExactFilters map[filterVersionKey]map[string][]ruleBitmap + binaryFilters map[filterVersionKey]map[filters.NSBinary][]ruleBitmap + dataFilterConfigs map[events.ID]dataFilterConfig + extendedScopeFilterConfigs map[events.ID]extendedScopeFiltersConfig +} + +// ExtendedScopeFiltersConfig is the exported version of extendedScopeFiltersConfig +type ExtendedScopeFiltersConfig struct { + UIDFilterEnabled []uint64 + PIDFilterEnabled []uint64 + MntNsFilterEnabled []uint64 + PidNsFilterEnabled []uint64 + UtsNsFilterEnabled []uint64 + CommFilterEnabled []uint64 + CgroupIdFilterEnabled []uint64 + ContFilterEnabled []uint64 + NewContFilterEnabled []uint64 + NewPidFilterEnabled []uint64 + BinPathFilterEnabled []uint64 + + UIDFilterMatchIfKeyMissing []uint64 + PIDFilterMatchIfKeyMissing []uint64 + MntNsFilterMatchIfKeyMissing []uint64 + PidNsFilterMatchIfKeyMissing []uint64 + UtsNsFilterMatchIfKeyMissing []uint64 + CommFilterMatchIfKeyMissing []uint64 + CgroupIdFilterMatchIfKeyMissing []uint64 + ContFilterMatchIfKeyMissing []uint64 + NewContFilterMatchIfKeyMissing []uint64 + NewPidFilterMatchIfKeyMissing []uint64 + BinPathFilterMatchIfKeyMissing []uint64 +} + +// FilterMaps is the exported version of filterMaps for external access +type FilterMaps struct { + UIDFilters map[FilterVersionKey]map[uint64][]RuleBitmap + PIDFilters map[FilterVersionKey]map[uint64][]RuleBitmap + MntNsFilters map[FilterVersionKey]map[uint64][]RuleBitmap + PidNsFilters map[FilterVersionKey]map[uint64][]RuleBitmap + CgroupFilters map[FilterVersionKey]map[uint64][]RuleBitmap + UTSFilters map[FilterVersionKey]map[string][]RuleBitmap + CommFilters map[FilterVersionKey]map[string][]RuleBitmap + ContainerFilters map[FilterVersionKey]map[string][]RuleBitmap + ExtendedScopeFilterConfigs map[events.ID]ExtendedScopeFiltersConfig +} + +type equalityType int + +const ( + notEqual equalityType = iota + equal +) + +type dataFilterConfig struct { + string stringFilterConfig + // other types of filters +} + +// stringFilterConfig stores configuration for string matching filters. +type stringFilterConfig struct { + prefixEnabled []uint64 // Bitmap of rules with prefix matching enabled + suffixEnabled []uint64 // Bitmap of rules with suffix matching enabled + exactEnabled []uint64 // Bitmap of rules with exact matching enabled + prefixMatchIfKeyMissing []uint64 // Bitmap of rules with prefix matching enabled if the filter key is missing + suffixMatchIfKeyMissing []uint64 // Bitmap of rules with suffix matching enabled if the filter key is missing + exactMatchIfKeyMissing []uint64 // Bitmap of rules with exact matching enabled if the filter key is missing +} + +// computeFilterMaps processes policy rules and returns two data structures: +// - A filterMaps instance containing maps that mirror eBPF filter maps in kernel space, +// used for filtering events based on scope and data filters. 
+// - A map of data filter configurations per event ID, containing information about +// enabled string matching operations for each rule. +// +// The cts parameter provides container information required for resolving container IDs +// to cgroup IDs when processing container filters. +// +// Returns error if filter processing fails for any rule. +func (pm *PolicyManager) computeFilterMaps( + conts *containers.Manager, +) (maps *filterMaps, err error) { + maps = &filterMaps{ + uidFilters: make(map[filterVersionKey]map[uint64][]ruleBitmap), + pidFilters: make(map[filterVersionKey]map[uint64][]ruleBitmap), + mntNSFilters: make(map[filterVersionKey]map[uint64][]ruleBitmap), + pidNSFilters: make(map[filterVersionKey]map[uint64][]ruleBitmap), + cgroupIdFilters: make(map[filterVersionKey]map[uint64][]ruleBitmap), + utsFilters: make(map[filterVersionKey]map[string][]ruleBitmap), + commFilters: make(map[filterVersionKey]map[string][]ruleBitmap), + dataPrefixFilters: make(map[filterVersionKey]map[string][]ruleBitmap), + dataSuffixFilters: make(map[filterVersionKey]map[string][]ruleBitmap), + dataExactFilters: make(map[filterVersionKey]map[string][]ruleBitmap), + binaryFilters: make(map[filterVersionKey]map[filters.NSBinary][]ruleBitmap), + dataFilterConfigs: make(map[events.ID]dataFilterConfig), + extendedScopeFilterConfigs: make(map[events.ID]extendedScopeFiltersConfig), + } + + for eventID, eventRules := range pm.rules { + vKey := filterVersionKey{ + Version: eventRules.rulesVersion, + EventID: uint32(eventID), + } + + for _, rule := range eventRules.Rules { + if err = pm.processRuleScopeFilters(maps, vKey, rule, conts); err != nil { + return nil, errfmt.WrapError(err) + } + + if err = pm.processRuleDataFilters(maps, vKey, rule, eventID); err != nil { + return nil, errfmt.WrapError(err) + } + } + + // Compute extended scope filter configs for overflow rules + maps.extendedScopeFilterConfigs[eventID] = pm.computeScopeFiltersConfig(eventID) + } + return maps, nil +} + +func (pm *PolicyManager) processRuleScopeFilters( + filterMaps *filterMaps, + vKey filterVersionKey, + rule *EventRule, + cts *containers.Manager, +) error { + if rule.Policy == nil { + return nil + } + + // UIDFilters + uidEqs := rule.Policy.UIDFilter.Equalities() + updateRuleBitmapsForEvent(filterMaps.uidFilters, vKey, rule.ID, uidEqs.NotEqual, uidEqs.Equal) + + // PIDFilters + pidEqs := rule.Policy.PIDFilter.Equalities() + updateRuleBitmapsForEvent(filterMaps.pidFilters, vKey, rule.ID, pidEqs.NotEqual, pidEqs.Equal) + + // MntNSFilters + mntNSEqs := rule.Policy.MntNSFilter.Equalities() + updateRuleBitmapsForEvent(filterMaps.mntNSFilters, vKey, rule.ID, mntNSEqs.NotEqual, mntNSEqs.Equal) + + // PidNSFilters + pidNSEqs := rule.Policy.PidNSFilter.Equalities() + updateRuleBitmapsForEvent(filterMaps.pidNSFilters, vKey, rule.ID, pidNSEqs.NotEqual, pidNSEqs.Equal) + + // ContIDFilters requires special handling for container lookup + contIDEqs := rule.Policy.ContIDFilter.Equalities() + for contID := range contIDEqs.ExactNotEqual { + cgroupIDs, err := cts.FindContainerCgroupID32LSB(contID) + if err != nil { + return err + } + updateRuleBitmapForKey(filterMaps.cgroupIdFilters, vKey, uint64(cgroupIDs[0]), rule.ID, notEqual) + } + for contID := range contIDEqs.ExactEqual { + cgroupIDs, err := cts.FindContainerCgroupID32LSB(contID) + if err != nil { + return err + } + updateRuleBitmapForKey(filterMaps.cgroupIdFilters, vKey, uint64(cgroupIDs[0]), rule.ID, equal) + } + + // UTSFilters + utsEqs := rule.Policy.UTSFilter.Equalities() + 
updateRuleBitmapsForEvent(filterMaps.utsFilters, vKey, rule.ID, utsEqs.ExactNotEqual, utsEqs.ExactEqual) + + // CommFilters + commEqs := rule.Policy.CommFilter.Equalities() + updateRuleBitmapsForEvent(filterMaps.commFilters, vKey, rule.ID, commEqs.ExactNotEqual, commEqs.ExactEqual) + + // BinaryFilters + binEqs := rule.Policy.BinaryFilter.Equalities() + updateRuleBitmapsForEvent(filterMaps.binaryFilters, vKey, rule.ID, binEqs.NotEqual, binEqs.Equal) + + return nil +} + +// updateRuleBitmapsForEvent updates the rule bitmaps for a given filter version and rule ID. +// It processes both "not equal" and "equal" filter values. +// NotEqual values must be processed first because Equal values have precedence. +// If a value is present in both NotEqual and Equal maps, it will be treated as Equal. +func updateRuleBitmapsForEvent[K comparable]( + eqs map[filterVersionKey]map[K][]ruleBitmap, + vKey filterVersionKey, + ruleID uint, + notEqualsMap map[K]struct{}, + equalsMap map[K]struct{}, +) { + for key := range notEqualsMap { + updateRuleBitmapForKey(eqs, vKey, key, ruleID, notEqual) + } + for key := range equalsMap { + updateRuleBitmapForKey(eqs, vKey, key, ruleID, equal) + } +} + +// updateRuleBitmapForKey updates the rule bitmap for a specific key, version, rule, and equality type. +func updateRuleBitmapForKey[K comparable]( + eqs map[filterVersionKey]map[K][]ruleBitmap, + vKey filterVersionKey, + key K, + ruleID uint, + eqType equalityType, +) { + bitmapIndex := ruleID / 64 + bitOffset := ruleID % 64 + + innerMap := getOrCreateRuleBitmapMap(eqs, vKey) + + // Ensure that the slice of bitmaps exists for the key and has enough bitmaps + for len(innerMap[key]) <= int(bitmapIndex) { + innerMap[key] = append(innerMap[key], ruleBitmap{}) + } + + // Update the proper bitmap + updateRuleBitmap(&innerMap[key][bitmapIndex], bitOffset, eqType) +} + +// getOrCreateRuleBitmapMap ensures that an inner map exists for a given filterVersionKey. +// If it doesn't exist, a new map is created and stored in the outer map. +func getOrCreateRuleBitmapMap[K comparable]( + outerMap map[filterVersionKey]map[K][]ruleBitmap, + vKey filterVersionKey, +) map[K][]ruleBitmap { + if innerMap, exists := outerMap[vKey]; exists { + return innerMap + } + innerMap := make(map[K][]ruleBitmap) + outerMap[vKey] = innerMap + return innerMap +} + +// updateRuleBitmap updates the rule bitmap for a specific rule and equality type. +func updateRuleBitmap(rb *ruleBitmap, bitOffset uint, eqType equalityType) { + switch eqType { + case equal: + utils.SetBit(&rb.equalsInRules, bitOffset) + utils.SetBit(&rb.keyUsedInRules, bitOffset) + case notEqual: + utils.ClearBit(&rb.equalsInRules, bitOffset) + utils.SetBit(&rb.keyUsedInRules, bitOffset) + } +} + +func (pm *PolicyManager) processRuleDataFilters( + filterMaps *filterMaps, + vKey filterVersionKey, + rule *EventRule, + eventID events.ID, +) error { + if rule.Data == nil { + return nil + } + + equalities, err := rule.Data.DataFilter.Equalities() + if err != nil { + return nil // Skip this rule + } + + // Get or create config + config, exists := filterMaps.dataFilterConfigs[eventID] + if !exists { + config = dataFilterConfig{} + } + + // Process string filters + pm.processStringFilterRule(filterMaps, vKey, rule.ID, equalities, &config.string) + + // Store updated config + filterMaps.dataFilterConfigs[eventID] = config + return nil +} + +// processStringFilterRule processes string equality filters (exact, prefix, and suffix matches) +// for a given rule. 
It updates the filter maps with rule bitmaps and records in the provided string filter
+// configuration which matching operations are enabled for this rule.
+//
+// For each type of string match (exact, prefix, suffix):
+//   - Updates rule bitmaps in the corresponding filter map
+//   - Handles both equal and not-equal cases
+//   - For suffix matches, strings are reversed to allow prefix-based matching in eBPF
+//   - Special handling for overlapping prefix/suffix patterns
+func (pm *PolicyManager) processStringFilterRule(
+	filterMaps *filterMaps,
+	vKey filterVersionKey,
+	ruleID uint,
+	equalities filters.StringFilterEqualities,
+	strFilterCfg *stringFilterConfig,
+) {
+	// Calculate bitmap index and bit offset
+	bitmapIndex := ruleID / 64
+	bitOffset := ruleID % 64
+
+	// Handle exact matches
+	exactBitmaps := getOrCreateRuleBitmapMap(filterMaps.dataExactFilters, vKey)
+	for k := range equalities.ExactNotEqual {
+		eb := exactBitmaps[k]
+		for len(eb) <= int(bitmapIndex) {
+			eb = append(eb, ruleBitmap{})
+		}
+		updateRuleBitmap(&eb[bitmapIndex], bitOffset, notEqual)
+		exactBitmaps[k] = eb
+
+		// Ensure strFilterCfg.exactEnabled has enough bitmaps
+		for len(strFilterCfg.exactEnabled) <= int(bitmapIndex) {
+			strFilterCfg.exactEnabled = append(strFilterCfg.exactEnabled, 0)
+		}
+		utils.SetBit(&strFilterCfg.exactEnabled[bitmapIndex], bitOffset)
+
+		// Ensure strFilterCfg.exactMatchIfKeyMissing has enough bitmaps
+		for len(strFilterCfg.exactMatchIfKeyMissing) <= int(bitmapIndex) {
+			strFilterCfg.exactMatchIfKeyMissing = append(strFilterCfg.exactMatchIfKeyMissing, 0)
+		}
+		utils.SetBit(&strFilterCfg.exactMatchIfKeyMissing[bitmapIndex], bitOffset)
+	}
+	for k := range equalities.ExactEqual {
+		eb := exactBitmaps[k]
+		for len(eb) <= int(bitmapIndex) {
+			eb = append(eb, ruleBitmap{})
+		}
+		updateRuleBitmap(&eb[bitmapIndex], bitOffset, equal)
+		exactBitmaps[k] = eb
+
+		// Ensure strFilterCfg.exactEnabled has enough bitmaps
+		for len(strFilterCfg.exactEnabled) <= int(bitmapIndex) {
+			strFilterCfg.exactEnabled = append(strFilterCfg.exactEnabled, 0)
+		}
+		utils.SetBit(&strFilterCfg.exactEnabled[bitmapIndex], bitOffset)
+	}
+
+	// Handle prefix matches
+	prefixBitmaps := getOrCreateRuleBitmapMap(filterMaps.dataPrefixFilters, vKey)
+	for k := range equalities.PrefixNotEqual {
+		updatePrefixOrSuffixMatch(prefixBitmaps, k, ruleID, notEqual)
+
+		// Ensure strFilterCfg.prefixEnabled has enough bitmaps
+		for len(strFilterCfg.prefixEnabled) <= int(bitmapIndex) {
+			strFilterCfg.prefixEnabled = append(strFilterCfg.prefixEnabled, 0)
+		}
+		utils.SetBit(&strFilterCfg.prefixEnabled[bitmapIndex], bitOffset)
+
+		// Ensure strFilterCfg.prefixMatchIfKeyMissing has enough bitmaps
+		for len(strFilterCfg.prefixMatchIfKeyMissing) <= int(bitmapIndex) {
+			strFilterCfg.prefixMatchIfKeyMissing = append(strFilterCfg.prefixMatchIfKeyMissing, 0)
+		}
+		utils.SetBit(&strFilterCfg.prefixMatchIfKeyMissing[bitmapIndex], bitOffset)
+	}
+	for k := range equalities.PrefixEqual {
+		updatePrefixOrSuffixMatch(prefixBitmaps, k, ruleID, equal)
+
+		// Ensure strFilterCfg.prefixEnabled has enough bitmaps
+		for len(strFilterCfg.prefixEnabled) <= int(bitmapIndex) {
+			strFilterCfg.prefixEnabled = append(strFilterCfg.prefixEnabled, 0)
+		}
+		utils.SetBit(&strFilterCfg.prefixEnabled[bitmapIndex], bitOffset)
+	}
+
+	// Handle suffix matches
+	suffixBitmaps := getOrCreateRuleBitmapMap(filterMaps.dataSuffixFilters, vKey)
+	for k := range equalities.SuffixNotEqual {
+		reversed := utils.ReverseString(k)
+		
updatePrefixOrSuffixMatch(suffixBitmaps, reversed, ruleID, notEqual) + + // Ensure strFilterCfg.suffixEnabled has enough bitmaps + for len(strFilterCfg.suffixEnabled) <= int(bitmapIndex) { + strFilterCfg.suffixEnabled = append(strFilterCfg.suffixEnabled, 0) + } + utils.SetBit(&strFilterCfg.suffixEnabled[bitmapIndex], bitOffset) + + // Ensure strFilterCfg.suffixMatchIfKeyMissing has enough bitmaps + for len(strFilterCfg.suffixMatchIfKeyMissing) <= int(bitmapIndex) { + strFilterCfg.suffixMatchIfKeyMissing = append(strFilterCfg.suffixMatchIfKeyMissing, 0) + } + utils.SetBit(&strFilterCfg.suffixMatchIfKeyMissing[bitmapIndex], bitOffset) + } + for k := range equalities.SuffixEqual { + reversed := utils.ReverseString(k) + updatePrefixOrSuffixMatch(suffixBitmaps, reversed, ruleID, equal) + + // Ensure strFilterCfg.suffixEnabled has enough bitmaps + for len(strFilterCfg.suffixEnabled) <= int(bitmapIndex) { + strFilterCfg.suffixEnabled = append(strFilterCfg.suffixEnabled, 0) + } + utils.SetBit(&strFilterCfg.suffixEnabled[bitmapIndex], bitOffset) + } +} + +// updatePrefixOrSuffixMatch handles both prefix and suffix matches by updating the rule bitmap +// for the given pattern and rule ID. It also updates existing entries with matching prefixes. +func updatePrefixOrSuffixMatch( + ruleBitmaps map[string][]ruleBitmap, + pattern string, + ruleID uint, + eqType equalityType, +) { + bitmapIndex := ruleID / 64 + bitOffset := ruleID % 64 + + // Ensure slice exists and has enough capacity + for len(ruleBitmaps[pattern]) <= int(bitmapIndex) { + ruleBitmaps[pattern] = append(ruleBitmaps[pattern], ruleBitmap{}) + } + + newRuleBitmap := ruleBitmaps[pattern][bitmapIndex] + var longestMatch string + var hasMatch bool + + // Iterate through existing entries to find overlapping prefixes + for existingPattern, existingRuleBitmaps := range ruleBitmaps { + if strings.HasPrefix(existingPattern, pattern) { + // Update existing rule bitmap for entries with matching prefix + for len(existingRuleBitmaps) <= int(bitmapIndex) { + existingRuleBitmaps = append(existingRuleBitmaps, ruleBitmap{}) + } + updateRuleBitmap(&existingRuleBitmaps[bitmapIndex], bitOffset, eqType) + ruleBitmaps[existingPattern] = existingRuleBitmaps + } else if strings.HasPrefix(pattern, existingPattern) { + // Find the longest existing prefix match + if !hasMatch || len(existingPattern) > len(longestMatch) { + longestMatch = existingPattern + for len(existingRuleBitmaps) <= int(bitmapIndex) { + existingRuleBitmaps = append(existingRuleBitmaps, ruleBitmap{}) + } + newRuleBitmap = existingRuleBitmaps[bitmapIndex] + hasMatch = true + } + } + } + + // Update the rule bitmap for the new pattern + updateRuleBitmap(&newRuleBitmap, bitOffset, eqType) + ruleBitmaps[pattern][bitmapIndex] = newRuleBitmap +} diff --git a/pkg/policy/policies.go b/pkg/policy/policies.go deleted file mode 100644 index 1334711b47ef..000000000000 --- a/pkg/policy/policies.go +++ /dev/null @@ -1,231 +0,0 @@ -package policy - -import ( - bpf "github.com/aquasecurity/libbpfgo" - - "github.com/aquasecurity/tracee/pkg/events" - "github.com/aquasecurity/tracee/pkg/filters" - "github.com/aquasecurity/tracee/pkg/logger" - "github.com/aquasecurity/tracee/pkg/utils" -) - -const ( - PolicyMax = int(64) - PolicyAll = ^uint64(0) - PolicyNone = uint64(0) -) - -var AlwaysSubmit = events.EventState{ - Submit: PolicyAll, -} - -type policies struct { - bpfInnerMaps map[string]*bpf.BPFMapLow // BPF inner maps - policiesArray [PolicyMax]*Policy // underlying policies array for fast access of empty slots - 
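// Illustrative sketch (editor's aside, not part of the patch itself): the
// ruleID -> (chunk, bit) placement used by updateRuleBitmapForKey and
// updateRuleBitmap above, shown for a rule beyond the first 64. The function
// name markRuleEqual is hypothetical; the bit semantics follow updateRuleBitmap
// (an "equal" filter sets both bitmaps, a "not equal" filter sets only
// keyUsedInRules), and growing the slice on demand is what allows an event to
// carry more than 64 rules.
func markRuleEqual(bitmaps []ruleBitmap, ruleID uint) []ruleBitmap {
	idx, off := ruleID/64, ruleID%64 // e.g. ruleID 70 -> idx 1, off 6
	for len(bitmaps) <= int(idx) {   // grow to idx+1 chunks on demand
		bitmaps = append(bitmaps, ruleBitmap{})
	}
	bitmaps[idx].equalsInRules |= 1 << off
	bitmaps[idx].keyUsedInRules |= 1 << off
	return bitmaps
}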
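// Illustrative sketch (editor's aside, not part of the patch itself): why
// suffix patterns are stored reversed in processStringFilterRule above.
// Reversing both the pattern and the candidate value turns a suffix check
// into a prefix check, which is the cheap operation available on the eBPF
// side. reverseString is a stand-in for utils.ReverseString; the helper name
// hasSuffixViaReversedPrefix is hypothetical.
func reverseString(s string) string {
	r := []rune(s)
	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
		r[i], r[j] = r[j], r[i]
	}
	return string(r)
}

func hasSuffixViaReversedPrefix(value, suffix string) bool {
	// ".conf" reversed is "fnoc."; "/etc/tracee.conf" reversed starts with it.
	return strings.HasPrefix(reverseString(value), reverseString(suffix))
}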
policiesMapByID map[int]*Policy // all policies map by ID - policiesMapByName map[string]*Policy // all policies map by name - policiesList []*Policy // all policies list - - // computed values - - userlandPolicies []*Policy // reduced list with userland filterable policies (read in a hot path) - uidFilterMin uint64 - uidFilterMax uint64 - pidFilterMin uint64 - pidFilterMax uint64 - uidFilterableInUserland bool - pidFilterableInUserland bool - filterableInUserland bool - containerFiltersEnabled uint64 // bitmap of policies that have at least one container filter type enabled -} - -func NewPolicies() *policies { - return &policies{ - bpfInnerMaps: map[string]*bpf.BPFMapLow{}, - policiesArray: [PolicyMax]*Policy{}, - policiesMapByID: map[int]*Policy{}, - policiesMapByName: map[string]*Policy{}, - policiesList: []*Policy{}, - userlandPolicies: []*Policy{}, - uidFilterMin: filters.MinNotSetUInt, - uidFilterMax: filters.MaxNotSetUInt, - pidFilterMin: filters.MinNotSetUInt, - pidFilterMax: filters.MaxNotSetUInt, - uidFilterableInUserland: false, - pidFilterableInUserland: false, - filterableInUserland: false, - containerFiltersEnabled: 0, - } -} - -// Compile-time check to ensure that Policies implements the Cloner interface -var _ utils.Cloner[*policies] = &policies{} - -func (ps *policies) count() int { - return len(ps.policiesMapByID) -} - -// Deprecated: version returns the version of the Policies. -// Will be removed soon. -func (ps *policies) version() uint16 { - return 1 // version will be removed soon -} - -// withContainerFilterEnabled returns a bitmap of policies that have at least one container filter type enabled. -func (ps *policies) withContainerFilterEnabled() uint64 { - return ps.containerFiltersEnabled -} - -// containerFilterEnabled returns true if at least one policy has a container filter type enabled. -func (ps *policies) containerFilterEnabled() bool { - return ps.withContainerFilterEnabled() > 0 -} - -// set sets a policy in the policies, given an ID. -func set(ps *policies, id int, p *Policy) error { - p.ID = id - ps.policiesArray[id] = p - ps.policiesMapByID[id] = p - ps.policiesMapByName[p.Name] = p - ps.policiesList = append(ps.policiesList, p) - - ps.compute() - - return nil -} - -// add adds a policy. -// The policy ID (index) is automatically assigned to the first empty slot. -func (ps *policies) add(p *Policy) error { - if p == nil { - return PolicyNilError() - } - if ps.count() == PolicyMax { - return PoliciesMaxExceededError() - } - if existing, ok := ps.policiesMapByName[p.Name]; ok { - return PolicyAlreadyExistsError(existing.Name, existing.ID) - } - - // search for the first empty slot - for id, slot := range ps.allFromArray() { - if slot == nil { - return set(ps, id, p) - } - } - - return nil -} - -// set sets a policy. -// A policy overwrite is allowed only if the policy that is going to be overwritten -// has the same ID and name. -func (ps *policies) set(p *Policy) error { - if p == nil { - return PolicyNilError() - } - - id := p.ID - if !isIDInRange(id) { - return PoliciesOutOfRangeError(id) - } - - existing, ok := ps.policiesMapByName[p.Name] - if ok && existing.ID != id { // name already exists with a different ID - return PolicyAlreadyExistsError(existing.Name, existing.ID) - } - - return set(ps, id, p) -} - -// remove removes a policy by name. 
-func (ps *policies) remove(name string) error { - p, ok := ps.policiesMapByName[name] - if !ok { - return PolicyNotFoundByNameError(name) - } - - id := p.ID - ps.policiesList = append(ps.policiesList[:id], ps.policiesList[id+1:]...) - delete(ps.policiesMapByID, id) - delete(ps.policiesMapByName, p.Name) - ps.policiesArray[id] = nil - - ps.compute() - - return nil -} - -// lookupById returns a policy by ID. -func (ps *policies) lookupById(id int) (*Policy, error) { - if !isIDInRange(id) { - return nil, PoliciesOutOfRangeError(id) - } - - p := ps.policiesArray[id] - if p == nil { - return nil, PolicyNotFoundByIDError(id) - } - return p, nil -} - -// lookupByName returns a policy by name. -func (ps *policies) lookupByName(name string) (*Policy, error) { - if p, ok := ps.policiesMapByName[name]; ok { - return p, nil - } - - return nil, PolicyNotFoundByNameError(name) -} - -// matchedNames returns a list of matched policies names based on -// the given matched bitmap. -func (ps *policies) matchedNames(matched uint64) []string { - names := []string{} - - for _, p := range ps.allFromMap() { - if utils.HasBit(matched, uint(p.ID)) { - names = append(names, p.Name) - } - } - - return names -} - -// allFromMap returns a map of allFromMap policies by ID. -// When iterating, the order is not guaranteed. -func (ps *policies) allFromMap() map[int]*Policy { - return ps.policiesMapByID -} - -// allFromArray returns an slice of the underlying policies array. -// When iterating, the order is guaranteed. -func (ps *policies) allFromArray() []*Policy { - return ps.policiesArray[:] -} - -func isIDInRange(id int) bool { - return id >= 0 && id < PolicyMax -} - -// Clone returns a deep copy of Policies. -func (ps *policies) Clone() *policies { - if ps == nil { - return nil - } - - nPols := NewPolicies() - - // Deep copy of all policies - for _, p := range ps.allFromArray() { - if p == nil { - continue - } - if err := nPols.set(p.Clone()); err != nil { - logger.Errorw("Cloning policy %s: %v", p.Name, err) - return nil - } - } - - return nPols -} diff --git a/pkg/policy/policies_compute.go b/pkg/policy/policies_compute.go deleted file mode 100644 index 66c6a02cfbd2..000000000000 --- a/pkg/policy/policies_compute.go +++ /dev/null @@ -1,152 +0,0 @@ -package policy - -import ( - "github.com/aquasecurity/tracee/pkg/filters" - "github.com/aquasecurity/tracee/pkg/utils" -) - -// compute recalculates values, updates flags, fills the reduced userland map, -// and sets the related bitmap that is used to prevent the iteration of the entire map. -// -// It must be called at every runtime policies changes. -func (ps *policies) compute() { - ps.calculateGlobalMinMax() - ps.updateContainerFilterEnabled() - ps.updateUserlandPolicies() -} - -// calculateGlobalMinMax sets the global min and max, to be checked in kernel, -// of the Minimum and Maximum enabled filters only if scope filter types -// (e.g. BPFUIDFilter) from all policies have both Minimum and Maximum values set. -// -// Policies userland filter flags are also set (e.g. uidFilterableInUserland). -// -// The scope filter types relevant for this function are just UIDFilter and -// PIDFilter. 
-func (ps *policies) calculateGlobalMinMax() { - var ( - uidMinFilterCount int - uidMaxFilterCount int - uidFilterCount int - pidMinFilterCount int - pidMaxFilterCount int - pidFilterCount int - policyCount int - - uidMinFilterableInUserland bool - uidMaxFilterableInUserland bool - pidMinFilterableInUserland bool - pidMaxFilterableInUserland bool - ) - - for _, p := range ps.allFromMap() { - policyCount++ - - if p.UIDFilter.Enabled() { - uidFilterCount++ - - if p.UIDFilter.Minimum() != filters.MinNotSetUInt { - uidMinFilterCount++ - } - if p.UIDFilter.Maximum() != filters.MaxNotSetUInt { - uidMaxFilterCount++ - } - } - if p.PIDFilter.Enabled() { - pidFilterCount++ - - if p.PIDFilter.Minimum() != filters.MinNotSetUInt { - pidMinFilterCount++ - } - if p.PIDFilter.Maximum() != filters.MaxNotSetUInt { - pidMaxFilterCount++ - } - } - } - - uidMinFilterableInUserland = policyCount > 1 && (uidMinFilterCount != uidFilterCount) - uidMaxFilterableInUserland = policyCount > 1 && (uidMaxFilterCount != uidFilterCount) - pidMinFilterableInUserland = policyCount > 1 && (pidMinFilterCount != pidFilterCount) - pidMaxFilterableInUserland = policyCount > 1 && (pidMaxFilterCount != pidFilterCount) - - // reset global min max - ps.uidFilterMax = filters.MaxNotSetUInt - ps.uidFilterMin = filters.MinNotSetUInt - ps.pidFilterMax = filters.MaxNotSetUInt - ps.pidFilterMin = filters.MinNotSetUInt - - ps.uidFilterableInUserland = uidMinFilterableInUserland || uidMaxFilterableInUserland - ps.pidFilterableInUserland = pidMinFilterableInUserland || pidMaxFilterableInUserland - - if ps.uidFilterableInUserland && ps.pidFilterableInUserland { - // there's no need to iterate filter policies again since - // all uint events will be submitted from ebpf with no regards - - return - } - - // set a reduced range of uint values to be filtered in ebpf - for _, p := range ps.allFromMap() { - if p.UIDFilter.Enabled() { - if !uidMinFilterableInUserland { - ps.uidFilterMin = utils.Min(ps.uidFilterMin, p.UIDFilter.Minimum()) - } - if !uidMaxFilterableInUserland { - ps.uidFilterMax = utils.Max(ps.uidFilterMax, p.UIDFilter.Maximum()) - } - } - if p.PIDFilter.Enabled() { - if !pidMinFilterableInUserland { - ps.pidFilterMin = utils.Min(ps.pidFilterMin, p.PIDFilter.Minimum()) - } - if !pidMaxFilterableInUserland { - ps.pidFilterMax = utils.Max(ps.pidFilterMax, p.PIDFilter.Maximum()) - } - } - } -} - -func (ps *policies) updateContainerFilterEnabled() { - ps.containerFiltersEnabled = 0 - - for _, p := range ps.allFromMap() { - if p.ContainerFilterEnabled() { - utils.SetBit(&ps.containerFiltersEnabled, uint(p.ID)) - } - } -} - -// updateUserlandPolicies sets the userlandPolicies list and the filterableInUserland bitmap. 
-func (ps *policies) updateUserlandPolicies() { - userlandList := []*Policy{} - ps.filterableInUserland = false - - for _, p := range ps.allFromArray() { - if p == nil { - continue - } - - hasUserlandFilters := false - - // Check filters under Rules - for _, rule := range p.Rules { - if rule.DataFilter.Enabled() || - rule.RetFilter.Enabled() || - rule.ScopeFilter.Enabled() { - hasUserlandFilters = true - break - } - } - - // Check other filters - if hasUserlandFilters || - (p.UIDFilter.Enabled() && ps.uidFilterableInUserland) || - (p.PIDFilter.Enabled() && ps.pidFilterableInUserland) { - // add policy to userland list and set the flag - userlandList = append(userlandList, p) - ps.filterableInUserland = true - } - } - - ps.userlandPolicies = userlandList -} diff --git a/pkg/policy/policies_iterator.go b/pkg/policy/policies_iterator.go deleted file mode 100644 index 5c0ae37ccd40..000000000000 --- a/pkg/policy/policies_iterator.go +++ /dev/null @@ -1,44 +0,0 @@ -package policy - -import "github.com/aquasecurity/tracee/pkg/utils" - -// policiesIterator is an iterator for Policies. -type policiesIterator struct { - policies []*Policy - index int -} - -// HasNext returns true if there are more policies to iterate. -func (i *policiesIterator) HasNext() bool { - return i.index < len(i.policies) -} - -// Next returns the next policy in the iteration. -func (i *policiesIterator) Next() *Policy { - if !i.HasNext() { - return nil - } - - p := i.policies[i.index] - i.index++ - - return p -} - -// createUserlandIterator returns a new iterator for a reduced list of policies -// which must be filtered in userland (ArgFilter, RetFilter, ScopeFilter, -// UIDFilter and PIDFilter). -func (ps *policies) createUserlandIterator() utils.Iterator[*Policy] { - return &policiesIterator{ - policies: ps.userlandPolicies, - index: 0, - } -} - -// createAllIterator returns a new iterator for all policies. 
-func (ps *policies) createAllIterator() utils.Iterator[*Policy] { - return &policiesIterator{ - policies: ps.policiesList, - index: 0, - } -} diff --git a/pkg/policy/policies_test.go b/pkg/policy/policies_test.go deleted file mode 100644 index 5cc25a14ed75..000000000000 --- a/pkg/policy/policies_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package policy - -import ( - "reflect" - "sync" - "sync/atomic" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/require" - - "github.com/aquasecurity/tracee/pkg/events" - "github.com/aquasecurity/tracee/pkg/filters" - "github.com/aquasecurity/tracee/pkg/filters/sets" -) - -func TestPoliciesClone(t *testing.T) { - t.Parallel() - - ps := NewPolicies() - - p1 := NewPolicy() - p1.Name = "p1" - err := p1.PIDFilter.Parse("=1") - require.NoError(t, err) - - p2 := NewPolicy() - p2.Name = "p2" - err = p2.UIDFilter.Parse("=2") - require.NoError(t, err) - - // Initialize the rule first - p2.Rules[events.Read] = RuleData{ - EventID: events.Read, - DataFilter: filters.NewDataFilter(), - RetFilter: filters.NewIntFilter(), - ScopeFilter: filters.NewScopeFilter(), - } - err = p2.Rules[events.Read].DataFilter.Parse(events.Read, "fd", "=dataval") - require.NoError(t, err) - - err = ps.add(p1) - require.NoError(t, err) - err = ps.add(p2) - require.NoError(t, err) - - copy := ps.Clone() - - opt1 := cmp.AllowUnexported( - policies{}, - sync.Mutex{}, - sync.RWMutex{}, - atomic.Int32{}, - filters.StringFilter{}, - filters.UIntFilter[uint32]{}, - filters.UIntFilter[uint64]{}, - filters.BoolFilter{}, - filters.IntFilter[int64]{}, - filters.DataFilter{}, - filters.ScopeFilter{}, - filters.ProcessTreeFilter{}, - filters.BinaryFilter{}, - sets.PrefixSet{}, - sets.SuffixSet{}, - filters.KernelDataFilter{}, - ) - opt2 := cmp.FilterPath( - func(p cmp.Path) bool { - // ignore the function field - // https://cs.opensource.google/go/go/+/refs/tags/go1.22.0:src/reflect/deepequal.go;l=187 - return p.Last().Type().Kind() == reflect.Func - }, - cmp.Ignore(), - ) - if !cmp.Equal(ps, copy, opt1, opt2) { - diff := cmp.Diff(ps, copy, opt1, opt2) - t.Errorf("Clone did not produce an identical copy\ndiff: %s", diff) - } - - // ensure that changes to the copy do not affect the original - p3 := NewPolicy() - p3.Name = "p3" - err = p3.CommFilter.Parse("=comm") - require.NoError(t, err) - err = copy.add(p3) - require.NoError(t, err) - - p1, err = copy.lookupByName("p1") - require.NoError(t, err) - p1.Name = "p1-modified" - - if cmp.Equal(ps, copy, opt1, opt2) { - t.Errorf("Changes to copied policy affected the original: %+v", ps) - } -} diff --git a/pkg/policy/policy.go b/pkg/policy/policy.go index 79ad3846e7d2..bb4c7eaa74e8 100644 --- a/pkg/policy/policy.go +++ b/pkg/policy/policy.go @@ -7,7 +7,6 @@ import ( ) type Policy struct { - ID int Name string UIDFilter *filters.UIntFilter[uint32] PIDFilter *filters.UIntFilter[uint32] @@ -37,7 +36,6 @@ var _ utils.Cloner[*Policy] = &Policy{} func NewPolicy() *Policy { return &Policy{ - ID: 0, Name: "", UIDFilter: filters.NewUInt32Filter(), PIDFilter: filters.NewUInt32Filter(), @@ -70,7 +68,6 @@ func (p *Policy) Clone() *Policy { n := NewPolicy() - n.ID = p.ID n.Name = p.Name n.UIDFilter = p.UIDFilter.Clone() n.PIDFilter = p.PIDFilter.Clone() diff --git a/pkg/policy/policy_manager.go b/pkg/policy/policy_manager.go index 40b6539eab88..7ebed717d02c 100644 --- a/pkg/policy/policy_manager.go +++ b/pkg/policy/policy_manager.go @@ -13,6 +13,7 @@ import ( "github.com/aquasecurity/tracee/pkg/events" 
"github.com/aquasecurity/tracee/pkg/events/data" "github.com/aquasecurity/tracee/pkg/events/dependencies" + "github.com/aquasecurity/tracee/pkg/filters" "github.com/aquasecurity/tracee/pkg/logger" "github.com/aquasecurity/tracee/pkg/pcaps" "github.com/aquasecurity/tracee/pkg/proctree" @@ -25,50 +26,69 @@ type ManagerConfig struct { CaptureConfig config.CaptureConfig } -// Manager is a thread-safe struct that manages the enabled policies for each rule -type Manager struct { - mu sync.RWMutex - cfg ManagerConfig +// Manager is responsible for managing all loaded policies and generating lists of rules grouped by event ID. +type PolicyManager struct { + policies map[string]*Policy // Map of policies by name + rules map[events.ID]EventRules // Map of rules by event ID + bootstrapPolicy *Policy // Holds the bootstrap policy evtsDepsManager *dependencies.Manager - ps *policies - rules map[events.ID]*eventFlags + bpfInnerMaps map[string]*bpf.BPFMapLow // TODO: move this to ebpf related code + mu sync.RWMutex // Read/Write Mutex to protect concurrent access + cfg ManagerConfig + fMaps *filterMaps +} + +// EventRules holds information about a specific event. +type EventRules struct { + Rules []*EventRule // List of rules associated with this event + UserlandRules []*EventRule // List of rules with userland filters enabled + enabled bool // Flag indicating whether the event is enabled. TODO: move to events manager + rulesVersion uint16 // Version of the rules for this event (for future updates) + rulesCount uint // The total number of rules for this event + ruleIDToEventRule map[uint]*EventRule // Map from RuleID to EventRule for fast lookup + containerFilteredRules []uint64 // Bitmaps to track container-filtered rules + hasOverflow bool // Flag to indicate if there are more than 64 rules +} + +type RuleSelectionType int + +const ( + NotSelected RuleSelectionType = iota + SelectedByUser + SelectedByDependency + SelectedByBootstrap +) + +// EventRule represents a single rule within an event's rule set. +type EventRule struct { + ID uint // Unique ID of the rule within the event - used for bitmap position + Data *RuleData // Data associated with the rule + Policy *Policy // Reference to the policy where the rule was defined + SelectionType RuleSelectionType // How the rule was selected: by user, by dependency, or by bootstrap policy + DerivedRuleID uint // For dependency rules, ID of the rule that caused the dependency } func NewManager( cfg ManagerConfig, - depsManager *dependencies.Manager, + evtsDepsManager *dependencies.Manager, initialPolicies ...*Policy, -) (*Manager, error) { - if depsManager == nil { +) (*PolicyManager, error) { + if evtsDepsManager == nil { panic("evtDepsManager is nil") } - ps := NewPolicies() - for _, p := range initialPolicies { - if err := ps.set(p); err != nil { - logger.Errorw("failed to set initial policy", "error", err) - } - } - - m := &Manager{ + pm := &PolicyManager{ + policies: make(map[string]*Policy), + rules: make(map[events.ID]EventRules), + evtsDepsManager: evtsDepsManager, + bpfInnerMaps: make(map[string]*bpf.BPFMapLow), mu: sync.RWMutex{}, cfg: cfg, - evtsDepsManager: depsManager, - ps: ps, - rules: make(map[events.ID]*eventFlags), } - if err := m.initialize(); err != nil { - return nil, errfmt.Errorf("failed to initialize policy manager: %s", err) - } - - return m, nil -} - -func (m *Manager) subscribeDependencyHandlers() { - // TODO: As dynamic event addition or removal becomes a thing, we should subscribe all the watchers - // before selecting them. 
There is no reason to select the event in the New function anyhow. - m.evtsDepsManager.SubscribeAdd( + // Subscribe to event removals to clean up policy rules when events become unavailable + // (e.g., due to missing kernel symbol dependencies) + evtsDepsManager.SubscribeRemove( dependencies.EventNodeType, func(node interface{}) []dependencies.Action { eventNode, ok := node.(*dependencies.EventNode) @@ -77,123 +97,73 @@ func (m *Manager) subscribeDependencyHandlers() { return nil } - m.addDependencyEventToRules(eventNode.GetID(), eventNode.GetDependents()) + pm.removeEventFromRules(eventNode.GetID()) return nil }) - m.evtsDepsManager.SubscribeRemove( - dependencies.EventNodeType, - func(node interface{}) []dependencies.Action { - eventNode, ok := node.(*dependencies.EventNode) - if !ok { - logger.Errorw("Got node from type not requested") - return nil - } - - m.removeEventFromRules(eventNode.GetID()) - - return nil - }) -} - -// AddDependencyEventToRules adds for management an event that is a dependency of other events. -// The difference from chosen events is that it doesn't affect its eviction. -func (m *Manager) addDependencyEventToRules(evtID events.ID, dependentEvts []events.ID) { - var newSubmit uint64 - var reqBySig bool - - for _, dependentEvent := range dependentEvts { - currentFlags, ok := m.rules[dependentEvent] - if ok { - newSubmit |= currentFlags.policiesSubmit - reqBySig = reqBySig || events.Core.GetDefinitionByID(dependentEvent).IsSignature() - } - } - - m.addEventFlags( - evtID, - newEventFlags( - eventFlagsWithSubmit(newSubmit), - eventFlagsWithRequiredBySignature(reqBySig), - eventFlagsWithEnabled(true), - ), - ) -} -func (m *Manager) addEventFlags(id events.ID, chosenFlags *eventFlags) { - currentFlags, ok := m.rules[id] - if ok { - currentFlags.policiesSubmit |= chosenFlags.policiesSubmit - currentFlags.policiesEmit |= chosenFlags.policiesEmit - currentFlags.requiredBySignature = chosenFlags.requiredBySignature - currentFlags.enabled = chosenFlags.enabled - return + // Create and add the bootstrap policy with conditional rules + pm.bootstrapPolicy = createBootstrapPolicy(cfg) + if err := pm.AddPolicy(pm.bootstrapPolicy); err != nil { + return nil, errfmt.Errorf("failed to add bootstrap policy: %s", err) } - m.rules[id] = newEventFlags( - eventFlagsWithSubmit(chosenFlags.policiesSubmit), - eventFlagsWithEmit(chosenFlags.policiesEmit), - eventFlagsWithRequiredBySignature(chosenFlags.requiredBySignature), - eventFlagsWithEnabled(chosenFlags.enabled), - ) -} - -func (m *Manager) addDependenciesToRulesRecursive(eventNode *dependencies.EventNode) { - eventID := eventNode.GetID() - for _, dependencyEventID := range eventNode.GetDependencies().GetIDs() { - m.addDependencyEventToRules(dependencyEventID, []events.ID{eventID}) - dependencyNode, err := m.evtsDepsManager.GetEvent(dependencyEventID) - if err == nil { - m.addDependenciesToRulesRecursive(dependencyNode) + for _, p := range initialPolicies { + if err := pm.AddPolicy(p); err != nil { + logger.Errorw("failed to add initial policy", "error", err) } } -} -func (m *Manager) selectEvent(eventID events.ID, chosenState *eventFlags) { - m.addEventFlags(eventID, chosenState) - eventNode, err := m.evtsDepsManager.SelectEvent(eventID) - if err != nil { - logger.Errorw("Event selection failed", - "event", events.Core.GetDefinitionByID(eventID).GetName()) - return + // TODO: update required capabilities on policy addition/removal + if err := pm.updateCapsForSelectedEvents(); err != nil { + return nil, errfmt.Errorf("failed to set 
required capabilities: %v", err)
+	}
+	return pm, nil
+}
-func (m *Manager) removeEventFromRules(evtID events.ID) {
+func (pm *PolicyManager) removeEventFromRules(evtID events.ID) {
 	logger.Debugw("Remove event from rules", "event", events.Core.GetDefinitionByID(evtID).GetName())
-	delete(m.rules, evtID)
+	delete(pm.rules, evtID)
 }
-func (m *Manager) selectMandatoryEvents() {
-	// Initialize events state with mandatory events (TODO: review this need for sched exec)
+// createBootstrapPolicy creates the bootstrap policy with rules based on the provided configuration.
+// bootstrap policy is an internal policy to ensure essential events are always selected.
+func createBootstrapPolicy(cfg ManagerConfig) *Policy {
+	rules := make(map[events.ID]RuleData)
+
+	// Helper function to create RuleData with default filters
+	newRuleData := func(eventID events.ID) RuleData {
+		return RuleData{
+			EventID:     eventID,
+			DataFilter:  filters.NewDataFilter(),
+			RetFilter:   filters.NewIntFilter(),
+			ScopeFilter: filters.NewScopeFilter(),
+		}
+	}
-	m.selectEvent(events.SchedProcessFork, newEventFlags())
-	m.selectEvent(events.SchedProcessExec, newEventFlags())
-	m.selectEvent(events.SchedProcessExit, newEventFlags())
+	// Always-selected events:
+	rules[events.SchedProcessExec] = newRuleData(events.SchedProcessExec)
+	rules[events.SchedProcessFork] = newRuleData(events.SchedProcessFork)
+	rules[events.SchedProcessExit] = newRuleData(events.SchedProcessExit)
 	// Control Plane Events
+	rules[events.SignalCgroupMkdir] = newRuleData(events.SignalCgroupMkdir)
+	rules[events.SignalCgroupRmdir] = newRuleData(events.SignalCgroupRmdir)
-	m.selectEvent(events.SignalCgroupMkdir, newEventFlags(eventFlagsWithSubmit(PolicyAll)))
-	m.selectEvent(events.SignalCgroupRmdir, newEventFlags(eventFlagsWithSubmit(PolicyAll)))
-}
-
-func (m *Manager) selectConfiguredEvents() {
 	// Control Plane Process Tree Events
-	pipeEvts := func() {
-		m.selectEvent(events.SchedProcessFork, newEventFlags(eventFlagsWithSubmit(PolicyAll)))
-		m.selectEvent(events.SchedProcessExec, newEventFlags(eventFlagsWithSubmit(PolicyAll)))
-		m.selectEvent(events.SchedProcessExit, newEventFlags(eventFlagsWithSubmit(PolicyAll)))
+		rules[events.SchedProcessFork] = newRuleData(events.SchedProcessFork)
+		rules[events.SchedProcessExec] = newRuleData(events.SchedProcessExec)
+		rules[events.SchedProcessExit] = newRuleData(events.SchedProcessExit)
 	}
 	signalEvts := func() {
-		m.selectEvent(events.SignalSchedProcessFork, newEventFlags(eventFlagsWithSubmit(PolicyAll)))
-		m.selectEvent(events.SignalSchedProcessExec, newEventFlags(eventFlagsWithSubmit(PolicyAll)))
-		m.selectEvent(events.SignalSchedProcessExit, newEventFlags(eventFlagsWithSubmit(PolicyAll)))
+		rules[events.SignalSchedProcessFork] = newRuleData(events.SignalSchedProcessFork)
+		rules[events.SignalSchedProcessExec] = newRuleData(events.SignalSchedProcessExec)
+		rules[events.SignalSchedProcessExit] = newRuleData(events.SignalSchedProcessExit)
 	}
-	switch m.cfg.ProcTreeConfig.Source {
+	switch cfg.ProcTreeConfig.Source {
 	case proctree.SourceBoth:
 		pipeEvts()
 		signalEvts()
@@ -204,82 +174,50 @@ func (m *Manager) selectConfiguredEvents() {
 	}
 	// DNS Cache events
-
-	if m.cfg.DNSCacheConfig.Enable {
-		m.selectEvent(events.NetPacketDNS, newEventFlags(eventFlagsWithSubmit(PolicyAll)))
+	if cfg.DNSCacheConfig.Enable {
+		rules[events.NetPacketDNS] = newRuleData(events.NetPacketDNS)
 	}
-	// Pseudo events added by capture (if enabled by the user)
-
-	getCaptureEventsFlags := func(cfg 
config.CaptureConfig) map[events.ID]*eventFlags { - captureEvents := make(map[events.ID]*eventFlags) - - // INFO: All capture events should be placed, at least for now, to all matched policies, or else - // the event won't be set to matched policy in eBPF and should_submit() won't submit the capture - // event to userland. - - if cfg.Exec { - captureEvents[events.CaptureExec] = newEventFlags(eventFlagsWithSubmit(PolicyAll)) - } - if cfg.FileWrite.Capture { - captureEvents[events.CaptureFileWrite] = newEventFlags(eventFlagsWithSubmit(PolicyAll)) - } - if cfg.FileRead.Capture { - captureEvents[events.CaptureFileRead] = newEventFlags(eventFlagsWithSubmit(PolicyAll)) - } - if cfg.Module { - captureEvents[events.CaptureModule] = newEventFlags(eventFlagsWithSubmit(PolicyAll)) - } - if cfg.Mem { - captureEvents[events.CaptureMem] = newEventFlags(eventFlagsWithSubmit(PolicyAll)) - } - if cfg.Bpf { - captureEvents[events.CaptureBpf] = newEventFlags(eventFlagsWithSubmit(PolicyAll)) - } - if pcaps.PcapsEnabled(cfg.Net) { - captureEvents[events.CaptureNetPacket] = newEventFlags(eventFlagsWithSubmit(PolicyAll)) - } - - return captureEvents + // Capture events (selected based on configuration) + if cfg.CaptureConfig.Exec { + rules[events.CaptureExec] = newRuleData(events.CaptureExec) } - - for id, flags := range getCaptureEventsFlags(m.cfg.CaptureConfig) { - m.selectEvent(id, flags) + if cfg.CaptureConfig.FileWrite.Capture { + rules[events.CaptureFileWrite] = newRuleData(events.CaptureFileWrite) } -} - -func (m *Manager) selectUserEvents() { - // Events chosen by the user - userEvents := make(map[events.ID]*eventFlags) - - for _, p := range m.ps.policiesList { - pId := p.ID - for eId := range p.Rules { - ef, ok := userEvents[eId] - if !ok { - ef = newEventFlags(eventFlagsWithEnabled(true)) - userEvents[eId] = ef - } - - ef.enableEmission(pId) - ef.enableSubmission(pId) - } + if cfg.CaptureConfig.FileRead.Capture { + rules[events.CaptureFileRead] = newRuleData(events.CaptureFileRead) } - - for id, flags := range userEvents { - m.selectEvent(id, flags) + if cfg.CaptureConfig.Module { + rules[events.CaptureModule] = newRuleData(events.CaptureModule) } + if cfg.CaptureConfig.Mem { + rules[events.CaptureMem] = newRuleData(events.CaptureMem) + } + if cfg.CaptureConfig.Bpf { + rules[events.CaptureBpf] = newRuleData(events.CaptureBpf) + } + if pcaps.PcapsEnabled(cfg.CaptureConfig.Net) { + rules[events.CaptureNetPacket] = newRuleData(events.CaptureNetPacket) + } + + // Create policy with initialized filters + p := NewPolicy() + p.Name = "__internal_bootstrap__" + p.Rules = rules + + return p } -func (m *Manager) updateCapsForSelectedEvents() error { +func (pm *PolicyManager) updateCapsForSelectedEvents() error { // Update capabilities rings with all events dependencies caps := capabilities.GetInstance() - for id := range m.rules { + for id := range pm.rules { if !events.Core.IsDefined(id) { return errfmt.Errorf("event %d is not defined", id) } - depsNode, err := m.evtsDepsManager.GetEvent(id) + depsNode, err := pm.evtsDepsManager.GetEvent(id) if err == nil { deps := depsNode.GetDependencies() evtCaps := deps.GetCapabilities() @@ -297,308 +235,809 @@ func (m *Manager) updateCapsForSelectedEvents() error { return nil } -func (m *Manager) initialize() error { - m.subscribeDependencyHandlers() - m.selectMandatoryEvents() - m.selectConfiguredEvents() - m.selectUserEvents() - err := m.updateCapsForSelectedEvents() - if err != nil { - return errfmt.WrapError(err) +// version returns the version of the Policies. 
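To make the new flow concrete, here is a minimal in-package sketch of how the pieces above fit together. It assumes the NewManager(ManagerConfig{}, depsManager) constructor used by the updated tests; the helper name, policy name, and chosen event are illustrative only.

```go
// Sketch (package policy): wire a dependencies manager into the policy
// manager, then add a user policy next to the internal bootstrap policy.
func exampleWiring() (*PolicyManager, error) {
	depsManager := dependencies.NewDependenciesManager(
		func(id events.ID) events.Dependencies {
			return events.Core.GetDefinitionByID(id).GetDependencies()
		})

	pm, err := NewManager(ManagerConfig{}, depsManager)
	if err != nil {
		return nil, err
	}

	// A user policy is just another policy alongside "__internal_bootstrap__".
	p := NewPolicy()
	p.Name = "example-policy"
	p.Rules[events.SchedProcessExec] = RuleData{EventID: events.SchedProcessExec}
	if err := pm.AddPolicy(p); err != nil {
		return nil, err
	}
	return pm, nil
}
```

The bootstrap policy is installed by the constructor itself, so callers never add or remove it directly.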
+func (pm *PolicyManager) version() uint16 { + return 1 +} + +// AddPolicyOption is a functional option for the AddPolicy method. +type AddPolicyOption func(*addPolicyOptions) + +// addPolicyOptions contains the options for adding a policy. +type addPolicyOptions struct { + override bool +} + +// WithOverride is an AddPolicyOption that allows overriding an existing policy. +func WithOverride() AddPolicyOption { + return func(opts *addPolicyOptions) { + opts.override = true + } +} + +// AddPolicy adds a new policy or updates an existing policy in the PolicyManager. +func (pm *PolicyManager) AddPolicy(policy *Policy, opts ...AddPolicyOption) error { + if policy == nil { + return PolicyNilError() + } + + pm.mu.Lock() + defer pm.mu.Unlock() + + options := addPolicyOptions{ + override: false, // Default behavior: no override + } + for _, opt := range opts { + opt(&options) } + if _, exists := pm.policies[policy.Name]; exists && !options.override { + return PolicyAlreadyExistsError(policy.Name) + } + + // Create a temporary copy of the relevant parts of the PolicyManager's state + tempPolicies := make(map[string]*Policy) + for k, v := range pm.policies { + tempPolicies[k] = v + } + tempRules := make(map[events.ID]EventRules) + for k, v := range pm.rules { + tempRules[k] = deepCopyEventRules(v) + } + + // Perform operations on the temporary copies + tempPolicies[policy.Name] = policy // Add or update the policy + + // Update event selection in the dependency manager + // This should be done for all selected events BEFORE updating EventRules (done below) + for eventID := range policy.Rules { + // Select event + _, err := pm.evtsDepsManager.SelectEvent(eventID) + if err != nil { + eventName := events.Core.GetDefinitionByID(eventID).GetName() + return SelectEventError(eventName) + } + } + + // Update EventRules for each event affected by the policy + for eventID := range policy.Rules { + if err := pm.updateRulesForEvent(eventID, tempRules, tempPolicies); err != nil { + return errfmt.WrapError(err) + } + } + + // If all operations are successful, commit the changes to the actual PolicyManager + pm.policies = tempPolicies + pm.rules = tempRules + + // TODO: Notify listeners (if any) about the policy change + return nil } -// IsEnabled tests if a event, or a policy per event is enabled (in the future it will also check if a policy is enabled) -// TODO: add metrics about an event being enabled/disabled, or a policy being enabled/disabled? -func (m *Manager) IsEnabled(matchedPolicies uint64, id events.ID) bool { - m.mu.RLock() - defer m.mu.RUnlock() +// RemovePolicy removes a policy from the PolicyManager. 
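AddPolicy uses the functional options pattern introduced above. A short usage sketch; the helper name is hypothetical, the calls are the ones added in this change.

```go
// Sketch: re-adding a policy with the same name is rejected unless the
// caller opts in with WithOverride.
func exampleOverride(pm *PolicyManager, p *Policy) error {
	// First add: fails if a policy with the same name already exists.
	if err := pm.AddPolicy(p); err != nil {
		return err
	}
	// Re-adding the same name only succeeds when override is requested.
	return pm.AddPolicy(p, WithOverride())
}
```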
+func (pm *PolicyManager) RemovePolicy(policyName string) error { + if pm.bootstrapPolicy != nil && policyName == pm.bootstrapPolicy.Name { + return errfmt.Errorf("cannot remove bootstrap policy") + } - if !m.isEventEnabled(id) { - return false + pm.mu.Lock() + defer pm.mu.Unlock() + + policyToRemove, exists := pm.policies[policyName] + if !exists { + return PolicyNotFoundByNameError(policyName) } - return m.isRuleEnabled(matchedPolicies, id) -} + // Create temporary copies for rollback + tempPolicies := make(map[string]*Policy) + for k, v := range pm.policies { + tempPolicies[k] = v + } + tempRules := make(map[events.ID]EventRules) + for k, v := range pm.rules { + tempRules[k] = deepCopyEventRules(v) + } -// IsRuleEnabled returns true if a given event policy is enabled for a given rule -func (m *Manager) IsRuleEnabled(matchedPolicies uint64, id events.ID) bool { - m.mu.RLock() - defer m.mu.RUnlock() + // Perform operations on the temporary copies + delete(tempPolicies, policyName) // Remove the policy + + // Update event selection in the dependency manager + // This should be done for all selected events BEFORE updating EventRules (done below) + for eventID := range policyToRemove.Rules { + // Check if the event is still selected by any remaining policy + isSelected := false + for _, p := range tempPolicies { + if _, ok := p.Rules[eventID]; ok { + isSelected = true + break + } + } - return m.isRuleEnabled(matchedPolicies, id) -} + // Only unselect the event if it's not selected by any other policy + if !isSelected { + pm.evtsDepsManager.UnselectEvent(eventID) + delete(tempRules, eventID) // Remove unselected event from tempRules + } + } -// not synchronized, use IsRuleEnabled instead -func (m *Manager) isRuleEnabled(matchedPolicies uint64, id events.ID) bool { - flags, ok := m.rules[id] - if !ok { - return false + // Update EventRules for each event affected by the policy + for eventID := range policyToRemove.Rules { + // Skip if event was unselected + if _, ok := tempRules[eventID]; !ok { + continue + } + if err := pm.updateRulesForEvent(eventID, tempRules, tempPolicies); err != nil { + return errfmt.WrapError(err) + } } - return flags.policiesEmit&matchedPolicies != 0 -} + // Commit the changes to the actual PolicyManager + pm.policies = tempPolicies + pm.rules = tempRules -// IsEventEnabled returns true if a given event policy is enabled for a given rule -func (m *Manager) IsEventEnabled(id events.ID) bool { - m.mu.RLock() - defer m.mu.RUnlock() + // TODO: Notify listeners (if any) about the policy removal - return m.isEventEnabled(id) + return nil } -// not synchronized, use IsEventEnabled instead -func (m *Manager) isEventEnabled(id events.ID) bool { - flags, ok := m.rules[id] - if !ok { - return false +// deepCopyEventRules creates a deep copy of an EventRules struct. 
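Both AddPolicy and RemovePolicy follow the same copy-mutate-commit discipline: clone the policies and rules maps, apply every step to the clones, and only swap them into the manager if nothing failed, so a failed update leaves the manager unchanged. A generic sketch of the pattern, not tracee code:

```go
// updateTransactionally works on a copy of the state and commits it only if
// the mutation succeeded; on error the original state is returned untouched.
func updateTransactionally(state map[string]int, mutate func(map[string]int) error) (map[string]int, error) {
	temp := make(map[string]int, len(state))
	for k, v := range state {
		temp[k] = v
	}
	if err := mutate(temp); err != nil {
		return state, err // commit nothing on failure
	}
	return temp, nil // commit the mutated copy
}
```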
+func deepCopyEventRules(original EventRules) EventRules { + copied := EventRules{ + rulesVersion: original.rulesVersion, + rulesCount: original.rulesCount, + containerFilteredRules: original.containerFilteredRules, + enabled: original.enabled, + Rules: make([]*EventRule, len(original.Rules)), + UserlandRules: make([]*EventRule, len(original.UserlandRules)), + ruleIDToEventRule: make(map[uint]*EventRule, len(original.ruleIDToEventRule)), } - return flags.enabled + // Deep copy Rules + for i, rule := range original.Rules { + copied.Rules[i] = &EventRule{ + ID: rule.ID, + Data: rule.Data, // Data pointers can be shared + Policy: rule.Policy, // Policy pointers can be shared + SelectionType: rule.SelectionType, + DerivedRuleID: rule.DerivedRuleID, + } + } + + // Deep copy UserlandRules + for i, rule := range original.UserlandRules { + copied.UserlandRules[i] = &EventRule{ + ID: rule.ID, + Data: rule.Data, // Data pointers can be shared + Policy: rule.Policy, // Policy pointers can be shared + SelectionType: rule.SelectionType, + DerivedRuleID: rule.DerivedRuleID, + } + } + + // Deep copy ruleIDToEventRule + for k, v := range original.ruleIDToEventRule { + // Find the corresponding rule in the copied.Rules slice + for _, copiedRule := range copied.Rules { + if copiedRule.ID == v.ID { + copied.ruleIDToEventRule[k] = copiedRule + break + } + } + } + + return copied } -// EnableRule enables a rule for a given event policy -func (m *Manager) EnableRule(policyId int, id events.ID) error { - if !isIDInRange(policyId) { - return PoliciesOutOfRangeError(policyId) +// updateRulesForEvent rebuilds the EventRules for the given eventID in the tempRules map. +// It gathers applicable rules from tempPolicies, assigns RuleIDs, and increments the rules version. +func (pm *PolicyManager) updateRulesForEvent(eventID events.ID, tempRules map[events.ID]EventRules, tempPolicies map[string]*Policy) error { + if !events.Core.IsDefined(eventID) { + return errfmt.Errorf("event %d is not defined", eventID) + } + if tempRules == nil || tempPolicies == nil { + return errfmt.Errorf("nil maps provided") } - m.mu.Lock() - defer m.mu.Unlock() + var rules, userlandRules, existingDepRules []*EventRule + ruleIDToEventRule := make(map[uint]*EventRule) + ruleIDCounter := uint(0) + var containerFilteredRules []uint64 - flags, ok := m.rules[id] - if !ok { - // if you enabling/disabling a rule for an event that - // was not enabled/disabled yet, we assume the event should be enabled - flags = newEventFlags( - eventFlagsWithEnabled(true), - ) - m.rules[id] = flags + rulesVersion := uint16(0) + enabled := true // Default to true for new rules + hasOverflow := false // Initialize hasOverflow flag + + if existingEventRules, ok := tempRules[eventID]; ok { + rulesVersion = existingEventRules.rulesVersion + enabled = existingEventRules.enabled // Preserve existing enabled state + + // Save existing dependency rules (created by rules with event that depend on this event) + for _, rule := range existingEventRules.Rules { + if rule.SelectionType == SelectedByDependency { + existingDepRules = append(existingDepRules, rule) + } + } } - flags.enableEmission(policyId) + eventNode, err := pm.evtsDepsManager.GetEvent(eventID) + if err != nil { + return errfmt.WrapError(err) + } + + // Gather rules from all policies that apply to this event + for _, policy := range tempPolicies { + ruleData, ok := policy.Rules[eventID] + if !ok { + continue // This policy doesn't have rules for this event + } + + rule := &EventRule{ + ID: ruleIDCounter, + Data: 
&ruleData, + Policy: policy, + SelectionType: SelectedByUser, + } + + if policy == pm.bootstrapPolicy { + rule.SelectionType = SelectedByBootstrap + } + + rules = append(rules, rule) + ruleIDToEventRule[ruleIDCounter] = rule + ruleIDCounter++ + + // Add dependency rules for this specific rule + if err := pm.addTransitiveDependencyRules(eventNode, tempRules, make(map[events.ID]bool), 0, rule); err != nil { + return errfmt.WrapError(err) + } + + // Update containerFilteredRules bitmap + if policy.ContainerFilterEnabled() { + bitmapIndex := rule.ID / 64 + bitOffset := rule.ID % 64 + + // Ensure containerFilteredRules has enough bitmaps + for len(containerFilteredRules) <= int(bitmapIndex) { + containerFilteredRules = append(containerFilteredRules, 0) + } + + utils.SetBit(&containerFilteredRules[bitmapIndex], uint(bitOffset)) + } + + // Update userlandFilterableRules bitmap + if isRuleFilterableInUserland(rule) { + userlandRules = append(userlandRules, rule) + } + } + + // Add remaining dependency rules to final rules list + for _, depRule := range existingDepRules { + rules = append(rules, depRule) + ruleIDToEventRule[ruleIDCounter] = depRule + depRule.ID = ruleIDCounter + ruleIDCounter++ + } + + // Update hasOverflow flag if necessary + // Set overflow when the next rule would have ID >= 64 (more than 64 total rules) + if ruleIDCounter >= 64 { + hasOverflow = true + } + + // Update the EventRules for the event in the temporary map + tempRules[eventID] = EventRules{ + Rules: rules, + UserlandRules: userlandRules, + ruleIDToEventRule: ruleIDToEventRule, + rulesVersion: rulesVersion + 1, + rulesCount: ruleIDCounter, + containerFilteredRules: containerFilteredRules, + enabled: enabled, + hasOverflow: hasOverflow, + } return nil } -// DisableRule disables a rule for a given event policy -func (m *Manager) DisableRule(policyId int, id events.ID) error { - if !isIDInRange(policyId) { - return PoliciesOutOfRangeError(policyId) +// addTransitiveDependencyRules recursively adds dependency rules for the given event and all its transitive dependencies. 
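The container-filter and overflow handling above rely on rule IDs mapping onto a []uint64 bitmap array (word = id/64, bit = id%64, overflow once 64 rules exist). The following pair of helpers illustrates that arithmetic; they are a sketch, not the actual pkg/utils implementations.

```go
// setRuleBit grows the bitmap array as needed and sets the bit for ruleID.
func setRuleBit(bitmaps *[]uint64, ruleID uint) {
	word := ruleID / 64
	for uint(len(*bitmaps)) <= word {
		*bitmaps = append(*bitmaps, 0)
	}
	(*bitmaps)[word] |= 1 << (ruleID % 64)
}

// hasRuleBit reports whether the bit for ruleID is set.
func hasRuleBit(bitmaps []uint64, ruleID uint) bool {
	word := ruleID / 64
	if word >= uint(len(bitmaps)) {
		return false
	}
	return bitmaps[word]&(1<<(ruleID%64)) != 0
}
```

For example, a rule with ID 70 lands in word 1, bit 6, which is why hasOverflow is set as soon as the rule counter reaches 64.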
+func (pm *PolicyManager) addTransitiveDependencyRules( + eventNode *dependencies.EventNode, + tempRules map[events.ID]EventRules, + visited map[events.ID]bool, + depth int, + parentRule *EventRule, +) error { + const maxDepth = 5 + + if depth > maxDepth { + return errfmt.Errorf("max dependency depth exceeded") } - m.mu.Lock() - defer m.mu.Unlock() - - flags, ok := m.rules[id] - if !ok { - // if you enabling/disabling a rule for an event that - // was not enabled/disabled yet, we assume the event should be enabled - flags = newEventFlags( - eventFlagsWithEnabled(true), - ) - m.rules[id] = flags + eventID := eventNode.GetID() + if visited[eventID] { + return errfmt.Errorf("circular dependency detected") } + visited[eventID] = true + defer delete(visited, eventID) + + for _, depID := range eventNode.GetDependencies().GetIDs() { + eventRules, ok := tempRules[depID] + if !ok { + eventRules = EventRules{ + Rules: make([]*EventRule, 0), + UserlandRules: make([]*EventRule, 0), + ruleIDToEventRule: make(map[uint]*EventRule), + enabled: true, + } + } - flags.disableEmission(policyId) + // Check if dependency rule already exists + dependencyRuleExists := false + for _, existingRule := range eventRules.Rules { + if existingRule.SelectionType == SelectedByDependency && + existingRule.Policy == parentRule.Policy && + existingRule.Data == parentRule.Data { + dependencyRuleExists = true + break + } + } + + if !dependencyRuleExists { + // Create dependency rule using parent's data and policy context + // This allows tracking which rule/policy caused this dependency + rule := &EventRule{ + ID: eventRules.rulesCount, + Data: parentRule.Data, + Policy: parentRule.Policy, + SelectionType: SelectedByDependency, + DerivedRuleID: parentRule.ID, + } + + eventRules.Rules = append(eventRules.Rules, rule) + eventRules.ruleIDToEventRule[rule.ID] = rule + + // Add to userland rules if parent has userland filters + if isRuleFilterableInUserland(rule) { + eventRules.UserlandRules = append(eventRules.UserlandRules, rule) + } + + // Update container filter bitmap if parent has container filters + if rule.Policy.ContainerFilterEnabled() { + bitmapIndex := rule.ID / 64 + bitOffset := rule.ID % 64 + + // Ensure containerFilteredRules has enough bitmaps + for len(eventRules.containerFilteredRules) <= int(bitmapIndex) { + eventRules.containerFilteredRules = append(eventRules.containerFilteredRules, 0) + } + + utils.SetBit(&eventRules.containerFilteredRules[bitmapIndex], uint(bitOffset)) + } + + eventRules.rulesCount++ + tempRules[depID] = eventRules + } + + depNode, err := pm.evtsDepsManager.GetEvent(depID) + if err != nil { + return err + } + + // Recursively add dependency rules for the dependencies of the dependency + if err := pm.addTransitiveDependencyRules(depNode, tempRules, visited, depth+1, parentRule); err != nil { + return err + } + } return nil } -// EnableEvent enables a given event -func (m *Manager) EnableEvent(id events.ID) { - m.mu.Lock() - defer m.mu.Unlock() +// isRuleFilterableInUserland checks if a rule is filterable in userland. 
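The recursive dependency walk above guards itself with a depth cap and a visited set that only covers the current path (hence the deferred delete). A generic sketch of that guard, assuming the standard errors package is imported; the node type and children function are placeholders, not tracee types.

```go
// walkDeps visits a dependency graph depth-first, failing on cycles within
// the current path and on walks deeper than maxDepth.
func walkDeps(node int, children func(int) []int, visited map[int]bool, depth, maxDepth int) error {
	if depth > maxDepth {
		return errors.New("max dependency depth exceeded")
	}
	if visited[node] {
		return errors.New("circular dependency detected")
	}
	visited[node] = true
	defer delete(visited, node) // only guards the current path, as above
	for _, child := range children(node) {
		if err := walkDeps(child, children, visited, depth+1, maxDepth); err != nil {
			return err
		}
	}
	return nil
}
```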
+func isRuleFilterableInUserland(rule *EventRule) bool { + if rule == nil { + return false + } - flags, ok := m.rules[id] - if !ok { - m.rules[id] = newEventFlags( - eventFlagsWithEnabled(true), - ) - return + // Check rule.Data and its filters + if rule.Data != nil { + // TODO: if kernel filter is enabled for the data filter, don't consider it filterable in userland + if (rule.Data.DataFilter != nil && rule.Data.DataFilter.Enabled()) || + (rule.Data.RetFilter != nil && rule.Data.RetFilter.Enabled()) || + (rule.Data.ScopeFilter != nil && rule.Data.ScopeFilter.Enabled()) { + return true + } + } + + // Check policy-level filters + if rule.Policy != nil { + if (rule.Policy.UIDFilter != nil && rule.Policy.UIDFilter.Enabled()) && + ((rule.Policy.UIDFilter.Minimum() != filters.MinNotSetUInt) || + (rule.Policy.UIDFilter.Maximum() != filters.MaxNotSetUInt)) { + return true + } + + if rule.Policy.PIDFilter != nil && rule.Policy.PIDFilter.Enabled() && + ((rule.Policy.PIDFilter.Minimum() != filters.MinNotSetUInt) || + (rule.Policy.PIDFilter.Maximum() != filters.MaxNotSetUInt)) { + return true + } } - flags.enableEvent() + return false } -// DisableEvent disables a given event -func (m *Manager) DisableEvent(id events.ID) { - m.mu.Lock() - defer m.mu.Unlock() +// lookupPolicyByName returns a policy by name. +func (pm *PolicyManager) LookupPolicyByName(name string) (*Policy, error) { + pm.mu.RLock() + defer pm.mu.RUnlock() - flags, ok := m.rules[id] - if !ok { - m.rules[id] = newEventFlags( - eventFlagsWithEnabled(false), - ) - return + if p, ok := pm.policies[name]; ok { + return p, nil } - flags.disableEvent() + return nil, PolicyNotFoundByNameError(name) } +// GetRules returns the Rules slice for a given event ID. // -// Rules -// +// Warning: This function returns a direct reference to the internal Rules slice. +// While the implementation ensures that the returned slice will not be modified +// directly, it may be replaced entirely by concurrent updates to the PolicyManager. +// The caller MUST NOT modify the returned slice and should be aware that the +// slice may become stale if the PolicyManager's state is changed concurrently. +// It is the caller's responsibility to ensure that they are not relying on +// the slice to remain unchanged across calls to AddPolicy, RemovePolicy, or +// any other function that might update the PolicyManager's rules. +func (pm *PolicyManager) GetRules(eventID events.ID) []*EventRule { + pm.mu.RLock() + defer pm.mu.RUnlock() + + eventRules, ok := pm.rules[eventID] + if !ok { + return nil // Or return an empty slice: []*EventRule{} + } -func (m *Manager) IsRequiredBySignature(id events.ID) bool { - m.mu.RLock() - defer m.mu.RUnlock() + return eventRules.Rules +} - flags, ok := m.rules[id] +// GetUserlandRules returns the UserlandRules slice for a given event ID. +// +// Warning: This function returns a direct reference to the internal UserlandRules slice. +// While the implementation ensures that the returned slice will not be modified +// directly, it may be replaced entirely by concurrent updates to the PolicyManager. +// The caller MUST NOT modify the returned slice and should be aware that the +// slice may become stale if the PolicyManager's state is changed concurrently. +// It is the caller's responsibility to ensure that they are not relying on +// the slice to remain unchanged across calls to AddPolicy, RemovePolicy, or +// any other function that might update the PolicyManager's rules. 
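Given the read-only contract spelled out for GetRules and GetUserlandRules, callers should treat the returned slices as snapshots and never mutate them. A hedged sketch of such a consumer; the helper name is hypothetical.

```go
// countUserSelectedRules iterates the rules snapshot read-only and counts
// the rules that a user explicitly selected for the event.
func countUserSelectedRules(pm *PolicyManager, id events.ID) int {
	n := 0
	for _, r := range pm.GetRules(id) { // read-only iteration
		if r.SelectionType == SelectedByUser {
			n++
		}
	}
	return n
}
```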
+func (pm *PolicyManager) GetUserlandRules(eventID events.ID) []*EventRule { + pm.mu.RLock() + defer pm.mu.RUnlock() + + eventRules, ok := pm.rules[eventID] if !ok { - return false + return nil // Or return an empty slice: []*EventRule{} } - return flags.requiredBySignature + return eventRules.UserlandRules } -func (m *Manager) MatchEvent(id events.ID, matched uint64) uint64 { - m.mu.RLock() - defer m.mu.RUnlock() +// GetFilterMaps returns the computed filter maps for use by the overflow rules matcher. +func (pm *PolicyManager) GetFilterMaps() *FilterMaps { + pm.mu.RLock() + defer pm.mu.RUnlock() - flags, ok := m.rules[id] - if !ok { - return 0 + if pm.fMaps == nil { + return nil } - return flags.policiesEmit & matched -} + // Convert internal filterMaps to exported FilterMaps + exported := &FilterMaps{ + UIDFilters: make(map[FilterVersionKey]map[uint64][]RuleBitmap), + PIDFilters: make(map[FilterVersionKey]map[uint64][]RuleBitmap), + MntNsFilters: make(map[FilterVersionKey]map[uint64][]RuleBitmap), + PidNsFilters: make(map[FilterVersionKey]map[uint64][]RuleBitmap), + CgroupFilters: make(map[FilterVersionKey]map[uint64][]RuleBitmap), + UTSFilters: make(map[FilterVersionKey]map[string][]RuleBitmap), + CommFilters: make(map[FilterVersionKey]map[string][]RuleBitmap), + ContainerFilters: make(map[FilterVersionKey]map[string][]RuleBitmap), + ExtendedScopeFilterConfigs: make(map[events.ID]ExtendedScopeFiltersConfig), + } -func (m *Manager) MatchEventInAnyPolicy(id events.ID) uint64 { - m.mu.RLock() - defer m.mu.RUnlock() + // Convert UID filters + for k, v := range pm.fMaps.uidFilters { + exported.UIDFilters[FilterVersionKey(k)] = convertUint64RuleBitmaps(v) + } - flags, ok := m.rules[id] - if !ok { - return 0 + // Convert PID filters + for k, v := range pm.fMaps.pidFilters { + exported.PIDFilters[FilterVersionKey(k)] = convertUint64RuleBitmaps(v) + } + + // Convert Mount NS filters + for k, v := range pm.fMaps.mntNSFilters { + exported.MntNsFilters[FilterVersionKey(k)] = convertUint64RuleBitmaps(v) + } + + // Convert PID NS filters + for k, v := range pm.fMaps.pidNSFilters { + exported.PidNsFilters[FilterVersionKey(k)] = convertUint64RuleBitmaps(v) + } + + // Convert Cgroup filters + for k, v := range pm.fMaps.cgroupIdFilters { + exported.CgroupFilters[FilterVersionKey(k)] = convertUint64RuleBitmaps(v) + } + + // Convert UTS filters + for k, v := range pm.fMaps.utsFilters { + exported.UTSFilters[FilterVersionKey(k)] = convertStringRuleBitmaps(v) + } + + // Convert Comm filters + for k, v := range pm.fMaps.commFilters { + exported.CommFilters[FilterVersionKey(k)] = convertStringRuleBitmaps(v) + } + + // Convert Container filters + for k, v := range pm.fMaps.containerFilters { + exported.ContainerFilters[FilterVersionKey(k)] = convertStringRuleBitmaps(v) + } + + // Convert Extended Scope Filter Configs + for eventID, cfg := range pm.fMaps.extendedScopeFilterConfigs { + exported.ExtendedScopeFilterConfigs[eventID] = ExtendedScopeFiltersConfig(cfg) } - return (flags.policiesEmit | flags.policiesSubmit) & PolicyAll + return exported } -func (m *Manager) EventsSelected() []events.ID { - m.mu.RLock() - defer m.mu.RUnlock() +// Helper function to convert uint64 rule bitmaps +func convertUint64RuleBitmaps(input map[uint64][]ruleBitmap) map[uint64][]RuleBitmap { + output := make(map[uint64][]RuleBitmap) + for k, v := range input { + output[k] = convertRuleBitmapSlice(v) + } + return output +} - eventsSelected := make([]events.ID, 0, len(m.rules)) - for evt := range m.rules { - eventsSelected = 
append(eventsSelected, evt) +// Helper function to convert string rule bitmaps +func convertStringRuleBitmaps(input map[string][]ruleBitmap) map[string][]RuleBitmap { + output := make(map[string][]RuleBitmap) + for k, v := range input { + output[k] = convertRuleBitmapSlice(v) } + return output +} - return eventsSelected +// Helper function to convert ruleBitmap slice to RuleBitmap slice +func convertRuleBitmapSlice(input []ruleBitmap) []RuleBitmap { + output := make([]RuleBitmap, len(input)) + for i, rb := range input { + output[i] = RuleBitmap{ + EqualsInRules: rb.equalsInRules, + KeyUsedInRules: rb.keyUsedInRules, + } + } + return output } -func (m *Manager) IsEventSelected(id events.ID) bool { - m.mu.RLock() - defer m.mu.RUnlock() +// GetContainerFilteredRulesBitmap returns a bitmap where each bit represents a rule +// for the given event ID, and the bit is set if the corresponding rule has +// container filtering enabled. +func (pm *PolicyManager) GetContainerFilteredRulesBitmap(eventID events.ID) []uint64 { + pm.mu.RLock() + defer pm.mu.RUnlock() - _, ok := m.rules[id] - return ok + eventRules, ok := pm.rules[eventID] + if !ok { + return []uint64{0} // No rules for this event, return an empty bitmap + } + + return eventRules.containerFilteredRules } -func (m *Manager) EventsToSubmit() []events.ID { - m.mu.RLock() - defer m.mu.RUnlock() +// GetMatchedRulesInfo processes a bitmap array of matched rule IDs for a given event and returns +// a list of policy names corresponding to the matched rules that have the Emit flag set. +// Supports rules with ID > 64 through bitmap arrays. +func (pm *PolicyManager) GetMatchedRulesInfo(eventID events.ID, matchedRuleIDsBitmap []uint64) []string { + pm.mu.RLock() + defer pm.mu.RUnlock() + + var matchedPolicyNames []string + + eventRules, ok := pm.rules[eventID] + if !ok { + return matchedPolicyNames + } + + for ruleID := uint(0); ruleID < eventRules.rulesCount; ruleID++ { + // Check if this rule is matched using bitmap array utilities + if !utils.HasBitInArray(matchedRuleIDsBitmap, ruleID) { + continue + } + + rule, ok := eventRules.ruleIDToEventRule[ruleID] + if !ok { + // This should ideally not happen, as it indicates an inconsistency + // between the bitmap generated by BPF and the rules in EventRules. 
+ logger.Errorw("Inconsistency detected in GetMatchedRulesInfo", + "eventID", eventID, + "ruleID", ruleID, + "possibleCause", "Bitmap includes a ruleID not present in EventRules", + ) + continue + } - eventsToSubmit := []events.ID{} - for evt, flags := range m.rules { - if flags.policiesSubmit != 0 { - eventsToSubmit = append(eventsToSubmit, evt) + if rule.SelectionType == SelectedByUser { + matchedPolicyNames = append(matchedPolicyNames, rule.Policy.Name) } } - return eventsToSubmit + return matchedPolicyNames } -func (m *Manager) IsEventToEmit(id events.ID) bool { - m.mu.RLock() - defer m.mu.RUnlock() +func (pm *PolicyManager) GetDerivedEventMatchedRules( + derivedEventID events.ID, + baseEventID events.ID, + baseMatchedRulesBitmap []uint64, +) []uint64 { + pm.mu.RLock() + defer pm.mu.RUnlock() - flags, ok := m.rules[id] + baseEventRules, ok := pm.rules[baseEventID] if !ok { - return false + return []uint64{} } - return flags.policiesEmit != 0 + var derivedMatchedRules []uint64 + + for ruleID := uint(0); ruleID < baseEventRules.rulesCount; ruleID++ { + // For rules >= 64, only process if event has overflow + if ruleID >= 64 && !baseEventRules.hasOverflow { + continue + } + + // Check if this rule is matched in the base event using bitmap array + if !utils.HasBitInArray(baseMatchedRulesBitmap, ruleID) { + continue + } + + baseRule, ok := baseEventRules.ruleIDToEventRule[ruleID] + if !ok || baseRule.SelectionType != SelectedByDependency { + continue + } + + // Check if this dependency rule is for our derived event + if baseRule.Data.EventID == derivedEventID { + // Set the bit for the derived rule using bitmap array utilities + utils.SetBitInArray(&derivedMatchedRules, baseRule.DerivedRuleID) + } + } + + return derivedMatchedRules } -func (m *Manager) IsEventToSubmit(id events.ID) bool { - m.mu.RLock() - defer m.mu.RUnlock() +// IsEventEnabled checks if an event is currently enabled. +func (pm *PolicyManager) IsEventEnabled(eventID events.ID) bool { + pm.mu.RLock() + defer pm.mu.RUnlock() - flags, ok := m.rules[id] + eventRules, ok := pm.rules[eventID] if !ok { - return false + return false // Event not found, consider it disabled } - return flags.policiesSubmit != 0 + return eventRules.enabled } -// -// Policies methods made available by Manager. -// Some are transitive (tidying), some are not. -// +// EnableEvent enables a specific event in the PolicyManager. +// It assumes that the eventID is always valid. +func (pm *PolicyManager) EnableEvent(eventID events.ID) { + pm.mu.Lock() + defer pm.mu.Unlock() + + eventRules := pm.rules[eventID] + eventRules.enabled = true + pm.rules[eventID] = eventRules +} -func (m *Manager) CreateUserlandIterator() utils.Iterator[*Policy] { - m.mu.RLock() - defer m.mu.RUnlock() +// DisableEvent disables a specific event in the PolicyManager. +// It assumes that the eventID is always valid. +func (pm *PolicyManager) DisableEvent(eventID events.ID) { + pm.mu.Lock() + defer pm.mu.Unlock() - // The returned iterator is not thread-safe since its underlying data is not a copy. - // A possible solution would be to use the snapshot mechanism with timestamps instead - // of version numbers. - return m.ps.createUserlandIterator() + eventRules := pm.rules[eventID] + eventRules.enabled = false + pm.rules[eventID] = eventRules } -func (m *Manager) CreateAllIterator() utils.Iterator[*Policy] { - m.mu.RLock() - defer m.mu.RUnlock() +// GetSelectedEvents returns a slice of all the event IDs that are currently selected +// either directly by a policy or as a dependency. 
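A usage sketch of GetMatchedRulesInfo above; the helper name and the bitmap literal are illustrative (bits 0 and 2 set means rules 0 and 2 of this event matched).

```go
// exampleMatchedNames maps a matched-rules bitmap, as produced by BPF, to
// the names of the user policies whose rules matched.
func exampleMatchedNames(pm *PolicyManager, id events.ID) []string {
	matched := []uint64{0b101} // rules 0 and 2 of this event
	return pm.GetMatchedRulesInfo(id, matched)
}
```

For derived events, GetDerivedEventMatchedRules performs the reverse mapping: it walks the base event's matched bits, keeps only SelectedByDependency rules that point at the derived event, and sets the corresponding DerivedRuleID bits.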
+func (pm *PolicyManager) GetSelectedEvents() []events.ID { + pm.mu.RLock() + defer pm.mu.RUnlock() - // The returned iterator is not thread-safe since its underlying data is not a copy. - // A possible solution would be to use the snapshot mechanism with timestamps instead - // of version numbers. - return m.ps.createAllIterator() + selectedEvents := make([]events.ID, 0, len(pm.rules)) + for evt := range pm.rules { + selectedEvents = append(selectedEvents, evt) + } + + return selectedEvents } -func (m *Manager) FilterableInUserland() bool { - m.mu.RLock() - defer m.mu.RUnlock() +// IsEventSelected checks if an event is selected by any policy, either directly or as a dependency. +func (pm *PolicyManager) IsEventSelected(eventID events.ID) bool { + pm.mu.RLock() + defer pm.mu.RUnlock() - return m.ps.filterableInUserland + _, ok := pm.rules[eventID] + return ok } -func (m *Manager) WithContainerFilterEnabled() uint64 { - m.mu.RLock() - defer m.mu.RUnlock() +// HasOverflowRules checks if the specified event has more than 64 rules +func (pm *PolicyManager) HasOverflowRules(eventID events.ID) bool { + pm.mu.RLock() + defer pm.mu.RUnlock() + + eventRules, ok := pm.rules[eventID] + if !ok { + return false // Event not found, no overflow + } - return m.ps.withContainerFilterEnabled() + return eventRules.hasOverflow } -func (m *Manager) MatchedNames(matched uint64) []string { - m.mu.RLock() - defer m.mu.RUnlock() +// ShouldEmitEvent checks if an event has at least one rule that was explicitly +// selected by a user (not a dependency or bootstrap rule), indicating that the event +// should be emitted. +func (pm *PolicyManager) ShouldEmitEvent(eventID events.ID) bool { + pm.mu.RLock() + defer pm.mu.RUnlock() + + eventRules, ok := pm.rules[eventID] + if !ok { + return false // Event not found or no rules defined, not emitted + } + + for _, rule := range eventRules.Rules { + if rule.SelectionType == SelectedByUser { + return true // Found at least one rule explicitly selected by the user + } + } - return m.ps.matchedNames(matched) + return false // No rules were explicitly selected by the user } -func (m *Manager) LookupByName(name string) (*Policy, error) { - m.mu.RLock() - defer m.mu.RUnlock() +// GetAllMatchedRulesBitmap returns a bitmap array where all bits corresponding to +// rules for the given event ID are set, indicating that all rules are considered +// matched. Supports overflow rules (ID > 64). 
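The accessors above distinguish three per-event states that are easy to conflate: selected (chosen directly or pulled in as a dependency), enabled (the runtime toggle), and emitted (at least one rule was explicitly requested by a user). A minimal sketch that reads all three; the helper name is hypothetical.

```go
// describeEvent reports the three independent per-event states exposed by
// the PolicyManager.
func describeEvent(pm *PolicyManager, id events.ID) (selected, enabled, emitted bool) {
	return pm.IsEventSelected(id), pm.IsEventEnabled(id), pm.ShouldEmitEvent(id)
}
```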
+func (pm *PolicyManager) GetAllMatchedRulesBitmap(eventID events.ID) []uint64 { + pm.mu.RLock() + defer pm.mu.RUnlock() + + eventRules, ok := pm.rules[eventID] + if !ok { + return []uint64{} // No rules for this event, return an empty bitmap array + } + + var allRulesBitmap []uint64 + for ruleID := uint(0); ruleID < eventRules.rulesCount; ruleID++ { + utils.SetBitInArray(&allRulesBitmap, ruleID) + } - return m.ps.lookupByName(name) + return allRulesBitmap } -func (m *Manager) UpdateBPF( +func (pm *PolicyManager) UpdateBPF( bpfModule *bpf.Module, cts *containers.Manager, eventsFields map[events.ID][]data.DecodeAs, - createNewMaps bool, - updateProcTree bool, -) (*PoliciesConfig, error) { - m.mu.Lock() - defer m.mu.Unlock() +) error { + pm.mu.Lock() + defer pm.mu.Unlock() - return m.ps.updateBPF(bpfModule, cts, m.rules, eventsFields, createNewMaps, updateProcTree) + return pm.updateBPF(bpfModule, cts, eventsFields) } diff --git a/pkg/policy/policy_manager_test.go b/pkg/policy/policy_manager_test.go index 7c514b084a86..e1f6fbe20ab4 100644 --- a/pkg/policy/policy_manager_test.go +++ b/pkg/policy/policy_manager_test.go @@ -1,146 +1,17 @@ package policy import ( + "fmt" "sync" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/aquasecurity/tracee/pkg/events" "github.com/aquasecurity/tracee/pkg/events/dependencies" ) -func TestPolicyManagerEnableRule(t *testing.T) { - t.Parallel() - - depsManager := dependencies.NewDependenciesManager( - func(id events.ID) events.Dependencies { - return events.Core.GetDefinitionByID(id).GetDependencies() - }) - - policyManager, err := NewManager(ManagerConfig{}, depsManager) - assert.NoError(t, err) - - policy1Mached := uint64(0b10) - policy2Mached := uint64(0b100) - policy1And2Mached := uint64(0b110) - - assert.False(t, policyManager.IsRuleEnabled(policy1Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsRuleEnabled(policy2Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsRuleEnabled(policy1And2Mached, events.SecurityBPF)) - - err = policyManager.EnableRule(1, events.SecurityBPF) - assert.NoError(t, err) - - assert.True(t, policyManager.IsRuleEnabled(policy1Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsRuleEnabled(policy2Mached, events.SecurityBPF)) - assert.True(t, policyManager.IsRuleEnabled(policy1And2Mached, events.SecurityBPF)) - - err = policyManager.EnableRule(2, events.SecurityBPF) - assert.NoError(t, err) - - assert.True(t, policyManager.IsRuleEnabled(policy1Mached, events.SecurityBPF)) - assert.True(t, policyManager.IsRuleEnabled(policy2Mached, events.SecurityBPF)) - assert.True(t, policyManager.IsRuleEnabled(policy1And2Mached, events.SecurityBPF)) - - err = policyManager.EnableRule(-1, events.SecurityBPF) - assert.Error(t, err) -} - -func TestPolicyManagerDisableRule(t *testing.T) { - t.Parallel() - - depsManager := dependencies.NewDependenciesManager( - func(id events.ID) events.Dependencies { - return events.Core.GetDefinitionByID(id).GetDependencies() - }) - - policyManager, err := NewManager(ManagerConfig{}, depsManager) - assert.NoError(t, err) - - policy1Mached := uint64(0b10) - policy2Mached := uint64(0b100) - policy1And2Mached := uint64(0b110) - - err = policyManager.EnableRule(1, events.SecurityBPF) - assert.NoError(t, err) - - assert.True(t, policyManager.IsRuleEnabled(policy1Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsRuleEnabled(policy2Mached, events.SecurityBPF)) - assert.True(t, policyManager.IsRuleEnabled(policy1And2Mached, 
events.SecurityBPF)) - - err = policyManager.DisableRule(1, events.SecurityBPF) - assert.NoError(t, err) - - assert.False(t, policyManager.IsRuleEnabled(policy1Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsRuleEnabled(policy2Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsRuleEnabled(policy1And2Mached, events.SecurityBPF)) - - err = policyManager.DisableRule(-1, events.SecurityBPF) - assert.Error(t, err) -} - -func TestPolicyManagerEnableAndDisableRuleConcurrent(t *testing.T) { - t.Parallel() - - eventsToEnable := []events.ID{ - events.SecurityBPF, - events.SchedGetPriorityMax, - events.SchedProcessExec, - events.SchedProcessExit, - events.Ptrace, - } - - eventsToDisable := []events.ID{ - events.SecurityBPFMap, - events.Openat2, - events.SchedProcessFork, - events.MagicWrite, - events.FileModification, - } - - depsManager := dependencies.NewDependenciesManager( - func(id events.ID) events.Dependencies { - return events.Core.GetDefinitionByID(id).GetDependencies() - }) - - policyManager, err := NewManager(ManagerConfig{}, depsManager) - assert.NoError(t, err) - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - for i := 0; i < PolicyMax; i++ { - for _, e := range eventsToEnable { - policyManager.EnableRule(i, e) - } - } - wg.Done() - }() - - wg.Add(1) - go func() { - for i := 0; i < PolicyMax; i++ { - for _, e := range eventsToDisable { - policyManager.DisableRule(i, e) - } - } - wg.Done() - }() - - wg.Wait() - - for i := 0; i < PolicyMax; i++ { - for _, e := range eventsToEnable { - assert.True(t, policyManager.IsRuleEnabled(PolicyAll, e)) - } - for _, e := range eventsToDisable { - assert.False(t, policyManager.IsRuleEnabled(PolicyAll, e)) - } - } -} - func TestPolicyManagerEnableEvent(t *testing.T) { t.Parallel() @@ -152,17 +23,17 @@ func TestPolicyManagerEnableEvent(t *testing.T) { policyManager, err := NewManager(ManagerConfig{}, depsManager) assert.NoError(t, err) - assert.False(t, policyManager.isEventEnabled(events.SecurityBPF)) - assert.False(t, policyManager.isEventEnabled(events.SecurityFileOpen)) - assert.False(t, policyManager.isEventEnabled(events.SecuritySocketAccept)) + assert.False(t, policyManager.IsEventEnabled(events.SecurityBPF)) + assert.False(t, policyManager.IsEventEnabled(events.SecurityFileOpen)) + assert.False(t, policyManager.IsEventEnabled(events.SecuritySocketAccept)) policyManager.EnableEvent(events.SecurityBPF) policyManager.EnableEvent(events.SecurityFileOpen) policyManager.EnableEvent(events.SecuritySocketAccept) - assert.True(t, policyManager.isEventEnabled(events.SecurityBPF)) - assert.True(t, policyManager.isEventEnabled(events.SecurityFileOpen)) - assert.True(t, policyManager.isEventEnabled(events.SecuritySocketAccept)) + assert.True(t, policyManager.IsEventEnabled(events.SecurityBPF)) + assert.True(t, policyManager.IsEventEnabled(events.SecurityFileOpen)) + assert.True(t, policyManager.IsEventEnabled(events.SecuritySocketAccept)) } func TestPolicyManagerDisableEvent(t *testing.T) { @@ -228,27 +99,27 @@ func TestPolicyManagerEnableAndDisableEventConcurrent(t *testing.T) { wg.Add(1) go func() { - for i := 0; i < PolicyMax; i++ { + defer wg.Done() + for i := 0; i < 100; i++ { for _, e := range eventsToEnable { policyManager.EnableEvent(e) } } - wg.Done() }() wg.Add(1) go func() { - for i := 0; i < PolicyMax; i++ { + defer wg.Done() + for i := 0; i < 100; i++ { for _, e := range eventsToDisable { policyManager.DisableEvent(e) } } - wg.Done() }() wg.Wait() - for i := 0; i < PolicyMax; i++ { + for i := 0; i < 100; i++ { for 
_, e := range eventsToEnable { assert.True(t, policyManager.IsEventEnabled(e)) } @@ -258,25 +129,7 @@ func TestPolicyManagerEnableAndDisableEventConcurrent(t *testing.T) { } } -func TestEnableRuleAlsoEnableEvent(t *testing.T) { - t.Parallel() - - depsManager := dependencies.NewDependenciesManager( - func(id events.ID) events.Dependencies { - return events.Core.GetDefinitionByID(id).GetDependencies() - }) - - policyManager, err := NewManager(ManagerConfig{}, depsManager) - assert.NoError(t, err) - - assert.False(t, policyManager.IsEventEnabled(events.SecurityBPF)) - - policyManager.EnableRule(1, events.SecurityBPF) - - assert.True(t, policyManager.IsEventEnabled(events.SecurityBPF)) -} - -func TestDisableRuleAlsoEnableEvent(t *testing.T) { +func TestPolicyManagerIndependentPolicies(t *testing.T) { t.Parallel() depsManager := dependencies.NewDependenciesManager( @@ -284,56 +137,46 @@ func TestDisableRuleAlsoEnableEvent(t *testing.T) { return events.Core.GetDefinitionByID(id).GetDependencies() }) - policyManager, err := NewManager(ManagerConfig{}, depsManager) - assert.NoError(t, err) - - assert.False(t, policyManager.IsEventEnabled(events.SecurityFileOpen)) - - policyManager.DisableRule(1, events.SecurityFileOpen) - - assert.True(t, policyManager.IsEventEnabled(events.SecurityFileOpen)) -} - -func TestPolicyManagerIsEnabled(t *testing.T) { - t.Parallel() - - depsManager := dependencies.NewDependenciesManager( - func(id events.ID) events.Dependencies { - return events.Core.GetDefinitionByID(id).GetDependencies() - }) - - policyManager, err := NewManager(ManagerConfig{}, depsManager) - assert.NoError(t, err) - - policy1Mached := uint64(0b10) - policy2Mached := uint64(0b100) - policy1And2Mached := uint64(0b110) - - assert.False(t, policyManager.IsEnabled(policy1Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsEnabled(policy2Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsEnabled(policy1And2Mached, events.SecurityBPF)) - - policyManager.EnableRule(1, events.SecurityBPF) - - assert.True(t, policyManager.IsEnabled(policy1Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsEnabled(policy2Mached, events.SecurityBPF)) - assert.True(t, policyManager.IsEnabled(policy1And2Mached, events.SecurityBPF)) - - policyManager.EnableRule(2, events.SecurityBPF) - - assert.True(t, policyManager.IsEnabled(policy1Mached, events.SecurityBPF)) - assert.True(t, policyManager.IsEnabled(policy2Mached, events.SecurityBPF)) - assert.True(t, policyManager.IsEnabled(policy1And2Mached, events.SecurityBPF)) - - policyManager.DisableEvent(events.SecurityBPF) - - assert.False(t, policyManager.IsEnabled(policy1Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsEnabled(policy2Mached, events.SecurityBPF)) - assert.False(t, policyManager.IsEnabled(policy1And2Mached, events.SecurityBPF)) - - policyManager.EnableEvent(events.SecurityBPF) - - assert.True(t, policyManager.IsEnabled(policy1Mached, events.SecurityBPF)) - assert.True(t, policyManager.IsEnabled(policy2Mached, events.SecurityBPF)) - assert.True(t, policyManager.IsEnabled(policy1And2Mached, events.SecurityBPF)) + pm, err := NewManager(ManagerConfig{}, depsManager) + require.NoError(t, err) + + // Create two policies with different filters and rules + p1 := NewPolicy() + p1.Name = "policy1" + err = p1.UIDFilter.Parse(">=1000") + require.NoError(t, err) + p1.Rules[events.SchedProcessFork] = RuleData{EventID: events.SchedProcessFork} + + p2 := NewPolicy() + p2.Name = "policy2" + err = p2.PIDFilter.Parse("=1") + 
require.NoError(t, err) + p2.Rules[events.SchedProcessExec] = RuleData{EventID: events.SchedProcessExec} + + // Test policy addition + err = pm.AddPolicy(p1) + require.NoError(t, err) + err = pm.AddPolicy(p2) + require.NoError(t, err) + + // Verify policies are independent + p2Fetched, err := pm.LookupPolicyByName("policy2") + require.NoError(t, err) + require.True(t, p2Fetched.PIDFilter.Enabled()) + require.False(t, p2Fetched.UIDFilter.Enabled()) + require.NotContains(t, p2Fetched.Rules, events.SchedProcessFork) + + // Test policy removal + err = pm.RemovePolicy("policy1") + require.NoError(t, err) + + // Verify policy1 removed and policy2 unaffected + _, err = pm.LookupPolicyByName("policy1") + expectedErr := &policyError{msg: fmt.Sprintf("policy [%s] not found", "policy1")} + require.ErrorIs(t, err, expectedErr) + + p2Fetched, err = pm.LookupPolicyByName("policy2") + require.NoError(t, err) + require.True(t, p2Fetched.PIDFilter.Enabled()) + require.Contains(t, p2Fetched.Rules, events.SchedProcessExec) } diff --git a/pkg/policy/snapshots.go b/pkg/policy/snapshots.go deleted file mode 100644 index f062920da0bf..000000000000 --- a/pkg/policy/snapshots.go +++ /dev/null @@ -1,194 +0,0 @@ -package policy - -import ( - "sync" - "time" - - "github.com/aquasecurity/tracee/pkg/errfmt" - "github.com/aquasecurity/tracee/pkg/logger" -) - -const ( - maxSnapshots = PolicyMax -) - -// snapshot is a snapshot of the Policies at a given version. -type snapshot struct { - time time.Time - version uint16 - ps *policies -} - -// snapshots is a circular buffer of snapshots. -type snapshots struct { - murw sync.RWMutex - lastVersion uint16 - snaps [maxSnapshots]*snapshot - nextIdx int - lastIdx int - storedCnt int - prune func(*policies) []error -} - -var ( - snaps *snapshots // singleton - snapsOnce sync.Once -) - -// newSnapshots creates a new snapshot. -func newSnapshots() *snapshots { - return &snapshots{ - murw: sync.RWMutex{}, - lastVersion: 0, - snaps: [maxSnapshots]*snapshot{}, - nextIdx: 0, - lastIdx: -1, // no snapshots stored - storedCnt: 0, - prune: nil, - } -} - -func Snapshots() *snapshots { - snapsOnce.Do(func() { - snaps = newSnapshots() - }) - - return snaps -} - -// TODO: This is a temporary solution to allow testing. We must make the constructor -// public and pass the prune function as a parameter. -// SetPruneFunc sets the prune function to be called by PruneSnapshotsOlderThan -// and Store (when overwriting a snapshot). -func (s *snapshots) SetPruneFunc(prune func(*policies) []error) { - s.murw.Lock() - defer s.murw.Unlock() - - s.prune = prune -} - -// Store stores a snapshot of the Policies. 
-func (s *snapshots) Store(ps *policies) { - s.murw.Lock() - defer s.murw.Unlock() - - s.lastVersion++ // new version - if s.lastVersion == 0 { - logger.Warnw("Policies version has wrapped around, resetting to 1") - s.lastVersion++ - } - - // TODO: set value to reference this snapshot as unique (perhaps a timestamp) - - snap := &snapshot{ - time: time.Now(), - version: s.lastVersion, - ps: ps, - } - - nextSlot := s.snaps[s.nextIdx] - if nextSlot != nil { - if s.prune == nil { - logger.Warnw("prune function not set, snapshot will not be pruned, only overwritten", "version", nextSlot.version) - } else { - errs := s.prune(nextSlot.ps) - for _, err := range errs { - logger.Errorw("failed to prune snapshot", "version", nextSlot.version, "error", err) - } - } - } - s.snaps[s.nextIdx] = snap - s.lastIdx = s.nextIdx - s.nextIdx = (s.nextIdx + 1) % maxSnapshots - - if s.storedCnt < maxSnapshots { - s.storedCnt++ - } -} - -// Get returns a snapshot of the Policies at a given version. -func (s *snapshots) Get(polsVersion uint16) (*policies, error) { - s.murw.RLock() - defer s.murw.RUnlock() - - if s.storedCnt == 0 { - return nil, errfmt.Errorf("no snapshots stored") - } - - // start from the most recent snapshot - startIdx := s.lastIdx - for i := 0; i < s.storedCnt; i++ { - idx := (startIdx - i + maxSnapshots) % maxSnapshots - snap := s.snaps[idx] - if snap.version == polsVersion { - return snap.ps, nil - } - } - - return nil, errfmt.Errorf("no snapshot found for version %d", polsVersion) -} - -// GetLast returns the most recent snapshot of the Policies. -func (s *snapshots) GetLast() (*policies, error) { - s.murw.RLock() - defer s.murw.RUnlock() - - if s.storedCnt == 0 { - return nil, errfmt.Errorf("no snapshots stored") - } - - return s.snaps[s.lastIdx].ps, nil -} - -// TODO: call this function periodically -// PruneSnapshotsOlderThan prunes snapshots older than a given duration. -func (s *snapshots) PruneSnapshotsOlderThan(d time.Duration) []error { - s.murw.Lock() - defer s.murw.Unlock() - - if s.storedCnt <= 1 { - return nil - } - - if s.prune == nil { - logger.Errorw("prune function not set, snapshots cannot be pruned") - return nil - } - - errs := []error{} - boundaryIdx := s.lastIdx - if boundaryIdx == 0 { - boundaryIdx = maxSnapshots - } - - // start from the oldest snapshot and iterate through all slots - startIdx := s.nextIdx - for i := 0; i < maxSnapshots; i++ { - idx := (startIdx + i) % maxSnapshots - - // Stop iterating when we reach the boundary. This is to avoid - // pruning the last snapshot (which is always in use). - if idx == boundaryIdx { - break - } - - snap := s.snaps[idx] - if snap == nil { // empty slot - continue - } - - // As the circular buffer is chronologically ordered, we can stop - // iterating as soon as we find a snapshot that is not older than d. - if time.Since(snap.time) <= d { - break - } - - errs = append(errs, s.prune(snap.ps)...) - - // remove snapshot even if pruning failed - s.snaps[idx] = nil - s.storedCnt-- - } - - return errs -} diff --git a/pkg/policy/snapshots_test.go b/pkg/policy/snapshots_test.go deleted file mode 100644 index 7ae608353c24..000000000000 --- a/pkg/policy/snapshots_test.go +++ /dev/null @@ -1,266 +0,0 @@ -//go:build exclude - -package policy - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/aquasecurity/tracee/pkg/logger" -) - -// resetSnapshots resets the snapshots global variable. 
-func resetSnapshots() { - _ = Snapshots() // ensure the singleton is initialized - snaps = newSnapshots() -} - -func setPruneFunc() { - Snapshots().SetPruneFunc(func(ps *policies) []error { - errs := []error{} - for _, bpfMap := range ps.bpfInnerMaps { - err := syscall.Close(bpfMap.FileDescriptor()) - if err != nil { - errs = append(errs, err) - } - } - - return errs - }) -} - -func TestStoreSnapshot(t *testing.T) { - resetSnapshots() - - ps := &policies{} - Snapshots().Store(ps) - assert.Equal(t, uint16(1), uint16(ps.version)) - - // after storing the snapshot, there should be one snapshot available - lastSnapshot, err := Snapshots().GetLast() - assert.NoError(t, err) - assert.Equal(t, ps, lastSnapshot) -} - -func TestGetSnapshot(t *testing.T) { - resetSnapshots() - - ps := &policies{} - Snapshots().Store(ps) - - // get the snapshot for the version just stored - snapshot, err := Snapshots().Get(1) // since our lastVersion starts at 0 and increments on StoreSnapshot - assert.NoError(t, err) - assert.Equal(t, ps, snapshot) - - // try getting a snapshot for a version that does not exist - _, err = Snapshots().Get(1000) - assert.Error(t, err) -} - -func TestGetLastSnapshot(t *testing.T) { - resetSnapshots() - - ps1 := &policies{} - Snapshots().Store(ps1) - - ps2 := &policies{} - Snapshots().Store(ps2) - - // after storing two snapshots, the last one should be ps2 - lastSnapshot, err := Snapshots().GetLast() - assert.NoError(t, err) - assert.Equal(t, ps2, lastSnapshot) -} - -func TestCircularBufferOverwrite(t *testing.T) { - resetSnapshots() - setPruneFunc() - - // create and store maxSnapshots - for i := 0; i < maxSnapshots; i++ { - ps := &policies{} - Snapshots().Store(ps) - assert.Equal(t, uint16(i+1), uint16(ps.version)) - } - - // the last stored snapshot is for version maxSnapshots - lastSnapshotBeforeOverwrite, err := Snapshots().GetLast() - assert.NoError(t, err) - - // store one more snapshot to overwrite the first snapshot in the buffer - psOverwrite := &policies{} - Snapshots().Store(psOverwrite) - assert.Equal(t, uint16(maxSnapshots+1), psOverwrite.version) - - // check if the oldest snapshot (version 1) has been overwritten - _, err = Snapshots().Get(1) - assert.Error(t, err, "expected error when retrieving overwritten snapshot") - - // check the last snapshot is the one just stored - lastSnapshotAfterOverwrite, err := Snapshots().GetLast() - assert.NoError(t, err) - assert.Equal(t, psOverwrite, lastSnapshotAfterOverwrite) - - // ensure the previous last snapshot is still retrievable - previousVersion := uint16(maxSnapshots) - snapshot, err := Snapshots().Get(previousVersion) - assert.NoError(t, err) - assert.Equal(t, lastSnapshotBeforeOverwrite, snapshot, "the last snapshot before overwrite should still be retrievable") -} - -func TestConcurrentSnapshots(t *testing.T) { - resetSnapshots() - setPruneFunc() - - const ( - numStoreRoutines = 300 - numRetrieveRoutines = 1000 - ) - var ( - readyForRetrieve int32 - mainWG sync.WaitGroup - innerWG sync.WaitGroup // for inner goroutines - firstStoreDone sync.WaitGroup - ) - - // use a channel to coordinate the start of storing goroutines - startStoreCh := make(chan struct{}) - - mainWG.Add(2) // two main goroutines: one for storing and one for retrieving - - firstStoreDone.Add(1) // only one is required to signal when storing is done - // spawn a goroutine to initiate n goroutines for storing snapshots - go func() { - defer mainWG.Done() - - for i := 0; i < numStoreRoutines; i++ { - innerWG.Add(1) - - go func() { - defer innerWG.Done() - 
- // wait for the start signal - <-startStoreCh - - // store a snapshot - ps := &policies{} - Snapshots().Store(ps) - - if atomic.CompareAndSwapInt32(&readyForRetrieve, 0, 1) { - firstStoreDone.Done() // signal the retrieving goroutines to start - } - }() - } - }() - - // spawn a goroutine to initiate n goroutines for retrieving the last snapshot - go func() { - defer mainWG.Done() - - for i := 0; i < numRetrieveRoutines; i++ { - innerWG.Add(1) - - go func() { - defer innerWG.Done() - - // ensure a snapshot has been stored before trying to retrieve - firstStoreDone.Wait() - - // retrieve the last snapshot - _, err := Snapshots().GetLast() - assert.NoError(t, err) - }() - } - }() - - // signal storing goroutines to start their tasks - close(startStoreCh) - - // wait for the two main goroutines to complete - mainWG.Wait() - - // wait for all inner goroutines to complete - innerWG.Wait() - - // get the last stored snapshot - lastSnapshot, err := Snapshots().GetLast() - assert.NoError(t, err) - assert.Equal(t, uint16(numStoreRoutines), uint16(lastSnapshot.version)) - - // get the numStoreRoutines-10th snapshot - snapshot, err := Snapshots().Get(uint16(numStoreRoutines - 10)) - assert.NoError(t, err) - assert.Equal(t, uint16(numStoreRoutines-10), uint16(snapshot.version)) - - // post-check to ensure storedCnt is as expected, given the concurrency - assert.True(t, snaps.storedCnt <= maxSnapshots, "Stored count should never exceed maxSnapshots") -} - -func TestWrapAround(t *testing.T) { - resetSnapshots() - - // set the lastVersion to its maximum value minus 1 - snaps.lastVersion = math.MaxUint16 - 1 - - // store a snapshot, this should increase version to math.MaxUint16 - ps1 := &policies{} - Snapshots().Store(ps1) - - // verify that the version is set to math.MaxUint16 - assert.Equal(t, uint16(math.MaxUint16), uint16(ps1.version)) - - // store another snapshot, this should trigger the wrap-around and reset the version to 1 - ps2 := &policies{} - Snapshots().Store(ps2) - - // verify that the wrap-around occurred - assert.Equal(t, uint16(1), uint16(ps2.version)) -} - -func TestPruneSnapshotsOlderThan(t *testing.T) { - resetSnapshots() - setPruneFunc() - - const ( - timeToPrune = 250 * time.Millisecond // 0.25 seconds - numSnapshots = maxSnapshots // number of snapshots to store in one go - ) - - // Helper function to store numSnapshots snapshots immediately - storeSnapshots := func() { - for i := 0; i < numSnapshots; i++ { - ps := &policies{} - Snapshots().Store(ps) - } - } - - // store and prune three times - for iteration := 1; iteration <= 3; iteration++ { - storeSnapshots() - - // Sleep for the desired prune time to ensure all snapshots are older than this duration - time.Sleep(timeToPrune) - - errs := Snapshots().PruneSnapshotsOlderThan(timeToPrune) - assert.Empty(t, errs) - - logger.Infow( - fmt.Sprintf("iteration %d", iteration), - "stored", numSnapshots, - "pruned", numSnapshots-1, - "remaining", snaps.storedCnt, - ) - - // Despite pruning all older snapshots, we expect to have one snapshot - // as the last snapshot is always retained. 
- assert.Equal(t, 1, int(snaps.storedCnt)) - } -} diff --git a/pkg/server/grpc/tracee.go b/pkg/server/grpc/tracee.go index c641c2e4e4a1..f21557bf6316 100644 --- a/pkg/server/grpc/tracee.go +++ b/pkg/server/grpc/tracee.go @@ -601,15 +601,11 @@ type TraceeService struct { func (s *TraceeService) StreamEvents(in *pb.StreamEventsRequest, grpcStream pb.TraceeService_StreamEventsServer) error { var stream *streams.Stream - var err error if len(in.Policies) == 0 { stream = s.tracee.SubscribeAll() } else { - stream, err = s.tracee.Subscribe(in.Policies) - if err != nil { - return err - } + stream = s.tracee.Subscribe(in.Policies) } defer s.tracee.Unsubscribe(stream) diff --git a/pkg/streams/streams.go b/pkg/streams/streams.go index fd6ae53c08f0..2462db403884 100644 --- a/pkg/streams/streams.go +++ b/pkg/streams/streams.go @@ -9,8 +9,7 @@ import ( // Stream is a stream of events type Stream struct { - // policy mask is a bitmap of policies that this stream is interested in - policyMask uint64 + policies map[string]bool // policies that this stream is interested in, empty means all // events is a channel that is used to receive events from the stream events chan trace.Event } @@ -24,7 +23,7 @@ func (s *Stream) ReceiveEvents() <-chan trace.Event { // but first check if this stream is interested in this event, // by checking the event's policy mask against the stream's policy mask. func (s *Stream) publish(ctx context.Context, event trace.Event) { - if s.shouldIgnorePolicy(event) { + if !s.shouldPublish(event) { return } @@ -42,9 +41,20 @@ func (s *Stream) publish(ctx context.Context, event trace.Event) { } } -// shouldIgnorePolicy checks if the stream should ignore the event -func (s *Stream) shouldIgnorePolicy(event trace.Event) bool { - return s.policyMask&event.MatchedPoliciesUser == 0 +// shouldPublish checks if event matches subscribed policies +func (s *Stream) shouldPublish(event trace.Event) bool { + // No policies means subscribe to all + if len(s.policies) == 0 { + return true + } + + // Check if any of the event's matched policies are in our subscription + for _, matchedPolicy := range event.MatchedPolicies { + if s.policies[matchedPolicy] { + return true + } + } + return false } // close closes the stream @@ -67,13 +77,17 @@ func NewStreamsManager() *StreamsManager { } // Subscribe adds a stream to the manager -func (sm *StreamsManager) Subscribe(policyMask uint64, chanSize int) *Stream { +func (sm *StreamsManager) Subscribe(policyNames []string, chanSize int) *Stream { sm.mutex.Lock() defer sm.mutex.Unlock() stream := &Stream{ - policyMask: policyMask, - events: make(chan trace.Event, chanSize), + policies: make(map[string]bool), + events: make(chan trace.Event, chanSize), + } + + for _, policyName := range policyNames { + stream.policies[policyName] = true } sm.subscribers[stream] = struct{}{} diff --git a/pkg/streams/streams_test.go b/pkg/streams/streams_test.go index f507155982f4..7afd9b406b70 100644 --- a/pkg/streams/streams_test.go +++ b/pkg/streams/streams_test.go @@ -2,10 +2,9 @@ package streams import ( "context" - "sync" "testing" - "gotest.tools/assert" + "github.com/stretchr/testify/assert" "github.com/aquasecurity/tracee/types/trace" ) @@ -17,150 +16,138 @@ const ( ) var ( - policy1Event = trace.Event{MatchedPoliciesUser: 0b1} - policy2Event = trace.Event{MatchedPoliciesUser: 0b10} - policy3Event = trace.Event{MatchedPoliciesUser: 0b100} - policy1And2Event = trace.Event{MatchedPoliciesUser: 0b11} + policy1Event = trace.Event{MatchedRulesUser: []uint64{0b1}} + policy2Event = 
trace.Event{MatchedRulesUser: []uint64{0b10}} + policy3Event = trace.Event{MatchedRulesUser: []uint64{0b100}} + policy1And2Event = trace.Event{MatchedRulesUser: []uint64{0b11}} ) -func TestStreamManager(t *testing.T) { - t.Parallel() +func TestStreamManager_PublishAndReceive(t *testing.T) { + sm := NewStreamsManager() + ctx := context.Background() - var ( - stream1Count int - stream2Count int - stream3Count int - ) + event := trace.Event{ + MatchedPolicies: []string{"policy1"}, + } - ctx := context.Background() + // Subscribe with matching policy + stream1 := sm.Subscribe([]string{"policy1"}, 1) - sm := NewStreamsManager() + // Subscribe with non-matching policy + stream2 := sm.Subscribe([]string{"policy2"}, 1) - // stream for policy1 - stream1 := sm.Subscribe(policy1Mask, 0) - - // stream for policy1 and policy2 - stream2 := sm.Subscribe(policy1And2Mask, 0) - - // stream for all policies - stream3 := sm.Subscribe(allPoliciesMask, 0) - - // consumers - consumersWG := &sync.WaitGroup{} - consumersWG.Add(3) - - go func() { - for range stream1.ReceiveEvents() { - stream1Count++ - } - consumersWG.Done() - }() - - go func() { - for range stream2.ReceiveEvents() { - stream2Count++ - } - consumersWG.Done() - }() - - go func() { - for range stream3.ReceiveEvents() { - stream3Count++ - } - consumersWG.Done() - }() - - // publishers - publishersWG := &sync.WaitGroup{} - publishersWG.Add(3) - - go func() { - for i := 0; i < 100; i++ { - sm.Publish(ctx, policy1Event) - } - publishersWG.Done() - }() - - go func() { - for i := 0; i < 100; i++ { - sm.Publish(ctx, policy2Event) - } - publishersWG.Done() - }() - - go func() { - for i := 0; i < 100; i++ { - sm.Publish(ctx, policy3Event) - } - publishersWG.Done() - }() - - publishersWG.Wait() - sm.Close() - consumersWG.Wait() - - assert.Equal(t, 100, stream1Count) - assert.Equal(t, 200, stream2Count) - assert.Equal(t, 300, stream3Count) -} + // Subscribe to all policies + stream3 := sm.Subscribe([]string{}, 1) -func Test_shouldIgnorePolicy(t *testing.T) { - t.Parallel() + // Publish event + sm.Publish(ctx, event) + + // Check stream1 received event (matching policy) + select { + case receivedEvent := <-stream1.ReceiveEvents(): + assert.Equal(t, event, receivedEvent) + default: + t.Error("Expected stream1 to receive event") + } + + // Check stream2 did not receive event (non-matching policy) + select { + case <-stream2.ReceiveEvents(): + t.Error("Stream2 should not receive event") + default: + // Expected - no event received + } + + // Check stream3 received event (all policies) + select { + case receivedEvent := <-stream3.ReceiveEvents(): + assert.Equal(t, event, receivedEvent) + default: + t.Error("Expected stream3 to receive event") + } +} +func TestStreamManager_MultiplePolices(t *testing.T) { sm := NewStreamsManager() + ctx := context.Background() + + type streamTest struct { + policies []string + expect bool + } tests := []struct { - name string - policyMask uint64 - event trace.Event - expected bool + name string + streams []streamTest + event trace.Event }{ { - name: "event matched policy 1, policy mask 1", - policyMask: 0b1, - event: policy1Event, - expected: false, - }, - { - name: "event matched policy 1, policy mask 2", - policyMask: 0b10, - event: policy1Event, - expected: true, + name: "multiple streams with different policies", + streams: []streamTest{ + {policies: []string{"policy1", "policy2"}, expect: true}, + {policies: []string{"policy3"}, expect: false}, + {policies: []string{}, expect: true}, // all policies + }, + event: trace.Event{ + 
MatchedPolicies: []string{"policy1"}, + }, }, { - name: "event matched policy 1, catch all policy mask", - policyMask: 0xffffffffffffffff, - event: policy1Event, - expected: false, + name: "overlapping policies between streams", + streams: []streamTest{ + {policies: []string{"policy1"}, expect: true}, + {policies: []string{"policy1", "policy2"}, expect: true}, + {policies: []string{"policy2", "policy3"}, expect: false}, + }, + event: trace.Event{ + MatchedPolicies: []string{"policy1"}, + }, }, { - name: "event matched policy 1 and policy 2, policy mask 1", - policyMask: 0b1, - event: policy1And2Event, - expected: false, - }, - { - name: "event matched policy 1 and policy 2, policy mask 2", - policyMask: 0b10, - event: policy1And2Event, - expected: false, - }, - { - name: "event matched policy 1 and policy 2, catch all policy mask", - policyMask: 0xffffffffffffffff, - event: policy1And2Event, - expected: false, + name: "event matching multiple policies", + streams: []streamTest{ + {policies: []string{"policy1"}, expect: true}, + {policies: []string{"policy2"}, expect: true}, + {policies: []string{"policy3"}, expect: false}, + }, + event: trace.Event{ + MatchedPolicies: []string{"policy1", "policy2"}, + }, }, } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - stream := sm.Subscribe(tt.policyMask, 0) - assert.Equal(t, tt.expected, stream.shouldIgnorePolicy(tt.event)) + var streams []*Stream + + // Create streams with different policies + for _, s := range tt.streams { + stream := sm.Subscribe(s.policies, 1) + streams = append(streams, stream) + } + + // Publish event + sm.Publish(ctx, tt.event) + + // Check each stream + for i, s := range tt.streams { + select { + case evt := <-streams[i].ReceiveEvents(): + if !s.expect { + t.Errorf("Stream %d received unexpected event: %v", i, evt) + } + assert.Equal(t, tt.event, evt) + default: + if s.expect { + t.Errorf("Stream %d did not receive expected event", i) + } + } + } + + // Cleanup + for _, stream := range streams { + sm.Unsubscribe(stream) + } }) } } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 197af631bd2b..12a6b2ecba3e 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -35,6 +35,69 @@ func SetBit(n *uint64, offset uint) { *n |= (1 << offset) } +// Bitmap array functions for handling rules with ID > 64 + +// HasBitInArray checks if a bit is set in a bitmap array at the given ruleID +func HasBitInArray(bitmaps []uint64, ruleID uint) bool { + bitmapIndex := ruleID / 64 + bitOffset := ruleID % 64 + + if int(bitmapIndex) >= len(bitmaps) { + return false + } + + return HasBit(bitmaps[bitmapIndex], bitOffset) +} + +// SetBitInArray sets a bit in a bitmap array at the given ruleID +func SetBitInArray(bitmaps *[]uint64, ruleID uint) { + bitmapIndex := ruleID / 64 + bitOffset := ruleID % 64 + + // Ensure the bitmap array is large enough + for len(*bitmaps) <= int(bitmapIndex) { + *bitmaps = append(*bitmaps, 0) + } + + SetBit(&(*bitmaps)[bitmapIndex], bitOffset) +} + +// ClearBitInArray clears a bit in a bitmap array at the given ruleID +func ClearBitInArray(bitmaps *[]uint64, ruleID uint) { + bitmapIndex := ruleID / 64 + bitOffset := ruleID % 64 + + if int(bitmapIndex) >= len(*bitmaps) { + return // Bit is already "cleared" (doesn't exist) + } + + ClearBit(&(*bitmaps)[bitmapIndex], bitOffset) +} + +// OrBitmapArrays performs a bitwise OR operation between two bitmap arrays +// and stores the result in the first array +func OrBitmapArrays(dest *[]uint64, src []uint64) { + // Ensure 
dest is large enough to accommodate src + for len(*dest) < len(src) { + *dest = append(*dest, 0) + } + + // Perform OR operation for each bitmap + for i := 0; i < len(src); i++ { + (*dest)[i] |= src[i] + } +} + +// IsBitmapArrayEmpty checks if all bits in the bitmap array are zero +func IsBitmapArrayEmpty(bitmaps []uint64) bool { + for _, bitmap := range bitmaps { + if bitmap != 0 { + return false + } + } + return true +} + func Min(x, y uint64) uint64 { if x < y { return x diff --git a/tests/integration/dependencies_test.go b/tests/integration/dependencies_test.go index 30dcb3a55bf5..6b9ff200df4f 100644 --- a/tests/integration/dependencies_test.go +++ b/tests/integration/dependencies_test.go @@ -223,13 +223,13 @@ func Test_EventsDependencies(t *testing.T) { func createGenericEventForCmdEvents(eventId events.ID) trace.Event { return trace.Event{ - HostName: anyHost, - ProcessName: anyComm, - ProcessorID: anyProcessorID, - ProcessID: anyPID, - UserID: anyUID, - EventID: int(eventId), - MatchedPoliciesUser: anyPolicy, + HostName: anyHost, + ProcessName: anyComm, + ProcessorID: anyProcessorID, + ProcessID: anyPID, + UserID: anyUID, + EventID: int(eventId), + MatchedRulesUser: anyPolicy, } } diff --git a/tests/integration/event_filters_test.go b/tests/integration/event_filters_test.go index acdf1e168057..890e42dbb147 100644 --- a/tests/integration/event_filters_test.go +++ b/tests/integration/event_filters_test.go @@ -6,6 +6,7 @@ import ( "fmt" "math" "path/filepath" + "reflect" "sort" "strings" "testing" @@ -38,9 +39,8 @@ func Test_EventFilters(t *testing.T) { // events matched in single policies - detached workloads { name: "container: event: trace only events from new containers", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "container-event", @@ -81,9 +81,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "mntns/pidns: trace events only from mount/pid namespace 0", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "mntns/pidns", @@ -111,9 +110,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "mntns: trace events from all mount namespaces but current", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "mntns", @@ -139,9 +137,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "pidns: trace events from all pid namespaces but current", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "pidns", @@ -167,9 +164,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: mntns: pidns: event: trace events set in a single policy from current pid/mount namespaces", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm_mntns_pidns_event", @@ -213,9 +209,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: trace events set in a single policy from ping command", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event", @@ -257,9 +252,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: 
trace events set in a single policy from ping command", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 5, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event", @@ -297,9 +291,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "event: data: trace event set in a specific policy with data pathname finishing with 'ls'", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "event-data", @@ -339,9 +332,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "event: data: trace event set in a specific policy with data pathname starting with * wildcard", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "event-data", @@ -375,9 +367,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event set in a specific policy with data from ls command", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-data", @@ -416,9 +407,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: trace events set in two specific policies from ls and uname commands", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 4, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-4", @@ -438,7 +428,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 2, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-2", @@ -482,9 +471,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "exec: event: trace events in separate policies from who and uname executable", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "exec-event-1", @@ -504,7 +492,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 2, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "exec-event-2", @@ -597,9 +584,8 @@ func Test_EventFilters(t *testing.T) { // }, { name: "pid: trace events from pid 1", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "pid-1", @@ -636,9 +622,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "uid: comm: trace uid 0 from ls command", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "uid-0-comm", @@ -671,9 +656,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "uid: comm: trace only uid>0 from ls command (should be empty)", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "uid-0-comm", @@ -704,9 +688,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: trace filesystem events from ls command", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "event-fs", @@ -743,9 +726,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "exec: event: trace only setns events from \"/usr/bin/dockerd\" executable", - policyFiles: 
[]testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "exec-event", @@ -783,9 +765,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "pid: trace new (should be empty)", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "pid-new", @@ -816,9 +797,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: trace events set in a specific policy from ls command", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 64, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-64", @@ -850,9 +830,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: trace events set in a specific policy from ls command", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 64, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-64", @@ -868,7 +847,6 @@ func Test_EventFilters(t *testing.T) { }, { // no events expected - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-42", @@ -900,9 +878,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: trace events set in a specific policy from ls and who commands", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 64, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-64", @@ -917,7 +894,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-42", @@ -958,9 +934,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "event: data: context: only security_file_open from \"execve\" syscall", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "event-data-context", @@ -1001,9 +976,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: do a file write", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event", @@ -1044,9 +1018,8 @@ func Test_EventFilters(t *testing.T) { // // created and an event like anti_debugging is not known in advance. 
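The event_filters tests above now reference policies purely by name, while the in-kernel match bookkeeping relies on the bitmap-array helpers added to pkg/utils earlier in this diff. A minimal usage sketch of those helpers follows; the import path and the concrete rule IDs are assumptions for illustration:

package main

import (
	"fmt"

	"github.com/aquasecurity/tracee/pkg/utils"
)

func main() {
	// Rule IDs are no longer capped at 63: ruleID/64 selects the word in the
	// slice and ruleID%64 selects the bit inside that word.
	var matched []uint64
	utils.SetBitInArray(&matched, 3)  // word 0, bit 3
	utils.SetBitInArray(&matched, 70) // word 1, bit 6 (slice grows as needed)

	fmt.Println(utils.HasBitInArray(matched, 70))  // true
	fmt.Println(utils.HasBitInArray(matched, 130)) // false: beyond the slice, treated as unset

	// OrBitmapArrays keeps the union of two match sets, growing dest if needed.
	other := []uint64{0, 0, 1} // rule 128
	utils.OrBitmapArrays(&matched, other)
	fmt.Println(utils.HasBitInArray(matched, 128)) // true
	fmt.Println(utils.IsBitmapArrayEmpty(matched)) // false
}
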
// { // name: "comm: event: data: sign: trace sys events + signature events in separate policies", - // policyFiles: []testutils.PolicyFileWithID{ + // policyFiles: []testutils.PolicyFile{ // { - // Id: 3, // PolicyFile: v1beta1.PolicyFile{ // Name: "comm-event", // Scope: []string{"comm=ping"}, @@ -1060,7 +1033,6 @@ func Test_EventFilters(t *testing.T) { // }, // }, // { - // Id: 5, // PolicyFile: v1beta1.PolicyFile{ // Name: "event-data", // Scope: []string{}, @@ -1074,7 +1046,6 @@ func Test_EventFilters(t *testing.T) { // }, // }, // { - // Id: 9, // PolicyFile: v1beta1.PolicyFile{ // Name: "signature", // Scope: []string{}, @@ -1115,9 +1086,8 @@ func Test_EventFilters(t *testing.T) { // events matched in multiple policies - intertwined workloads { name: "comm: event: trace events from ping command in multiple policies", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 3, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-3", @@ -1137,7 +1107,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 5, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-5", @@ -1175,9 +1144,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: trace events from ping command in multiple policies", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 3, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-3", @@ -1197,7 +1165,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 5, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-5", @@ -1241,9 +1208,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: trace events from ping command in multiple policies", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 3, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-3", @@ -1263,7 +1229,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 5, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-5", @@ -1283,7 +1248,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 7, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-7", @@ -1303,7 +1267,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 9, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-9", @@ -1349,9 +1312,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: trace events from nc command for net_tcp_connect event", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "net-event-1", @@ -1389,9 +1351,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: trace only events from from ls and who commands in multiple policies", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 64, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-64", @@ -1406,7 +1367,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-42", @@ -1448,9 +1408,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: trace at least one event in multiple policies from ls and who commands", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 64, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ 
Name: "comm-64", @@ -1465,7 +1424,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-42", @@ -1512,9 +1470,8 @@ func Test_EventFilters(t *testing.T) { // - emit read and write events, as defined in expected events { name: "comm: event: trace events read and write set in a single policy from fakeprog1 command", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event", @@ -1556,9 +1513,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "event: trace execve event set in a specific policy from fakeprog1 command", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "event-pol-42", @@ -1593,9 +1549,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event set in a specific policy with data from fakeprog1 and fakeprog2 commands", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 64, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-data-64", @@ -1619,7 +1574,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-data-42", @@ -1675,9 +1629,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: retval: trace event set in a specific policy with retval from fakeprog1 and fakeprog2 commands", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 64, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-retval-64", @@ -1700,7 +1653,6 @@ func Test_EventFilters(t *testing.T) { }, { // no events expected - Id: 42, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "comm-event-retval-42", @@ -1750,9 +1702,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event security_file_open set in multiple policies using multiple filter types", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-pol-1", @@ -1776,7 +1727,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 2, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-pol-2", @@ -1800,7 +1750,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 3, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-pol-3", @@ -1849,9 +1798,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event security_file_open and magic_write using multiple filter types combined", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-mw-combined-pol-1", @@ -1880,7 +1828,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 2, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-mw-combined-pol-2", @@ -1934,9 +1881,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event magic_write set in multiple policies using multiple filter types", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "mw-pol-1", @@ -1958,7 +1904,6 @@ func 
Test_EventFilters(t *testing.T) { }, }, { - Id: 2, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "mw-pol-2", @@ -2000,9 +1945,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event security_file_open set in multiple policies (with and without in-kernel filter)", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-pol-1", @@ -2024,7 +1968,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 2, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-pol-2", @@ -2072,9 +2015,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event security_file_open set in multiple policies (with and without in-kernel filter) mixed in same policy", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-pol-1", @@ -2097,7 +2039,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 2, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-pol-2", @@ -2136,9 +2077,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event security_mmap_file using multiple filter types", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "smf-pol-1", @@ -2179,9 +2119,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event security_file_open and magic_write using multiple filter types", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-mw-pol-1", @@ -2229,9 +2168,8 @@ func Test_EventFilters(t *testing.T) { }, { name: "comm: event: data: trace event with pathname exceeding 255 characters", - policyFiles: []testutils.PolicyFileWithID{ + policyFiles: []testutils.PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-pol-1", @@ -2253,7 +2191,6 @@ func Test_EventFilters(t *testing.T) { }, }, { - Id: 2, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "sfo-pol-2", @@ -2393,13 +2330,16 @@ const ( anyEventID = -1 anyPID = -1 anyUID = -1 - anyPolicy = 0 anyPolicyName = "" ) +var ( + anyPolicy = []uint64{0} +) + type testCase struct { name string - policyFiles []testutils.PolicyFileWithID + policyFiles []testutils.PolicyFile cmdEvents []cmdEvents useSyscaller bool coolDown time.Duration // cool down before running the test case @@ -2464,15 +2404,15 @@ func expectEvent( args ...trace.Argument, ) trace.Event { return trace.Event{ - ProcessorID: processorID, - ProcessID: pid, - UserID: uid, - ProcessName: comm, - HostName: host, - EventID: int(eventID), - MatchedPolicies: matchPolName, - MatchedPoliciesUser: matchPols, - Args: args, + ProcessorID: processorID, + ProcessID: pid, + UserID: uid, + ProcessName: comm, + HostName: host, + EventID: int(eventID), + MatchedPolicies: matchPolName, + MatchedRulesUser: []uint64{matchPols}, + Args: args, } } @@ -2701,7 +2641,7 @@ func ExpectAtLeastOneForEach(t *testing.T, cmdEvents []cmdEvents, actual *eventB checkPID := expEvt.ProcessID != anyPID checkUID := expEvt.UserID != anyUID checkEventID := expEvt.EventID != anyEventID - checkPolicy := expEvt.MatchedPoliciesUser != anyPolicy + checkPolicy := 
!reflect.DeepEqual(expEvt.MatchedRulesUser, anyPolicy) checkPolicyName := len(expEvt.MatchedPolicies) > 0 && expEvt.MatchedPolicies[0] != anyPolicyName for _, actEvt := range actEvtsCopy { @@ -2730,7 +2670,7 @@ func ExpectAtLeastOneForEach(t *testing.T, cmdEvents []cmdEvents, actual *eventB if checkEventID && actEvt.EventID != expEvt.EventID { continue } - if checkPolicy && actEvt.MatchedPoliciesUser != expEvt.MatchedPoliciesUser { + if checkPolicy && !reflect.DeepEqual(actEvt.MatchedRulesUser, expEvt.MatchedRulesUser) { continue } if checkPolicyName { @@ -2867,7 +2807,7 @@ func ExpectAnyOfEvts(t *testing.T, cmdEvents []cmdEvents, actual *eventBuffer, u checkPID := expEvt.ProcessID != anyPID checkUID := expEvt.UserID != anyUID checkEventID := expEvt.EventID != anyEventID - checkPolicy := expEvt.MatchedPoliciesUser != anyPolicy + checkPolicy := !reflect.DeepEqual(expEvt.MatchedRulesUser, anyPolicy) checkPolicyName := len(expEvt.MatchedPolicies) > 0 && expEvt.MatchedPolicies[0] != anyPolicyName if len(cmd.expectedEvents) > 0 && proc.expectedEvts == 0 { @@ -2900,7 +2840,7 @@ func ExpectAnyOfEvts(t *testing.T, cmdEvents []cmdEvents, actual *eventBuffer, u if checkEventID && actEvt.EventID != expEvt.EventID { continue } - if checkPolicy && actEvt.MatchedPoliciesUser != expEvt.MatchedPoliciesUser { + if checkPolicy && !reflect.DeepEqual(actEvt.MatchedRulesUser, expEvt.MatchedRulesUser) { continue } if checkPolicyName { @@ -3008,7 +2948,7 @@ func ExpectAllEvtsEqualToOne(t *testing.T, cmdEvents []cmdEvents, actual *eventB checkPID := expEvt.ProcessID != anyPID checkUID := expEvt.UserID != anyUID checkEventID := expEvt.EventID != anyEventID - checkPolicy := expEvt.MatchedPoliciesUser != anyPolicy + checkPolicy := !reflect.DeepEqual(expEvt.MatchedRulesUser, anyPolicy) checkPolicyName := len(expEvt.MatchedPolicies) > 0 && expEvt.MatchedPolicies[0] != anyPolicyName for _, actEvt := range actEvtsCopy { @@ -3037,8 +2977,8 @@ func ExpectAllEvtsEqualToOne(t *testing.T, cmdEvents []cmdEvents, actual *eventB if checkEventID && !assert.ObjectsAreEqual(expEvt.EventID, actEvt.EventID) { return fmt.Errorf("Event %+v:\nevent Id mismatch: expected %d, got %d", expEvt, expEvt.EventID, actEvt.EventID) } - if checkPolicy && !assert.ObjectsAreEqual(expEvt.MatchedPoliciesUser, actEvt.MatchedPoliciesUser) { - return fmt.Errorf("Event %+v:\nmatched policies mismatch: expected %d, got %d", expEvt, expEvt.MatchedPoliciesUser, actEvt.MatchedPoliciesUser) + if checkPolicy && !reflect.DeepEqual(expEvt.MatchedRulesUser, actEvt.MatchedRulesUser) { + return fmt.Errorf("Event %+v:\nmatched policies mismatch: expected %v, got %v", expEvt, expEvt.MatchedRulesUser, actEvt.MatchedRulesUser) } if checkPolicyName && !assertUnorderedStringSlicesEqual(expEvt.MatchedPolicies, actEvt.MatchedPolicies) { return fmt.Errorf("Event %+v:\nmatched policies mismatch: expected %v, got %v", expEvt, expEvt.MatchedPolicies, actEvt.MatchedPolicies) @@ -3121,7 +3061,7 @@ func ExpectAllInOrderSequentially(t *testing.T, cmdEvents []cmdEvents, actual *e checkPID := expEvt.ProcessID != anyPID checkUID := expEvt.UserID != anyUID checkEventID := expEvt.EventID != anyEventID - checkPolicy := expEvt.MatchedPoliciesUser != anyPolicy + checkPolicy := !reflect.DeepEqual(expEvt.MatchedRulesUser, anyPolicy) checkPolicyName := len(expEvt.MatchedPolicies) > 0 && expEvt.MatchedPolicies[0] != anyPolicyName if checkHost && !assert.ObjectsAreEqual(expEvt.HostName, actEvt.HostName) { @@ -3145,8 +3085,8 @@ func ExpectAllInOrderSequentially(t *testing.T, cmdEvents 
[]cmdEvents, actual *e if checkEventID && !assert.ObjectsAreEqual(expEvt.EventID, actEvt.EventID) { return fmt.Errorf("Event %+v:\nevent Id mismatch: expected %d, got %d", expEvt, expEvt.EventID, actEvt.EventID) } - if checkPolicy && !assert.ObjectsAreEqual(expEvt.MatchedPoliciesUser, actEvt.MatchedPoliciesUser) { - return fmt.Errorf("Event %+v:\nmatched policies mismatch: expected %d, got %d", expEvt, expEvt.MatchedPoliciesUser, actEvt.MatchedPoliciesUser) + if checkPolicy && !assert.ObjectsAreEqual(expEvt.MatchedRulesUser, actEvt.MatchedRulesUser) { + return fmt.Errorf("Event %+v:\nmatched policies mismatch: expected %d, got %d", expEvt, expEvt.MatchedRulesUser, actEvt.MatchedRulesUser) } if checkPolicyName && !assertUnorderedStringSlicesEqual(expEvt.MatchedPolicies, actEvt.MatchedPolicies) { return fmt.Errorf("Event %+v:\nmatched policies mismatch: expected %v, got %v", expEvt, expEvt.MatchedPolicies, actEvt.MatchedPolicies) diff --git a/tests/testutils/policies.go b/tests/testutils/policies.go index 6a8b061f2b11..c6c4e46d213c 100644 --- a/tests/testutils/policies.go +++ b/tests/testutils/policies.go @@ -24,9 +24,8 @@ func BuildPoliciesFromEvents(eventsToChoose []events.ID) []*policy.Policy { policyRules = append(policyRules, rule) } - policiesFiles := []PolicyFileWithID{ + policiesFiles := []PolicyFile{ { - Id: 1, PolicyFile: v1beta1.PolicyFile{ Metadata: v1beta1.Metadata{ Name: "test-policy", @@ -42,11 +41,11 @@ func BuildPoliciesFromEvents(eventsToChoose []events.ID) []*policy.Policy { return NewPolicies(policiesFiles) } -// NewPolicies creates a slice of policies setting the ID of each policy to the given ID. -func NewPolicies(polsFilesID []PolicyFileWithID) []*policy.Policy { +// NewPolicies creates a slice of policies +func NewPolicies(polFiles []PolicyFile) []*policy.Policy { var polsFiles []k8s.PolicyInterface - for _, polFile := range polsFilesID { + for _, polFile := range polFiles { polsFiles = append(polsFiles, polFile.PolicyFile) } @@ -62,23 +61,21 @@ func NewPolicies(polsFilesID []PolicyFileWithID) []*policy.Policy { for i := range policies { found := false - for j := range polsFilesID { - if policies[i].Name == polsFilesID[j].PolicyFile.Metadata.Name { - policies[i].ID = polsFilesID[j].Id - 1 + for j := range polFiles { + if policies[i].Name == polFiles[j].PolicyFile.Metadata.Name { found = true break } } if !found { - panic(fmt.Errorf("policy %s not found in polsFilesID", policies[i].Name)) + panic(fmt.Errorf("policy %s not found in polFiles", policies[i].Name)) } } return policies } -type PolicyFileWithID struct { +type PolicyFile struct { PolicyFile v1beta1.PolicyFile - Id int } diff --git a/types/trace/trace.go b/types/trace/trace.go index 5a9a167ba01c..3e1ec5b0fec0 100644 --- a/types/trace/trace.go +++ b/types/trace/trace.go @@ -15,41 +15,41 @@ import ( // Event is a single result of an ebpf event process. It is used as a payload later delivered to tracee-rules. 
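The Event struct below carries the renamed match fields. A short sketch of how a consumer could combine them with the bitmap-array helper; the pkg/utils import path and the concrete rule IDs and policy name are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/aquasecurity/tracee/pkg/utils"
	"github.com/aquasecurity/tracee/types/trace"
)

func main() {
	evt := trace.Event{
		// MatchedRulesKernel/MatchedRulesUser are now []uint64 bitmaps, so
		// rule IDs above 64 spill into additional words instead of being lost.
		MatchedRulesUser: []uint64{0b1010},         // rules 1 and 3 matched
		MatchedPolicies:  []string{"comm-event-3"}, // resolved policy names
	}

	fmt.Println(utils.HasBitInArray(evt.MatchedRulesUser, 3)) // true
	fmt.Println(utils.HasBitInArray(evt.MatchedRulesUser, 2)) // false
}
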
type Event struct { - Timestamp int `json:"timestamp"` - ThreadStartTime int `json:"threadStartTime"` - ProcessorID int `json:"processorId"` - ProcessID int `json:"processId"` - CgroupID uint `json:"cgroupId"` - ThreadID int `json:"threadId"` - ParentProcessID int `json:"parentProcessId"` - HostProcessID int `json:"hostProcessId"` - HostThreadID int `json:"hostThreadId"` - HostParentProcessID int `json:"hostParentProcessId"` - UserID int `json:"userId"` - MountNS int `json:"mountNamespace"` - PIDNS int `json:"pidNamespace"` - ProcessName string `json:"processName"` - Executable File `json:"executable"` - HostName string `json:"hostName"` - ContainerID string `json:"containerId"` - Container Container `json:"container,omitempty"` - Kubernetes Kubernetes `json:"kubernetes,omitempty"` - EventID int `json:"eventId,string"` - EventName string `json:"eventName"` - PoliciesVersion uint16 `json:"-"` - MatchedPoliciesKernel uint64 `json:"-"` - MatchedPoliciesUser uint64 `json:"-"` - MatchedPolicies []string `json:"matchedPolicies,omitempty"` - ArgsNum int `json:"argsNum"` - ReturnValue int `json:"returnValue"` - Syscall string `json:"syscall"` - StackAddresses []uint64 `json:"stackAddresses"` - ContextFlags ContextFlags `json:"contextFlags"` - ThreadEntityId uint32 `json:"threadEntityId"` // thread task unique identifier (*) - ProcessEntityId uint32 `json:"processEntityId"` // process unique identifier (*) - ParentEntityId uint32 `json:"parentEntityId"` // parent process unique identifier (*) - Args []Argument `json:"args"` // args are ordered according their appearance in the original event - Metadata *Metadata `json:"metadata,omitempty"` + Timestamp int `json:"timestamp"` + ThreadStartTime int `json:"threadStartTime"` + ProcessorID int `json:"processorId"` + ProcessID int `json:"processId"` + CgroupID uint `json:"cgroupId"` + ThreadID int `json:"threadId"` + ParentProcessID int `json:"parentProcessId"` + HostProcessID int `json:"hostProcessId"` + HostThreadID int `json:"hostThreadId"` + HostParentProcessID int `json:"hostParentProcessId"` + UserID int `json:"userId"` + MountNS int `json:"mountNamespace"` + PIDNS int `json:"pidNamespace"` + ProcessName string `json:"processName"` + Executable File `json:"executable"` + HostName string `json:"hostName"` + ContainerID string `json:"containerId"` + Container Container `json:"container,omitempty"` + Kubernetes Kubernetes `json:"kubernetes,omitempty"` + EventID int `json:"eventId,string"` + EventName string `json:"eventName"` + RulesVersion uint16 `json:"-"` + MatchedRulesKernel []uint64 `json:"-"` // Bitmap array supporting rules with ID > 64 + MatchedRulesUser []uint64 `json:"-"` // Bitmap array supporting rules with ID > 64 + MatchedPolicies []string `json:"matchedPolicies,omitempty"` + ArgsNum int `json:"argsNum"` + ReturnValue int `json:"returnValue"` + Syscall string `json:"syscall"` + StackAddresses []uint64 `json:"stackAddresses"` + ContextFlags ContextFlags `json:"contextFlags"` + ThreadEntityId uint32 `json:"threadEntityId"` // thread task unique identifier (*) + ProcessEntityId uint32 `json:"processEntityId"` // process unique identifier (*) + ParentEntityId uint32 `json:"parentEntityId"` // parent process unique identifier (*) + Args []Argument `json:"args"` // args are ordered according their appearance in the original event + Metadata *Metadata `json:"metadata,omitempty"` } // (*) For an OS task to be uniquely identified, tracee builds a hash consisting of: