
Commit 9bc64bd

w1ldptr authored and kuba-moo committed
net/sched: act_ct: Always fill offloading tuple iifidx
Referenced commit doesn't always set iifidx when offloading the flow to hardware. Fix the following cases:

- nf_conn_act_ct_ext_fill() is called before the extension is created with nf_conn_act_ct_ext_add() in tcf_ct_act(). This can cause rule offload with unspecified iifidx when a connection is offloaded after only a single original-direction packet has been processed by the tc data path. Always fill the new nf_conn_act_ct_ext instance after creating it in nf_conn_act_ct_ext_add().

- Offloading of unidirectional UDP NEW connections is now supported, but the ct flow iifidx field is not updated when the connection is promoted to bidirectional, which can result in a zero reply-direction iifidx when refreshing the connection. Fill in the extension and update the flow iifidx before calling flow_offload_refresh().

Fixes: 9795ded ("net/sched: act_ct: Fill offloading tuple iifidx")
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Fixes: 6a9bad0 ("net/sched: act_ct: offload UDP NEW connections")
Link: https://lore.kernel.org/r/20231103151410.764271-1-vladbu@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
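To make the first case concrete, here is a minimal user-space sketch of the ordering problem (illustrative only: struct conn, ext_add_old() and ext_add_fixed() are hypothetical stand-ins, not the kernel API). If the extension is allocated without recording the ingress ifindex and no later fill runs before the tuple is offloaded, the hardware rule ends up with iifidx == 0; recording the ifindex right after allocation, as the reworked nf_conn_act_ct_ext_add() in the diff below does, avoids that.

#include <stdio.h>
#include <stdlib.h>

enum dir { DIR_ORIGINAL, DIR_REPLY, DIR_MAX };

/* Stand-in for struct nf_conn_act_ct_ext: one ifindex per flow direction. */
struct act_ct_ext {
	int ifindex[DIR_MAX];
};

/* Stand-in for struct nf_conn carrying its act_ct extension. */
struct conn {
	struct act_ct_ext *ext;
};

/* Old behaviour: allocate the extension but leave ifindex[] zeroed;
 * nothing fills it before the tuple is offloaded. */
static struct act_ct_ext *ext_add_old(struct conn *ct)
{
	ct->ext = calloc(1, sizeof(*ct->ext));
	return ct->ext;
}

/* Fixed behaviour: record the ingress ifindex right after allocation,
 * mirroring the fill-after-add ordering introduced by this commit. */
static struct act_ct_ext *ext_add_fixed(struct conn *ct, int skb_ifidx, enum dir d)
{
	ct->ext = calloc(1, sizeof(*ct->ext));
	if (ct->ext)
		ct->ext->ifindex[d] = skb_ifidx;
	return ct->ext;
}

int main(void)
{
	struct conn a = { 0 }, b = { 0 };

	if (!ext_add_old(&a) || !ext_add_fixed(&b, 5, DIR_ORIGINAL))
		return 1;

	/* Old path would offload with iifidx 0; fixed path carries the real ifindex. */
	printf("old: iifidx=%d, fixed: iifidx=%d\n",
	       a.ext->ifindex[DIR_ORIGINAL], b.ext->ifindex[DIR_ORIGINAL]);
	free(a.ext);
	free(b.ext);
	return 0;
}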
1 parent caf3100 commit 9bc64bd

File tree

3 files changed, +32 -15 lines changed


include/net/netfilter/nf_conntrack_act_ct.h

Lines changed: 17 additions & 13 deletions
@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
 #endif
 }
 
-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+					   enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+	struct nf_conn_act_ct_ext *act_ct_ext;
+
+	act_ct_ext = nf_conn_act_ct_ext_find(ct);
+	if (dev_net(skb->dev) == &init_net && act_ct_ext)
+		act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+#endif
+}
+
+static inline struct
+nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
+					   struct nf_conn *ct,
+					   enum ip_conntrack_info ctinfo)
 {
 #if IS_ENABLED(CONFIG_NET_ACT_CT)
 	struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
 		return act_ct;
 
 	act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
+	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
 	return act_ct;
 #else
 	return NULL;
 #endif
 }
 
-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
-					   enum ip_conntrack_info ctinfo)
-{
-#if IS_ENABLED(CONFIG_NET_ACT_CT)
-	struct nf_conn_act_ct_ext *act_ct_ext;
-
-	act_ct_ext = nf_conn_act_ct_ext_find(ct);
-	if (dev_net(skb->dev) == &init_net && act_ct_ext)
-		act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
-#endif
-}
-
 #endif /* _NF_CONNTRACK_ACT_CT_H */

net/openvswitch/conntrack.c

Lines changed: 1 addition & 1 deletion
@@ -985,7 +985,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
 		if (err)
 			return err;
 
-		nf_conn_act_ct_ext_add(ct);
+		nf_conn_act_ct_ext_add(skb, ct, ctinfo);
 	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
 		   labels_nonzero(&info->labels.mask)) {
 		err = ovs_ct_set_labels(ct, key, &info->labels.value,

net/sched/act_ct.c

Lines changed: 14 additions & 1 deletion
@@ -376,6 +376,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
 	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
 }
 
+static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
+{
+	struct nf_conn_act_ct_ext *act_ct_ext;
+
+	act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
+	if (act_ct_ext) {
+		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
+		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
+	}
+}
+
 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
 				  struct nf_conn *ct,
 				  bool tcp, bool bidirectional)
@@ -671,6 +682,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
 	else
 		ctinfo = IP_CT_ESTABLISHED_REPLY;
 
+	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+	tcf_ct_flow_ct_ext_ifidx_update(flow);
 	flow_offload_refresh(nf_ft, flow, force_refresh);
 	if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
 		/* Process this flow in SW to allow promoting to ASSURED */
@@ -1034,7 +1047,7 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
 	tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
 
 	if (!nf_ct_is_confirmed(ct))
-		nf_conn_act_ct_ext_add(ct);
+		nf_conn_act_ct_ext_add(skb, ct, ctinfo);
 
 	/* This will take care of sending queued events
 	 * even if the connection is already confirmed.
