@@ -1763,6 +1763,186 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
 	return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
 }
 
+static void
+nfp_net_fs_fill_v4(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+	unsigned int i;
+
+	union {
+		struct {
+			__be16 loc;
+			u8 k_proto, m_proto;
+			__be32 k_sip, m_sip, k_dip, m_dip;
+			__be16 k_sport, m_sport, k_dport, m_dport;
+		};
+		__be32 val[7];
+	} v4_rule;
+
+	nn_writel(nn, *addr, op);
+	*addr += sizeof(u32);
+
+	v4_rule.loc = cpu_to_be16(entry->loc);
+	v4_rule.k_proto = entry->key.l4_proto;
+	v4_rule.m_proto = entry->msk.l4_proto;
+	v4_rule.k_sip = entry->key.sip4;
+	v4_rule.m_sip = entry->msk.sip4;
+	v4_rule.k_dip = entry->key.dip4;
+	v4_rule.m_dip = entry->msk.dip4;
+	v4_rule.k_sport = entry->key.sport;
+	v4_rule.m_sport = entry->msk.sport;
+	v4_rule.k_dport = entry->key.dport;
+	v4_rule.m_dport = entry->msk.dport;
+
+	for (i = 0; i < ARRAY_SIZE(v4_rule.val); i++, *addr += sizeof(__be32))
+		nn_writel(nn, *addr, be32_to_cpu(v4_rule.val[i]));
+}
+
+static void
+nfp_net_fs_fill_v6(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+	unsigned int i;
+
+	union {
+		struct {
+			__be16 loc;
+			u8 k_proto, m_proto;
+			__be32 k_sip[4], m_sip[4], k_dip[4], m_dip[4];
+			__be16 k_sport, m_sport, k_dport, m_dport;
+		};
+		__be32 val[19];
+	} v6_rule;
+
+	nn_writel(nn, *addr, op);
+	*addr += sizeof(u32);
+
+	v6_rule.loc = cpu_to_be16(entry->loc);
+	v6_rule.k_proto = entry->key.l4_proto;
+	v6_rule.m_proto = entry->msk.l4_proto;
+	for (i = 0; i < 4; i++) {
+		v6_rule.k_sip[i] = entry->key.sip6[i];
+		v6_rule.m_sip[i] = entry->msk.sip6[i];
+		v6_rule.k_dip[i] = entry->key.dip6[i];
+		v6_rule.m_dip[i] = entry->msk.dip6[i];
+	}
+	v6_rule.k_sport = entry->key.sport;
+	v6_rule.m_sport = entry->msk.sport;
+	v6_rule.k_dport = entry->key.dport;
+	v6_rule.m_dport = entry->msk.dport;
+
+	for (i = 0; i < ARRAY_SIZE(v6_rule.val); i++, *addr += sizeof(__be32))
+		nn_writel(nn, *addr, be32_to_cpu(v6_rule.val[i]));
+}
+
+#define NFP_FS_QUEUE_ID		GENMASK(22, 16)
+#define NFP_FS_ACT		GENMASK(15, 0)
+#define NFP_FS_ACT_DROP		BIT(0)
+#define NFP_FS_ACT_Q		BIT(1)
+static void
+nfp_net_fs_fill_act(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 addr)
+{
+	u32 action = 0; /* 0 means default passthrough */
+
+	if (entry->action == RX_CLS_FLOW_DISC)
+		action = NFP_FS_ACT_DROP;
+	else if (!(entry->flow_type & FLOW_RSS))
+		action = FIELD_PREP(NFP_FS_QUEUE_ID, entry->action) | NFP_FS_ACT_Q;
+
+	nn_writel(nn, addr, action);
+}
+
+int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+	u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+	int err;
+
+	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+	if (err)
+		return err;
+
+	switch (entry->flow_type & ~FLOW_RSS) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+	case IPV4_USER_FLOW:
+		nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V4, &addr);
+		break;
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+	case IPV6_USER_FLOW:
+		nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V6, &addr);
+		break;
+	case ETHER_FLOW:
+		nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE);
+		addr += sizeof(u32);
+		nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+		addr += sizeof(u32);
+		break;
+	}
+
+	nfp_net_fs_fill_act(nn, entry, addr);
+
+	err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+	if (err) {
+		nn_err(nn, "Add new fs rule failed with %d\n", err);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+	u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+	int err;
+
+	err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+	if (err)
+		return err;
+
+	switch (entry->flow_type & ~FLOW_RSS) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+	case IPV4_USER_FLOW:
+		nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V4, &addr);
+		break;
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+	case IPV6_USER_FLOW:
+		nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V6, &addr);
+		break;
+	case ETHER_FLOW:
+		nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE);
+		addr += sizeof(u32);
+		nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+		addr += sizeof(u32);
+		break;
+	}
+
+	nfp_net_fs_fill_act(nn, entry, addr);
+
+	err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+	if (err) {
+		nn_err(nn, "Delete fs rule failed with %d\n", err);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void nfp_net_fs_clean(struct nfp_net *nn)
+{
+	struct nfp_fs_entry *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &nn->fs.list, node) {
+		nfp_net_fs_del_hw(nn, entry);
+		list_del(&entry->node);
+		kfree(entry);
+	}
+}
+
 static void nfp_net_stat64(struct net_device *netdev,
 			   struct rtnl_link_stats64 *stats)
 {
@@ -2740,6 +2920,8 @@ int nfp_net_init(struct nfp_net *nn)
 	INIT_LIST_HEAD(&nn->mbox_amsg.list);
 	INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
 
+	INIT_LIST_HEAD(&nn->fs.list);
+
 	return register_netdev(nn->dp.netdev);
 
 err_clean_mbox:
@@ -2759,6 +2941,7 @@ void nfp_net_clean(struct nfp_net *nn)
 	unregister_netdev(nn->dp.netdev);
 	nfp_net_ipsec_clean(nn);
 	nfp_ccm_mbox_clean(nn);
+	nfp_net_fs_clean(nn);
 	flush_work(&nn->mbox_amsg.work);
 	nfp_net_reconfig_wait_posted(nn);
 }
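
Note: the new functions dereference a struct nfp_fs_entry that is defined elsewhere in the driver and is not part of this hunk. The sketch below is a hypothetical reconstruction inferred only from the fields the patch touches (key/msk, loc, action, flow_type, node); the field widths, ordering, and type names are assumptions, not the driver's actual definitions.

/* Hypothetical sketch, inferred from the accesses in the patch above;
 * the real definitions live in the nfp driver headers.
 */
struct nfp_fs_key {			/* used for both key and mask (msk) */
	u8		l4_proto;	/* L4 protocol for TCP/UDP/SCTP flows */
	__be16		l3_proto;	/* EtherType, used for ETHER_FLOW rules */
	__be32		sip4, dip4;	/* IPv4 source/destination addresses */
	__be32		sip6[4], dip6[4]; /* IPv6 source/destination addresses */
	__be16		sport, dport;	/* L4 source/destination ports */
};

struct nfp_fs_entry {
	struct list_head	node;		/* linked into nn->fs.list */
	u32			flow_type;	/* ethtool flow type, may carry FLOW_RSS */
	struct nfp_fs_key	key, msk;	/* match values and masks */
	u32			action;		/* RX_CLS_FLOW_DISC or a queue id */
	u16			loc;		/* rule location exposed via ethtool */
};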
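As a sanity check on the mailbox message layout: the anonymous struct in nfp_net_fs_fill_v4() packs to 2 + 2 + 4*4 + 4*2 = 28 bytes, matching the seven 32-bit words of val[7], and the IPv6 variant packs to 2 + 2 + 4*16 + 4*2 = 76 bytes, matching val[19]. A minimal compile-time check of that arithmetic could look like the sketch below; the union names are illustrative and not part of the patch, and in kernel code this would need linux/types.h and linux/build_bug.h.

/* Illustrative only: mirror the unions from the patch and assert that the
 * struct view and the val[] word view have the same size, so the copy loops
 * in nfp_net_fs_fill_v4()/_v6() write every byte of the message exactly once.
 */
union nfp_fs_v4_msg {			/* name is hypothetical */
	struct {
		__be16 loc;
		u8 k_proto, m_proto;
		__be32 k_sip, m_sip, k_dip, m_dip;
		__be16 k_sport, m_sport, k_dport, m_dport;
	};
	__be32 val[7];
};
static_assert(sizeof(union nfp_fs_v4_msg) == 7 * sizeof(__be32));

union nfp_fs_v6_msg {			/* name is hypothetical */
	struct {
		__be16 loc;
		u8 k_proto, m_proto;
		__be32 k_sip[4], m_sip[4], k_dip[4], m_dip[4];
		__be16 k_sport, m_sport, k_dport, m_dport;
	};
	__be32 val[19];
};
static_assert(sizeof(union nfp_fs_v6_msg) == 19 * sizeof(__be32));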