@@ -589,10 +589,6 @@ static int axienet_device_reset(struct net_device *ndev)
 	struct axienet_local *lp = netdev_priv(ndev);
 	int ret;
 
-	ret = __axienet_device_reset(lp);
-	if (ret)
-		return ret;
-
 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 	lp->options |= XAE_OPTION_VLAN;
 	lp->options &= (~XAE_OPTION_JUMBO);
@@ -606,11 +602,17 @@ static int axienet_device_reset(struct net_device *ndev)
 		lp->options |= XAE_OPTION_JUMBO;
 	}
 
-	ret = axienet_dma_bd_init(ndev);
-	if (ret) {
-		netdev_err(ndev, "%s: descriptor allocation failed\n",
-			   __func__);
-		return ret;
+	if (!lp->use_dmaengine) {
+		ret = __axienet_device_reset(lp);
+		if (ret)
+			return ret;
+
+		ret = axienet_dma_bd_init(ndev);
+		if (ret) {
+			netdev_err(ndev, "%s: descriptor allocation failed\n",
+				   __func__);
+			return ret;
+		}
 	}
 
 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -1125,41 +1127,21 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
 static void axienet_dma_err_handler(struct work_struct *work);
 
 /**
- * axienet_open - Driver open routine.
- * @ndev:	Pointer to net_device structure
+ * axienet_init_legacy_dma - init the dma legacy code.
+ * @ndev:       Pointer to net_device structure
  *
  * Return: 0, on success.
- *	    non-zero error value on failure
+ *          non-zero error value on failure
+ *
+ * This is the dma initialization code. It also allocates interrupt
+ * service routines, enables the interrupt lines and ISR handling.
  *
- * This is the driver open routine. It calls phylink_start to start the
- * PHY device.
- * It also allocates interrupt service routines, enables the interrupt lines
- * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
- * descriptors are initialized.
 */
-static int axienet_open(struct net_device *ndev)
+static int axienet_init_legacy_dma(struct net_device *ndev)
 {
 	int ret;
 	struct axienet_local *lp = netdev_priv(ndev);
 
-	dev_dbg(&ndev->dev, "axienet_open()\n");
-
-	/* When we do an Axi Ethernet reset, it resets the complete core
-	 * including the MDIO. MDIO must be disabled before resetting.
-	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
-	 */
-	axienet_lock_mii(lp);
-	ret = axienet_device_reset(ndev);
-	axienet_unlock_mii(lp);
-
-	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
-	if (ret) {
-		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
-		return ret;
-	}
-
-	phylink_start(lp->phylink);
-
 	/* Enable worker thread for Axi DMA error handling */
 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
 
@@ -1193,13 +1175,61 @@ static int axienet_open(struct net_device *ndev)
 err_tx_irq:
 	napi_disable(&lp->napi_tx);
 	napi_disable(&lp->napi_rx);
-	phylink_stop(lp->phylink);
-	phylink_disconnect_phy(lp->phylink);
 	cancel_work_sync(&lp->dma_err_task);
 	dev_err(lp->dev, "request_irq() failed\n");
 	return ret;
 }
 
+/**
+ * axienet_open - Driver open routine.
+ * @ndev:	Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ *	    non-zero error value on failure
+ *
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
+ * It also allocates interrupt service routines, enables the interrupt lines
+ * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
+ * descriptors are initialized.
+ */
+static int axienet_open(struct net_device *ndev)
+{
+	int ret;
+	struct axienet_local *lp = netdev_priv(ndev);
+
+	dev_dbg(&ndev->dev, "%s\n", __func__);
+
+	/* When we do an Axi Ethernet reset, it resets the complete core
+	 * including the MDIO. MDIO must be disabled before resetting.
+	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+	 */
+	axienet_lock_mii(lp);
+	ret = axienet_device_reset(ndev);
+	axienet_unlock_mii(lp);
+
+	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+	if (ret) {
+		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+		return ret;
+	}
+
+	phylink_start(lp->phylink);
+
+	if (!lp->use_dmaengine) {
+		ret = axienet_init_legacy_dma(ndev);
+		if (ret)
+			goto err_phy;
+	}
+
+	return 0;
+
+err_phy:
+	phylink_stop(lp->phylink);
+	phylink_disconnect_phy(lp->phylink);
+	return ret;
+}
+
 /**
  * axienet_stop - Driver stop routine.
  * @ndev:	Pointer to net_device structure
@@ -1216,27 +1246,29 @@ static int axienet_stop(struct net_device *ndev)
 
 	dev_dbg(&ndev->dev, "axienet_close()\n");
 
-	napi_disable(&lp->napi_tx);
-	napi_disable(&lp->napi_rx);
+	if (!lp->use_dmaengine) {
+		napi_disable(&lp->napi_tx);
+		napi_disable(&lp->napi_rx);
+	}
 
 	phylink_stop(lp->phylink);
 	phylink_disconnect_phy(lp->phylink);
 
 	axienet_setoptions(ndev, lp->options &
 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 
-	axienet_dma_stop(lp);
+	if (!lp->use_dmaengine) {
+		axienet_dma_stop(lp);
+		cancel_work_sync(&lp->dma_err_task);
+		free_irq(lp->tx_irq, ndev);
+		free_irq(lp->rx_irq, ndev);
+		axienet_dma_bd_release(ndev);
+	}
 
 	axienet_iow(lp, XAE_IE_OFFSET, 0);
 
-	cancel_work_sync(&lp->dma_err_task);
-
 	if (lp->eth_irq > 0)
 		free_irq(lp->eth_irq, ndev);
-	free_irq(lp->tx_irq, ndev);
-	free_irq(lp->rx_irq, ndev);
-
-	axienet_dma_bd_release(ndev);
 	return 0;
 }
 
@@ -1412,14 +1444,16 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
-	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
-	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
-	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
-	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
-	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
-	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
-	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
-	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+	if (!lp->use_dmaengine) {
+		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+	}
 }
 
 static void
@@ -1880,9 +1914,6 @@ static int axienet_probe(struct platform_device *pdev)
 	u64_stats_init(&lp->rx_stat_sync);
 	u64_stats_init(&lp->tx_stat_sync);
 
-	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
-	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
-
 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
 	if (!lp->axi_clk) {
 		/* For backward compatibility, if named AXI clock is not present,
@@ -2008,80 +2039,85 @@ static int axienet_probe(struct platform_device *pdev)
 		goto cleanup_clk;
 	}
 
-	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
-	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
-	if (np) {
-		struct resource dmares;
+	if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
 
-		ret = of_address_to_resource(np, 0, &dmares);
-		if (ret) {
-			dev_err(&pdev->dev,
-				"unable to get DMA resource\n");
+		if (np) {
+			struct resource dmares;
+
+			ret = of_address_to_resource(np, 0, &dmares);
+			if (ret) {
+				dev_err(&pdev->dev,
+					"unable to get DMA resource\n");
+				of_node_put(np);
+				goto cleanup_clk;
+			}
+			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+							     &dmares);
+			lp->rx_irq = irq_of_parse_and_map(np, 1);
+			lp->tx_irq = irq_of_parse_and_map(np, 0);
 			of_node_put(np);
+			lp->eth_irq = platform_get_irq_optional(pdev, 0);
+		} else {
+			/* Check for these resources directly on the Ethernet node. */
+			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+			lp->rx_irq = platform_get_irq(pdev, 1);
+			lp->tx_irq = platform_get_irq(pdev, 0);
+			lp->eth_irq = platform_get_irq_optional(pdev, 2);
+		}
+		if (IS_ERR(lp->dma_regs)) {
+			dev_err(&pdev->dev, "could not map DMA regs\n");
+			ret = PTR_ERR(lp->dma_regs);
+			goto cleanup_clk;
+		}
+		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
+			dev_err(&pdev->dev, "could not determine irqs\n");
+			ret = -ENOMEM;
 			goto cleanup_clk;
 		}
-		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
-						     &dmares);
-		lp->rx_irq = irq_of_parse_and_map(np, 1);
-		lp->tx_irq = irq_of_parse_and_map(np, 0);
-		of_node_put(np);
-		lp->eth_irq = platform_get_irq_optional(pdev, 0);
-	} else {
-		/* Check for these resources directly on the Ethernet node. */
-		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
-		lp->rx_irq = platform_get_irq(pdev, 1);
-		lp->tx_irq = platform_get_irq(pdev, 0);
-		lp->eth_irq = platform_get_irq_optional(pdev, 2);
-	}
-	if (IS_ERR(lp->dma_regs)) {
-		dev_err(&pdev->dev, "could not map DMA regs\n");
-		ret = PTR_ERR(lp->dma_regs);
-		goto cleanup_clk;
-	}
-	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
-		dev_err(&pdev->dev, "could not determine irqs\n");
-		ret = -ENOMEM;
-		goto cleanup_clk;
-	}
 
-	/* Reset core now that clocks are enabled, prior to accessing MDIO */
-	ret = __axienet_device_reset(lp);
-	if (ret)
-		goto cleanup_clk;
+		/* Reset core now that clocks are enabled, prior to accessing MDIO */
+		ret = __axienet_device_reset(lp);
+		if (ret)
+			goto cleanup_clk;
+
+		/* Autodetect the need for 64-bit DMA pointers.
+		 * When the IP is configured for a bus width bigger than 32 bits,
+		 * writing the MSB registers is mandatory, even if they are all 0.
+		 * We can detect this case by writing all 1's to one such register
+		 * and see if that sticks: when the IP is configured for 32 bits
+		 * only, those registers are RES0.
+		 * Those MSB registers were introduced in IP v7.1, which we check first.
+		 */
+		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
 
-	/* Autodetect the need for 64-bit DMA pointers.
-	 * When the IP is configured for a bus width bigger than 32 bits,
-	 * writing the MSB registers is mandatory, even if they are all 0.
-	 * We can detect this case by writing all 1's to one such register
-	 * and see if that sticks: when the IP is configured for 32 bits
-	 * only, those registers are RES0.
-	 * Those MSB registers were introduced in IP v7.1, which we check first.
-	 */
-	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
-		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
-
-		iowrite32(0x0, desc);
-		if (ioread32(desc) == 0) {	/* sanity check */
-			iowrite32(0xffffffff, desc);
-			if (ioread32(desc) > 0) {
-				lp->features |= XAE_FEATURE_DMA_64BIT;
-				addr_width = 64;
-				dev_info(&pdev->dev,
-					 "autodetected 64-bit DMA range\n");
-			}
 			iowrite32(0x0, desc);
+			if (ioread32(desc) == 0) {	/* sanity check */
+				iowrite32(0xffffffff, desc);
+				if (ioread32(desc) > 0) {
+					lp->features |= XAE_FEATURE_DMA_64BIT;
+					addr_width = 64;
+					dev_info(&pdev->dev,
+						 "autodetected 64-bit DMA range\n");
+				}
+				iowrite32(0x0, desc);
+			}
+		}
+		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
+			ret = -EINVAL;
+			goto cleanup_clk;
 		}
-	}
-	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
-		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
-		ret = -EINVAL;
-		goto cleanup_clk;
-	}
 
-	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
-	if (ret) {
-		dev_err(&pdev->dev, "No suitable DMA available\n");
-		goto cleanup_clk;
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+		if (ret) {
+			dev_err(&pdev->dev, "No suitable DMA available\n");
+			goto cleanup_clk;
+		}
+		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
 	}
 
 	/* Check for Ethernet core IRQ (optional) */
@@ -2099,8 +2135,8 @@ static int axienet_probe(struct platform_device *pdev)
 	}
 
 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
-	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
 	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
 
 	ret = axienet_mdio_setup(lp);