
Commit 6b1b40f

Sarath Babu Naidu Gaddam authored and kuba-moo committed
net: axienet: Preparatory changes for dmaengine support
The axiethernet driver has inbuilt DMA programming. In order to add
dmaengine support and make its integration seamless, the current axidma
inbuilt programming code is put under a use_dmaengine check. The patch
also performs minor code reordering to minimize conditional
use_dmaengine checks; there is no functional change. The "dmas" property
is used to identify whether the driver should use the dmaengine
framework or the inbuilt axidma programming.

Signed-off-by: Sarath Babu Naidu Gaddam <sarath.babu.naidu.gaddam@amd.com>
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
Link: https://lore.kernel.org/r/1700074613-1977070-3-git-send-email-radhey.shyam.pandey@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
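For orientation, the gist of the change is that the inbuilt Axi DMA path is only set up when the Ethernet node carries no "dmas" property. A minimal sketch of that device-tree check is shown below; the helper name axienet_node_has_dmaengine() is hypothetical, and deriving lp->use_dmaengine from it is an assumption on my part (this patch only introduces the flag and the conditional paths), so treat it as illustration rather than code from the commit.

#include <linux/of.h>
#include <linux/platform_device.h>

/* Sketch only, not from this commit: report whether the device-tree node
 * describes a DMA engine through a "dmas" property, i.e. whether the
 * inbuilt axidma register/IRQ setup should be skipped.
 */
static bool axienet_node_has_dmaengine(struct platform_device *pdev)
{
	return of_find_property(pdev->dev.of_node, "dmas", NULL) != NULL;
}

In the probe() hunk further down, the same check appears directly as if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) around the legacy register mapping, IRQ parsing and NAPI registration; presumably a later patch in the series sets lp->use_dmaengine based on it.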
1 parent 5e63c5e commit 6b1b40f

2 files changed: +162, -124 lines


drivers/net/ethernet/xilinx/xilinx_axienet.h

Lines changed: 2 additions & 0 deletions
@@ -435,6 +435,7 @@ struct axidma_bd {
  * @coalesce_usec_rx: IRQ coalesce delay for RX
  * @coalesce_count_tx: Store the irq coalesce on TX side.
  * @coalesce_usec_tx: IRQ coalesce delay for TX
+ * @use_dmaengine: flag to check dmaengine framework usage.
  */
 struct axienet_local {
 	struct net_device *ndev;
@@ -499,6 +500,7 @@ struct axienet_local {
 	u32 coalesce_usec_rx;
 	u32 coalesce_count_tx;
 	u32 coalesce_usec_tx;
+	u8  use_dmaengine;
 };
 
 /**

drivers/net/ethernet/xilinx/xilinx_axienet_main.c

Lines changed: 160 additions & 124 deletions
@@ -589,10 +589,6 @@ static int axienet_device_reset(struct net_device *ndev)
 	struct axienet_local *lp = netdev_priv(ndev);
 	int ret;
 
-	ret = __axienet_device_reset(lp);
-	if (ret)
-		return ret;
-
 	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
 	lp->options |= XAE_OPTION_VLAN;
 	lp->options &= (~XAE_OPTION_JUMBO);
@@ -606,11 +602,17 @@ static int axienet_device_reset(struct net_device *ndev)
 		lp->options |= XAE_OPTION_JUMBO;
 	}
 
-	ret = axienet_dma_bd_init(ndev);
-	if (ret) {
-		netdev_err(ndev, "%s: descriptor allocation failed\n",
-			   __func__);
-		return ret;
+	if (!lp->use_dmaengine) {
+		ret = __axienet_device_reset(lp);
+		if (ret)
+			return ret;
+
+		ret = axienet_dma_bd_init(ndev);
+		if (ret) {
+			netdev_err(ndev, "%s: descriptor allocation failed\n",
+				   __func__);
+			return ret;
+		}
 	}
 
 	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -1125,41 +1127,21 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
 static void axienet_dma_err_handler(struct work_struct *work);
 
 /**
- * axienet_open - Driver open routine.
- * @ndev:	Pointer to net_device structure
+ * axienet_init_legacy_dma - init the dma legacy code.
+ * @ndev:       Pointer to net_device structure
  *
  * Return: 0, on success.
- *	    non-zero error value on failure
+ *          non-zero error value on failure
+ *
+ * This is the dma initialization code. It also allocates interrupt
+ * service routines, enables the interrupt lines and ISR handling.
  *
- * This is the driver open routine. It calls phylink_start to start the
- * PHY device.
- * It also allocates interrupt service routines, enables the interrupt lines
- * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
- * descriptors are initialized.
  */
-static int axienet_open(struct net_device *ndev)
+static int axienet_init_legacy_dma(struct net_device *ndev)
 {
 	int ret;
 	struct axienet_local *lp = netdev_priv(ndev);
 
-	dev_dbg(&ndev->dev, "axienet_open()\n");
-
-	/* When we do an Axi Ethernet reset, it resets the complete core
-	 * including the MDIO. MDIO must be disabled before resetting.
-	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
-	 */
-	axienet_lock_mii(lp);
-	ret = axienet_device_reset(ndev);
-	axienet_unlock_mii(lp);
-
-	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
-	if (ret) {
-		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
-		return ret;
-	}
-
-	phylink_start(lp->phylink);
-
 	/* Enable worker thread for Axi DMA error handling */
 	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
 
@@ -1193,13 +1175,61 @@ static int axienet_open(struct net_device *ndev)
 err_tx_irq:
 	napi_disable(&lp->napi_tx);
 	napi_disable(&lp->napi_rx);
-	phylink_stop(lp->phylink);
-	phylink_disconnect_phy(lp->phylink);
 	cancel_work_sync(&lp->dma_err_task);
 	dev_err(lp->dev, "request_irq() failed\n");
 	return ret;
 }
 
+/**
+ * axienet_open - Driver open routine.
+ * @ndev:	Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ *	    non-zero error value on failure
+ *
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
+ * It also allocates interrupt service routines, enables the interrupt lines
+ * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
+ * descriptors are initialized.
+ */
+static int axienet_open(struct net_device *ndev)
+{
+	int ret;
+	struct axienet_local *lp = netdev_priv(ndev);
+
+	dev_dbg(&ndev->dev, "%s\n", __func__);
+
+	/* When we do an Axi Ethernet reset, it resets the complete core
+	 * including the MDIO. MDIO must be disabled before resetting.
+	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+	 */
+	axienet_lock_mii(lp);
+	ret = axienet_device_reset(ndev);
+	axienet_unlock_mii(lp);
+
+	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+	if (ret) {
+		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+		return ret;
+	}
+
+	phylink_start(lp->phylink);
+
+	if (!lp->use_dmaengine) {
+		ret = axienet_init_legacy_dma(ndev);
+		if (ret)
+			goto err_phy;
+	}
+
+	return 0;
+
+err_phy:
+	phylink_stop(lp->phylink);
+	phylink_disconnect_phy(lp->phylink);
+	return ret;
+}
+
 /**
  * axienet_stop - Driver stop routine.
  * @ndev:	Pointer to net_device structure
@@ -1216,27 +1246,29 @@ static int axienet_stop(struct net_device *ndev)
 
 	dev_dbg(&ndev->dev, "axienet_close()\n");
 
-	napi_disable(&lp->napi_tx);
-	napi_disable(&lp->napi_rx);
+	if (!lp->use_dmaengine) {
+		napi_disable(&lp->napi_tx);
+		napi_disable(&lp->napi_rx);
+	}
 
 	phylink_stop(lp->phylink);
 	phylink_disconnect_phy(lp->phylink);
 
 	axienet_setoptions(ndev, lp->options &
 			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 
-	axienet_dma_stop(lp);
+	if (!lp->use_dmaengine) {
+		axienet_dma_stop(lp);
+		cancel_work_sync(&lp->dma_err_task);
+		free_irq(lp->tx_irq, ndev);
+		free_irq(lp->rx_irq, ndev);
+		axienet_dma_bd_release(ndev);
+	}
 
 	axienet_iow(lp, XAE_IE_OFFSET, 0);
 
-	cancel_work_sync(&lp->dma_err_task);
-
 	if (lp->eth_irq > 0)
 		free_irq(lp->eth_irq, ndev);
-	free_irq(lp->tx_irq, ndev);
-	free_irq(lp->rx_irq, ndev);
-
-	axienet_dma_bd_release(ndev);
 	return 0;
 }
 
@@ -1412,14 +1444,16 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
 	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
 	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
-	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
-	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
-	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
-	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
-	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
-	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
-	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
-	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+	if (!lp->use_dmaengine) {
+		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+	}
 }
 
 static void
@@ -1880,9 +1914,6 @@ static int axienet_probe(struct platform_device *pdev)
 	u64_stats_init(&lp->rx_stat_sync);
 	u64_stats_init(&lp->tx_stat_sync);
 
-	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
-	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
-
 	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
 	if (!lp->axi_clk) {
 		/* For backward compatibility, if named AXI clock is not present,
@@ -2008,80 +2039,85 @@ static int axienet_probe(struct platform_device *pdev)
 		goto cleanup_clk;
 	}
 
-	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
-	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
-	if (np) {
-		struct resource dmares;
+	if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
 
-		ret = of_address_to_resource(np, 0, &dmares);
-		if (ret) {
-			dev_err(&pdev->dev,
-				"unable to get DMA resource\n");
+		if (np) {
+			struct resource dmares;
+
+			ret = of_address_to_resource(np, 0, &dmares);
+			if (ret) {
+				dev_err(&pdev->dev,
+					"unable to get DMA resource\n");
+				of_node_put(np);
+				goto cleanup_clk;
+			}
+			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+							     &dmares);
+			lp->rx_irq = irq_of_parse_and_map(np, 1);
+			lp->tx_irq = irq_of_parse_and_map(np, 0);
 			of_node_put(np);
+			lp->eth_irq = platform_get_irq_optional(pdev, 0);
+		} else {
+			/* Check for these resources directly on the Ethernet node. */
+			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+			lp->rx_irq = platform_get_irq(pdev, 1);
+			lp->tx_irq = platform_get_irq(pdev, 0);
+			lp->eth_irq = platform_get_irq_optional(pdev, 2);
+		}
+		if (IS_ERR(lp->dma_regs)) {
+			dev_err(&pdev->dev, "could not map DMA regs\n");
+			ret = PTR_ERR(lp->dma_regs);
+			goto cleanup_clk;
+		}
+		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
+			dev_err(&pdev->dev, "could not determine irqs\n");
+			ret = -ENOMEM;
 			goto cleanup_clk;
 		}
-		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
-						     &dmares);
-		lp->rx_irq = irq_of_parse_and_map(np, 1);
-		lp->tx_irq = irq_of_parse_and_map(np, 0);
-		of_node_put(np);
-		lp->eth_irq = platform_get_irq_optional(pdev, 0);
-	} else {
-		/* Check for these resources directly on the Ethernet node. */
-		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
-		lp->rx_irq = platform_get_irq(pdev, 1);
-		lp->tx_irq = platform_get_irq(pdev, 0);
-		lp->eth_irq = platform_get_irq_optional(pdev, 2);
-	}
-	if (IS_ERR(lp->dma_regs)) {
-		dev_err(&pdev->dev, "could not map DMA regs\n");
-		ret = PTR_ERR(lp->dma_regs);
-		goto cleanup_clk;
-	}
-	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
-		dev_err(&pdev->dev, "could not determine irqs\n");
-		ret = -ENOMEM;
-		goto cleanup_clk;
-	}
 
-	/* Reset core now that clocks are enabled, prior to accessing MDIO */
-	ret = __axienet_device_reset(lp);
-	if (ret)
-		goto cleanup_clk;
+		/* Reset core now that clocks are enabled, prior to accessing MDIO */
+		ret = __axienet_device_reset(lp);
+		if (ret)
+			goto cleanup_clk;
+
+		/* Autodetect the need for 64-bit DMA pointers.
+		 * When the IP is configured for a bus width bigger than 32 bits,
+		 * writing the MSB registers is mandatory, even if they are all 0.
+		 * We can detect this case by writing all 1's to one such register
+		 * and see if that sticks: when the IP is configured for 32 bits
+		 * only, those registers are RES0.
+		 * Those MSB registers were introduced in IP v7.1, which we check first.
+		 */
+		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
 
-	/* Autodetect the need for 64-bit DMA pointers.
-	 * When the IP is configured for a bus width bigger than 32 bits,
-	 * writing the MSB registers is mandatory, even if they are all 0.
-	 * We can detect this case by writing all 1's to one such register
-	 * and see if that sticks: when the IP is configured for 32 bits
-	 * only, those registers are RES0.
-	 * Those MSB registers were introduced in IP v7.1, which we check first.
-	 */
-	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
-		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
-
-		iowrite32(0x0, desc);
-		if (ioread32(desc) == 0) {	/* sanity check */
-			iowrite32(0xffffffff, desc);
-			if (ioread32(desc) > 0) {
-				lp->features |= XAE_FEATURE_DMA_64BIT;
-				addr_width = 64;
-				dev_info(&pdev->dev,
-					 "autodetected 64-bit DMA range\n");
-			}
 			iowrite32(0x0, desc);
+			if (ioread32(desc) == 0) {	/* sanity check */
+				iowrite32(0xffffffff, desc);
+				if (ioread32(desc) > 0) {
+					lp->features |= XAE_FEATURE_DMA_64BIT;
+					addr_width = 64;
+					dev_info(&pdev->dev,
+						 "autodetected 64-bit DMA range\n");
+				}
+				iowrite32(0x0, desc);
+			}
+		}
+		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
+			ret = -EINVAL;
+			goto cleanup_clk;
 		}
-	}
-	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
-		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
-		ret = -EINVAL;
-		goto cleanup_clk;
-	}
 
-	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
-	if (ret) {
-		dev_err(&pdev->dev, "No suitable DMA available\n");
-		goto cleanup_clk;
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+		if (ret) {
+			dev_err(&pdev->dev, "No suitable DMA available\n");
+			goto cleanup_clk;
+		}
+		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
 	}
 
 	/* Check for Ethernet core IRQ (optional) */
@@ -2099,8 +2135,8 @@ static int axienet_probe(struct platform_device *pdev)
 	}
 
 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
-	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
 	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
 
 	ret = axienet_mdio_setup(lp);

0 commit comments
