From 3f71a51749bf5b1b0b61493c952f9f43823a66ee Mon Sep 17 00:00:00 2001 From: Vijayakannan Ayyathurai Date: Thu, 10 Jul 2025 21:55:00 +0800 Subject: [PATCH 1/5] drivers: ethernet: Add platform driver for MMIO mapping reuse The Ethernet device model consists of multiple subsystem components, such as MDIO, PHY, MAC and PTP_CLOCK. These components are mapped into a single PCIe BAR location with same base address. This platform driver retrieves the MMIO mapping details and provides a framework to share it with all the child subsystem components. This approach avoid the duplicate remapping, ensures efficient re-use of MMIO mappings across related devices. Example device tree structure for first ethernet instance: parent0: parent0 { compatible = "intel,eth-plat"; interrupt-parent = <&intc>; vendor-id = <0x8086>; device-id = <0xXXXX>; igc0: igc0 { compatible = "intel,igc-mac"; /* * MAC specific properties. */ status = "okay"; }; mdio0: mdio0 { compatible = "intel,igc-mdio"; #address-cells = <1>; #size-cells = <0>; ethphy0: ethernet-phy@0 { compatible = "ethernet-phy"; /* * PHY specific properties. */ reg = <0x0>; }; }; }; This framework is modular and re-usable for other PCIe based Ethernet devices. It can also be extended to support additional platform specific information shared across child nodes. Signed-off-by: Vijayakannan Ayyathurai --- drivers/ethernet/CMakeLists.txt | 1 + drivers/ethernet/Kconfig | 1 + drivers/ethernet/intel/CMakeLists.txt | 4 + drivers/ethernet/intel/Kconfig | 10 ++ drivers/ethernet/intel/eth_intel_plat.c | 95 +++++++++++++++++++ dts/bindings/ethernet/intel,eth-plat.yaml | 8 ++ .../zephyr/drivers/ethernet/eth_intel_plat.h | 28 ++++++ 7 files changed, 147 insertions(+) create mode 100644 drivers/ethernet/intel/CMakeLists.txt create mode 100644 drivers/ethernet/intel/Kconfig create mode 100644 drivers/ethernet/intel/eth_intel_plat.c create mode 100644 dts/bindings/ethernet/intel,eth-plat.yaml create mode 100644 include/zephyr/drivers/ethernet/eth_intel_plat.h diff --git a/drivers/ethernet/CMakeLists.txt b/drivers/ethernet/CMakeLists.txt index 9fdfb6c566f7..10e95d7d16b3 100644 --- a/drivers/ethernet/CMakeLists.txt +++ b/drivers/ethernet/CMakeLists.txt @@ -65,3 +65,4 @@ add_subdirectory(phy) add_subdirectory(eth_nxp_enet_qos) add_subdirectory(nxp_imx_netc) add_subdirectory(dwc_xgmac) +add_subdirectory(intel) diff --git a/drivers/ethernet/Kconfig b/drivers/ethernet/Kconfig index b771cd106f8a..ea5633dcce6f 100644 --- a/drivers/ethernet/Kconfig +++ b/drivers/ethernet/Kconfig @@ -86,6 +86,7 @@ source "drivers/ethernet/phy/Kconfig" source "drivers/ethernet/nxp_imx_netc/Kconfig" source "drivers/ethernet/Kconfig.renesas_ra" +source "drivers/ethernet/intel/Kconfig" endif # "Ethernet Drivers" diff --git a/drivers/ethernet/intel/CMakeLists.txt b/drivers/ethernet/intel/CMakeLists.txt new file mode 100644 index 000000000000..519bf5534780 --- /dev/null +++ b/drivers/ethernet/intel/CMakeLists.txt @@ -0,0 +1,4 @@ +# Copyright (c) 2025 Intel Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +zephyr_library_sources_ifdef(CONFIG_ETH_INTEL_PLAT eth_intel_plat.c) diff --git a/drivers/ethernet/intel/Kconfig b/drivers/ethernet/intel/Kconfig new file mode 100644 index 000000000000..921cb9a68c50 --- /dev/null +++ b/drivers/ethernet/intel/Kconfig @@ -0,0 +1,10 @@ +# Copyright (c) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +config ETH_INTEL_PLAT + bool "Intel Ethernet parent Platform device driver" + depends on DT_HAS_INTEL_ETH_PLAT_ENABLED + help + Enable Platform driver to retrieve the MMIO mapping details and + share them with all the child devices such as MDIO, PHY, MAC + and PTP_CLOCK. diff --git a/drivers/ethernet/intel/eth_intel_plat.c b/drivers/ethernet/intel/eth_intel_plat.c new file mode 100644 index 000000000000..68afc43781a0 --- /dev/null +++ b/drivers/ethernet/intel/eth_intel_plat.c @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2025 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include + +#include +LOG_MODULE_REGISTER(intel_eth_plat, CONFIG_ETHERNET_LOG_LEVEL); + +#define DT_DRV_COMPAT intel_eth_plat + +/* Device id supported in igc */ +enum i226_sku { + INTEL_IGC_I226_LMVP = 0x5503, + INTEL_IGC_I226_LM = 0x125B, + INTEL_IGC_I226_V = 0x125C, + INTEL_IGC_I226_IT = 0x125D, + INTEL_IGC_I226_BLANK_NVM = 0x125F, +}; + +struct intel_eth_plat_cfg { + struct pcie_dev *pcie; +}; + +struct intel_eth_plat_data { + DEVICE_MMIO_RAM; + mm_reg_t base; +}; + +uint32_t eth_intel_get_pcie_bdf(const struct device *dev) +{ + const struct intel_eth_plat_cfg *cfg = dev->config; + + return cfg->pcie->bdf; +} + +static int eth_intel_validate_sku(const struct device *dev) +{ + const struct intel_eth_plat_cfg *cfg = dev->config; + pcie_id_t pcie_id = cfg->pcie->id; + + switch (PCIE_ID_TO_DEV(pcie_id)) { + case INTEL_IGC_I226_LMVP: + case INTEL_IGC_I226_LM: + case INTEL_IGC_I226_V: + case INTEL_IGC_I226_IT: + return 0; + case INTEL_IGC_I226_BLANK_NVM: + default: + break; + } + + LOG_ERR("SKU validation failed & pcie_id is %x", pcie_id); + + return -EIO; +} + +static int intel_eth_plat_init(const struct device *dev) +{ + const struct intel_eth_plat_cfg *cfg = dev->config; + struct pcie_bar mbar; + int ret; + + ret = eth_intel_validate_sku(dev); + if (ret < 0) { + return ret; + } + + if (cfg->pcie->bdf == PCIE_BDF_NONE || !pcie_probe_mbar(cfg->pcie->bdf, 0, &mbar)) { + LOG_ERR("Cannot get mbar"); + return -ENOENT; + } + + pcie_set_cmd(cfg->pcie->bdf, PCIE_CONF_CMDSTAT_MEM | PCIE_CONF_CMDSTAT_MASTER, true); + + device_map(DEVICE_MMIO_RAM_PTR(dev), mbar.phys_addr, mbar.size, K_MEM_CACHE_NONE); + + return 0; +} + +#define INTEL_ETH_PLAT_INIT(n) \ + DEVICE_PCIE_INST_DECLARE(n); \ + static struct intel_eth_plat_data plat_data_##n; \ + static const struct intel_eth_plat_cfg plat_cfg_##n = { \ + DEVICE_PCIE_INST_INIT(n, pcie), \ + }; \ + DEVICE_DT_INST_DEFINE(n, intel_eth_plat_init, NULL, &plat_data_##n, &plat_cfg_##n, \ + POST_KERNEL, CONFIG_PCIE_INIT_PRIORITY, NULL); + +DT_INST_FOREACH_STATUS_OKAY(INTEL_ETH_PLAT_INIT) diff --git a/dts/bindings/ethernet/intel,eth-plat.yaml b/dts/bindings/ethernet/intel,eth-plat.yaml new file mode 100644 index 000000000000..9bdcbf917853 --- /dev/null +++ b/dts/bindings/ethernet/intel,eth-plat.yaml @@ -0,0 +1,8 @@ +# Copyright (c) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +description: Intel Ethernet parent platform device. 
+ +compatible: "intel,eth-plat" + +include: [base.yaml, pcie-device.yaml] diff --git a/include/zephyr/drivers/ethernet/eth_intel_plat.h b/include/zephyr/drivers/ethernet/eth_intel_plat.h new file mode 100644 index 000000000000..23045ed047ae --- /dev/null +++ b/include/zephyr/drivers/ethernet/eth_intel_plat.h @@ -0,0 +1,28 @@ +/* + * Ethernet Platform Utilities for Intel Devices + * + * This module provides utility functions to interact with the PCIe features + * of Intel Ethernet devices, facilitating the retrieval of device-specific + * PCIe configuration details such as BDF (Bus/Device/Function) and device ID. + * + * Copyright (c) 2025 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_DRIVERS_ETH_INTEL_PLAT_H__ +#define ZEPHYR_INCLUDE_DRIVERS_ETH_INTEL_PLAT_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Retrieve the PCIe Bus/Device/Function of the device. + * + * @param dev Pointer to the device structure. + * @return PCIe BDF address as a uint32_t. + */ +extern uint32_t eth_intel_get_pcie_bdf(const struct device *dev); + +#endif /* ZEPHYR_INCLUDE_DRIVERS_ETH_INTEL_PLAT_H__ */ From 8b6fdef7f2147f0490945f20c42e6e0289da54bf Mon Sep 17 00:00:00 2001 From: Vijayakannan Ayyathurai Date: Thu, 10 Jul 2025 21:55:31 +0800 Subject: [PATCH 2/5] drivers: mdio: Add Intel i226 MDIO driver support Intel i226 MAC supports MDIO C22 and MDIO C45. Standard PHY registers are accessible through MDIO C22, whereas PMAPMD and PCS are accssible through MDIO C45. Signed-off-by: Vijayakannan Ayyathurai --- drivers/mdio/CMakeLists.txt | 1 + drivers/mdio/Kconfig | 1 + drivers/mdio/Kconfig.intel_igc | 20 +++ drivers/mdio/mdio_intel_igc.c | 177 ++++++++++++++++++++++++++ dts/bindings/mdio/intel,igc-mdio.yaml | 8 ++ 5 files changed, 207 insertions(+) create mode 100644 drivers/mdio/Kconfig.intel_igc create mode 100644 drivers/mdio/mdio_intel_igc.c create mode 100644 dts/bindings/mdio/intel,igc-mdio.yaml diff --git a/drivers/mdio/CMakeLists.txt b/drivers/mdio/CMakeLists.txt index fc3ce620c202..ee772e79f807 100644 --- a/drivers/mdio/CMakeLists.txt +++ b/drivers/mdio/CMakeLists.txt @@ -20,3 +20,4 @@ zephyr_library_sources_ifdef(CONFIG_MDIO_RENESAS_RA mdio_renesas_ra.c) zephyr_library_sources_ifdef(CONFIG_MDIO_LAN865X mdio_lan865x.c) zephyr_library_sources_ifdef(CONFIG_MDIO_SENSRY_SY1XX mdio_sy1xx.c) zephyr_library_sources_ifdef(CONFIG_MDIO_XILINX_AXI_ENET mdio_xilinx_axienet.c) +zephyr_library_sources_ifdef(CONFIG_MDIO_INTEL_IGC mdio_intel_igc.c) diff --git a/drivers/mdio/Kconfig b/drivers/mdio/Kconfig index 658bbe78b7b9..8b842ac6a8ed 100644 --- a/drivers/mdio/Kconfig +++ b/drivers/mdio/Kconfig @@ -41,6 +41,7 @@ source "drivers/mdio/Kconfig.renesas_ra" source "drivers/mdio/Kconfig.lan865x" source "drivers/mdio/Kconfig.sy1xx" source "drivers/mdio/Kconfig.xilinx_axienet" +source "drivers/mdio/Kconfig.intel_igc" config MDIO_INIT_PRIORITY int "Init priority" diff --git a/drivers/mdio/Kconfig.intel_igc b/drivers/mdio/Kconfig.intel_igc new file mode 100644 index 000000000000..6d99970cf6fd --- /dev/null +++ b/drivers/mdio/Kconfig.intel_igc @@ -0,0 +1,20 @@ +# Copyright 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +menuconfig MDIO_INTEL_IGC + bool "Intel IGC MDIO driver" + default y + depends on DT_HAS_INTEL_IGC_MDIO_ENABLED + help + Enable Intel IGC MDIO driver. 
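+	  Standard Clause 22 PHY registers are accessed directly through the
+	  MDIC register, while Clause 45 (PMA/PMD and PCS) accesses are
+	  tunnelled through the MMDCTRL/MMDDATA indirection registers.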
+ +if MDIO_INTEL_IGC + +config MDIO_INTEL_BUSY_CHECK_TIMEOUT + int "MDIO_INTEL_IGC busy wait timeout" + default 10000 + help + This timeout in microseconds, specifies the duration to wait for the + completion of an MDIO read or write cycle. + +endif # MDIO_INTEL_IGC diff --git a/drivers/mdio/mdio_intel_igc.c b/drivers/mdio/mdio_intel_igc.c new file mode 100644 index 000000000000..74e63c76d065 --- /dev/null +++ b/drivers/mdio/mdio_intel_igc.c @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2025 Intel Corporation. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#define DT_DRV_COMPAT intel_igc_mdio + +#include +#include +#include +#include + +#include +LOG_MODULE_REGISTER(intel_igc_mdio, CONFIG_MDIO_LOG_LEVEL); + +#define INTEL_IGC_MDIC_OFFSET 0x00020 +#define INTEL_IGC_MDIC_DATA_MASK GENMASK(15, 0) +#define INTEL_IGC_MDIC_REG_MASK GENMASK(20, 16) +#define INTEL_IGC_MDIC_PHY_MASK GENMASK(25, 21) +#define INTEL_IGC_MDIC_OP_MASK GENMASK(27, 26) +#define INTEL_IGC_MDIC_READY BIT(28) +#define INTEL_IGC_MMDCTRL 0xD +#define INTEL_IGC_MMDCTRL_ACTYPE_MASK GENMASK(15, 14) +#define INTEL_IGC_MMDCTRL_DEVAD_MASK GENMASK(4, 0) +#define INTEL_IGC_MMDDATA 0xE +#define INTEL_IGC_DEFAULT_DEVNUM 0 + +struct intel_igc_mdio_cfg { + const struct device *const platform; +}; + +struct intel_igc_mdio_data { + struct k_mutex mutex; +}; + +static int intel_igc_mdio(const struct device *dev, uint32_t command) +{ + const struct intel_igc_mdio_cfg *cfg = dev->config; + struct intel_igc_mdio_data *data = dev->data; + mm_reg_t mdic; + int ret; + + mdic = DEVICE_MMIO_GET(cfg->platform) + INTEL_IGC_MDIC_OFFSET; + + k_mutex_lock(&data->mutex, K_FOREVER); + sys_write32(command, mdic); + /* Wait for the read or write transaction to complete */ + if (!WAIT_FOR((sys_read32(mdic) & INTEL_IGC_MDIC_READY), + CONFIG_MDIO_INTEL_BUSY_CHECK_TIMEOUT, k_usleep(1))) { + LOG_ERR("MDIC operation timed out"); + k_mutex_unlock(&data->mutex); + return -ETIMEDOUT; + } + ret = sys_read32(mdic); + k_mutex_unlock(&data->mutex); + + return ret; +} + +static int intel_igc_mdio_read(const struct device *dev, uint8_t prtad, uint8_t regad, + uint16_t *user_data) +{ + int ret = 0; + uint32_t command = FIELD_PREP(INTEL_IGC_MDIC_PHY_MASK, prtad) | + FIELD_PREP(INTEL_IGC_MDIC_REG_MASK, regad) | + FIELD_PREP(INTEL_IGC_MDIC_OP_MASK, MDIO_OP_C22_READ); + + ret = intel_igc_mdio(dev, command); + if (ret < 0) { + return ret; + } + + *user_data = FIELD_GET(INTEL_IGC_MDIC_DATA_MASK, ret); + + return 0; +} + +static int intel_igc_mdio_write(const struct device *dev, uint8_t prtad, uint8_t regad, + uint16_t user_data) +{ + int ret; + + uint32_t command = FIELD_PREP(INTEL_IGC_MDIC_PHY_MASK, prtad) | + FIELD_PREP(INTEL_IGC_MDIC_REG_MASK, regad) | + FIELD_PREP(INTEL_IGC_MDIC_OP_MASK, MDIO_OP_C22_WRITE) | + FIELD_PREP(INTEL_IGC_MDIC_DATA_MASK, user_data); + + ret = intel_igc_mdio(dev, command); + + return ret < 0 ? 
ret : 0; +} + +static int intel_igc_mdio_pre_handle_c45(const struct device *dev, uint8_t prtad, uint8_t devnum, + uint16_t regad) +{ + int ret; + + /* Set device number using MMDCTRL */ + ret = intel_igc_mdio_write(dev, prtad, INTEL_IGC_MMDCTRL, + (uint16_t)(FIELD_PREP(INTEL_IGC_MMDCTRL_DEVAD_MASK, devnum))); + if (ret < 0) { + return ret; + } + + /* Set register address using MMDDATA */ + ret = intel_igc_mdio_write(dev, prtad, INTEL_IGC_MMDDATA, regad); + if (ret < 0) { + return ret; + } + + /* Set device number and access type as data using MMDCTRL */ + return intel_igc_mdio_write(dev, prtad, INTEL_IGC_MMDCTRL, + (uint16_t)(FIELD_PREP(INTEL_IGC_MMDCTRL_ACTYPE_MASK, 1) | + FIELD_PREP(INTEL_IGC_MMDCTRL_DEVAD_MASK, devnum))); +} + +static int intel_igc_mdio_post_handle_c45(const struct device *dev, uint8_t prtad) +{ + /* Restore default device number using MMDCTRL */ + return intel_igc_mdio_write(dev, prtad, INTEL_IGC_MMDCTRL, INTEL_IGC_DEFAULT_DEVNUM); +} + +static int intel_igc_mdio_read_c45(const struct device *dev, uint8_t prtad, uint8_t devnum, + uint16_t regad, uint16_t *user_data) +{ + int ret = intel_igc_mdio_pre_handle_c45(dev, prtad, devnum, regad); + + if (ret < 0) { + return ret; + } + + /* Read user data using MMDDATA */ + ret = intel_igc_mdio_read(dev, prtad, INTEL_IGC_MMDDATA, user_data); + if (ret < 0) { + return ret; + } + + return intel_igc_mdio_post_handle_c45(dev, prtad); +} + +static int intel_igc_mdio_write_c45(const struct device *dev, uint8_t prtad, uint8_t devnum, + uint16_t regad, uint16_t user_data) +{ + int ret = intel_igc_mdio_pre_handle_c45(dev, prtad, devnum, regad); + + if (ret < 0) { + return ret; + } + + /* Write the user_data using MMDDATA */ + ret = intel_igc_mdio_write(dev, prtad, INTEL_IGC_MMDDATA, user_data); + if (ret < 0) { + return ret; + } + + return intel_igc_mdio_post_handle_c45(dev, prtad); +} + +static DEVICE_API(mdio, mdio_api) = { + .read = intel_igc_mdio_read, + .write = intel_igc_mdio_write, + .read_c45 = intel_igc_mdio_read_c45, + .write_c45 = intel_igc_mdio_write_c45, +}; + +#define INTEL_IGC_MDIO_INIT(n) \ + static struct intel_igc_mdio_data mdio_data_##n = { \ + .mutex = Z_MUTEX_INITIALIZER(mdio_data_##n.mutex), \ + }; \ + static struct intel_igc_mdio_cfg mdio_cfg_##n = { \ + .platform = DEVICE_DT_GET(DT_INST_PARENT(n)), \ + }; \ + DEVICE_DT_INST_DEFINE(n, NULL, NULL, &mdio_data_##n, &mdio_cfg_##n, POST_KERNEL, \ + CONFIG_MDIO_INIT_PRIORITY, &mdio_api); + +DT_INST_FOREACH_STATUS_OKAY(INTEL_IGC_MDIO_INIT) diff --git a/dts/bindings/mdio/intel,igc-mdio.yaml b/dts/bindings/mdio/intel,igc-mdio.yaml new file mode 100644 index 000000000000..f8a64e8666f3 --- /dev/null +++ b/dts/bindings/mdio/intel,igc-mdio.yaml @@ -0,0 +1,8 @@ +# Copyright 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +description: Intel IGC MDIO device. + +compatible: "intel,igc-mdio" + +include: [mdio-controller.yaml, base.yaml] From e4e9f4a1e485058f9ff023f16811b2797c6d55b3 Mon Sep 17 00:00:00 2001 From: Vijayakannan Ayyathurai Date: Thu, 10 Jul 2025 21:55:48 +0800 Subject: [PATCH 3/5] drivers: ethernet: intel: Add i226 Ethernet MAC device driver The Intel i226 Ethernet Controller is a PCIe Gen 2 one-lane modular endpoint device that integrates a GbE Media Access Control (MAC) and Physical Layer (PHY) port. This driver provides support for MAC and DMA-specific initialization and runtime TX/RX operations. Key features: - MSI-X interrupts for TX/RX DMA channels. - Multiple TX/RX DMA channel support with exclusive bottom-half. 
- Implements a circular descriptor ring architechture with producer-consumer semantics for high performance pkt processing. - Full duplex support for 10/100/1000 Mbps. - Half duplex support for 10/100 Mbps. - Auto-negotiation for 10/100/1000 Mbps. - MTU customization for flexible packet sizes. - MAC address filtering based on: - Random MAC generation. - Local-mac-address mentioned in device tree. - EEPROM pre-programmed mac address. - Setting mac address via net shell. - Support for multiple Ethernet interface instances. Signed-off-by: Vijayakannan Ayyathurai Signed-off-by: Ling Pei Lee --- drivers/ethernet/intel/CMakeLists.txt | 1 + drivers/ethernet/intel/Kconfig | 2 + drivers/ethernet/intel/Kconfig.intel_igc | 29 + drivers/ethernet/intel/eth_intel_igc.c | 1333 +++++++++++++++++++ drivers/ethernet/intel/eth_intel_igc_priv.h | 500 +++++++ dts/bindings/ethernet/intel,igc-mac.yaml | 26 + 6 files changed, 1891 insertions(+) create mode 100644 drivers/ethernet/intel/Kconfig.intel_igc create mode 100644 drivers/ethernet/intel/eth_intel_igc.c create mode 100644 drivers/ethernet/intel/eth_intel_igc_priv.h create mode 100644 dts/bindings/ethernet/intel,igc-mac.yaml diff --git a/drivers/ethernet/intel/CMakeLists.txt b/drivers/ethernet/intel/CMakeLists.txt index 519bf5534780..ff35a0600610 100644 --- a/drivers/ethernet/intel/CMakeLists.txt +++ b/drivers/ethernet/intel/CMakeLists.txt @@ -1,4 +1,5 @@ # Copyright (c) 2025 Intel Corporation. # SPDX-License-Identifier: Apache-2.0 +zephyr_library_sources_ifdef(CONFIG_ETH_INTEL_IGC eth_intel_igc.c) zephyr_library_sources_ifdef(CONFIG_ETH_INTEL_PLAT eth_intel_plat.c) diff --git a/drivers/ethernet/intel/Kconfig b/drivers/ethernet/intel/Kconfig index 921cb9a68c50..3052e5b39b36 100644 --- a/drivers/ethernet/intel/Kconfig +++ b/drivers/ethernet/intel/Kconfig @@ -8,3 +8,5 @@ config ETH_INTEL_PLAT Enable Platform driver to retrieve the MMIO mapping details and share them with all the child devices such as MDIO, PHY, MAC and PTP_CLOCK. + +source "drivers/ethernet/intel/Kconfig.intel_igc" diff --git a/drivers/ethernet/intel/Kconfig.intel_igc b/drivers/ethernet/intel/Kconfig.intel_igc new file mode 100644 index 000000000000..a8d37b5adcfd --- /dev/null +++ b/drivers/ethernet/intel/Kconfig.intel_igc @@ -0,0 +1,29 @@ +# Copyright (c) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +menuconfig ETH_INTEL_IGC + bool "Intel IGC MAC driver" + default y + depends on DT_HAS_INTEL_IGC_MAC_ENABLED + select MDIO + select PCIE_MSI_MULTI_VECTOR + select PCIE_MSI_X + help + Ethernet device driver for Intel i226 MAC. + +if ETH_INTEL_IGC + +config ETH_INTEL_IGC_INT_PRIORITY + int "Interrupt priority" + default 6 + help + MSI-X vectors priority for IGC interrupts. + +config ETH_INTEL_IGC_NET_MTU + int "MTU size" + default 1500 + range 64 1522 + help + Maximum Transmission Unit (MTU) size configuration. + +endif # ETH_INTEL_IGC diff --git a/drivers/ethernet/intel/eth_intel_igc.c b/drivers/ethernet/intel/eth_intel_igc.c new file mode 100644 index 000000000000..6a2ee666e15a --- /dev/null +++ b/drivers/ethernet/intel/eth_intel_igc.c @@ -0,0 +1,1333 @@ +/* + * Copyright (c) 2025 Intel Corporation. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +LOG_MODULE_REGISTER(eth_intel_igc, CONFIG_ETHERNET_LOG_LEVEL); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../eth.h" +#include "eth_intel_igc_priv.h" + +#define DT_DRV_COMPAT intel_igc_mac + +/** + * @brief Amend the register value as per the mask and set/clear the bit. + */ +static void igc_modify(mm_reg_t base, uint32_t offset, uint32_t config, bool set) +{ + uint32_t val = sys_read32(base + offset); + + if (set) { + val |= config; + } else { + val &= ~config; + } + + sys_write32(val, base + offset); +} + +/** + * @brief Significant register changes required another register operation + * to take effect. This status register read mimics that logic. + */ +static void igc_reg_refresh(mm_reg_t base) +{ + sys_read32(base + INTEL_IGC_STATUS); +} + +/** + * @brief Get the index of a specific transmit descriptor within the ring. + * This getter also works for multiple queues. + */ +static uint16_t get_tx_desc_idx(const struct device *dev, union dma_tx_desc *current_desc, + uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + union dma_tx_desc *tx_desc_base; + + tx_desc_base = (union dma_tx_desc *)&data->tx.desc[queue * cfg->num_tx_desc]; + + return (current_desc - tx_desc_base); +} + +/** + * @brief Get the index of a specific receive descriptor within the ring. + * This getter also works for multiple queues. + */ +static uint16_t get_rx_desc_idx(const struct device *dev, union dma_rx_desc *current_desc, + uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + union dma_rx_desc *rx_desc_base; + + rx_desc_base = (union dma_rx_desc *)&data->rx.desc[queue * cfg->num_rx_desc]; + + return (current_desc - rx_desc_base); +} + +/** + * @brief Check if the next descriptor is unavailable for DMA operation. + */ +static bool is_desc_unavailable(uint32_t next_desc_idx, uint32_t current_rd_ptr_idx) +{ + return (next_desc_idx == current_rd_ptr_idx); +} + +/** + * @brief Check if the DMA operation is complete by using the writeback.dd bit. + */ +static bool is_dma_done(bool dd) +{ + return dd; +} + +/** + * @brief This function retrieves the next available transmit descriptor from the ring + * and ensures that the descriptor is available for DMA operation. + */ +static union dma_tx_desc *eth_intel_igc_get_tx_desc(const struct device *dev, uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t current_wr_idx, next_wr_idx; + union dma_tx_desc *desc; + + k_sem_take(&data->tx.sem[queue], K_FOREVER); + + current_wr_idx = data->tx.ring_wr_ptr[queue]; + next_wr_idx = (current_wr_idx + 1) % cfg->num_tx_desc; + + if (is_desc_unavailable(next_wr_idx, data->tx.ring_rd_ptr[queue])) { + k_sem_give(&data->tx.sem[queue]); + return NULL; + } + + desc = data->tx.desc + (queue * cfg->num_tx_desc + current_wr_idx); + data->tx.ring_wr_ptr[queue] = next_wr_idx; + + k_sem_give(&data->tx.sem[queue]); + + return desc; +} + +/** + * @brief This function checks if the DMA operation is complete by verifying the + * writeback.dd bit. If the DMA operation is complete, it updates the read pointer + * and clears the descriptor. 
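+ *
+ * If the descriptor at the current read index is still owned by hardware
+ * (its writeback DD bit is not yet set), NULL is returned and the read
+ * pointer is left untouched so the caller can retry later.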
+ */ +static union dma_tx_desc *eth_intel_igc_release_tx_desc(const struct device *dev, uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t current_rd_idx, next_rd_idx; + union dma_tx_desc *desc; + + k_sem_take(&data->tx.sem[queue], K_FOREVER); + + current_rd_idx = data->tx.ring_rd_ptr[queue]; + desc = data->tx.desc + (queue * cfg->num_tx_desc + current_rd_idx); + + if (is_dma_done(desc->writeback.dd)) { + next_rd_idx = (current_rd_idx + 1) % cfg->num_tx_desc; + data->tx.ring_rd_ptr[queue] = next_rd_idx; + memset((void *)desc, 0, sizeof(union dma_tx_desc)); + } else { + desc = NULL; + } + + k_sem_give(&data->tx.sem[queue]); + + return desc; +} + +/** + * @brief This function returns the total number of consumed transmit descriptors from + * overall transmit descriptor ring of the mentioned queue. + */ +static uint32_t eth_intel_igc_completed_txdesc_num(const struct device *dev, uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t rd_idx, count = 0; + union dma_tx_desc *desc; + + rd_idx = data->tx.ring_rd_ptr[queue]; + while (rd_idx != data->tx.ring_wr_ptr[queue]) { + desc = (data->tx.desc + (queue * cfg->num_tx_desc + rd_idx)); + if (!is_dma_done(desc->writeback.dd)) { + break; + } + rd_idx = (rd_idx + 1) % cfg->num_tx_desc; + count++; + } + + return count; +} + +/** + * @brief This function retrieves the next available receive descriptor from the ring + * and ensures that the descriptor is available for DMA operation. + */ +static union dma_rx_desc *eth_intel_igc_get_rx_desc(const struct device *dev, uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t current_wr_idx, next_wr_idx; + union dma_rx_desc *desc; + + k_sem_take(&data->rx.sem[queue], K_FOREVER); + + current_wr_idx = data->rx.ring_wr_ptr[queue]; + next_wr_idx = (current_wr_idx + 1) % cfg->num_rx_desc; + + if (is_desc_unavailable(next_wr_idx, data->rx.ring_rd_ptr[queue])) { + k_sem_give(&data->rx.sem[queue]); + return NULL; + } + + desc = data->rx.desc + (queue * cfg->num_rx_desc + current_wr_idx); + data->rx.ring_wr_ptr[queue] = next_wr_idx; + + k_sem_give(&data->rx.sem[queue]); + + return desc; +} + +/** + * @brief This function checks if the DMA operation is complete by verifying the + * writeback.dd bit. If the DMA operation is complete, it updates the read pointer + * and clears the descriptor. + */ +static union dma_rx_desc *eth_intel_igc_release_rx_desc(const struct device *dev, uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t current_rd_idx, next_rd_idx; + union dma_rx_desc *desc = NULL; + + k_sem_take(&data->rx.sem[queue], K_FOREVER); + + current_rd_idx = data->rx.ring_rd_ptr[queue]; + desc = data->rx.desc + (queue * cfg->num_rx_desc + current_rd_idx); + + if (is_dma_done(desc->writeback.dd)) { + next_rd_idx = (current_rd_idx + 1) % cfg->num_rx_desc; + data->rx.ring_rd_ptr[queue] = next_rd_idx; + } else { + desc = NULL; + } + + k_sem_give(&data->rx.sem[queue]); + + return desc; +} + +/** + * @brief This function return total number of consumed receive descriptors from overall + * receive descriptor ring of the mentioned queue. 
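+ *
+ * A descriptor counts as consumed once hardware has written it back with the
+ * DD (descriptor done) bit set; the walk stops at the first descriptor that
+ * is still owned by hardware.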
+ */ +static uint32_t eth_intel_igc_completed_rxdesc_num(const struct device *dev, uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + union dma_rx_desc *desc = NULL; + uint32_t idx, count = 0; + + idx = data->rx.ring_rd_ptr[queue]; + while (idx != data->rx.ring_wr_ptr[queue]) { + desc = (data->rx.desc + (queue * cfg->num_rx_desc + idx)); + if (!is_dma_done(desc->writeback.dd)) { + break; + } + idx = (idx + 1) % cfg->num_rx_desc; + count++; + } + + return count; +} + +/** + * @brief Interrupt Service Routine for handling queue interrupts. + */ +static void eth_intel_igc_queue_isr(void *arg) +{ + const struct eth_intel_igc_intr_info *info = (struct eth_intel_igc_intr_info *)arg; + const struct device *dev = info->mac; + struct eth_intel_igc_mac_data *data = dev->data; + uint8_t msix = info->id; + + igc_modify(data->base, INTEL_IGC_EICS, BIT(msix), false); + k_work_submit(&data->tx_work[msix]); + k_work_schedule(&data->rx_work[msix], K_MSEC(0)); + + sys_read32(data->base + INTEL_IGC_ICR); + igc_modify(data->base, INTEL_IGC_EIMS, BIT(msix), true); +} + +static int eth_intel_igc_connect_queue_msix_vector(pcie_bdf_t bdf, const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + + for (uint8_t msix = 0; msix < cfg->num_queues; msix++) { + struct eth_intel_igc_intr_info *info = &data->intr_info[msix]; + + info->id = msix; + info->mac = dev; + if (!pcie_msi_vector_connect(bdf, &data->msi_vec[msix], + (void *)eth_intel_igc_queue_isr, info, 0)) { + LOG_ERR("Failed to connect queue_%d interrupt handler", msix); + return -EIO; + } + } + + return 0; +} + +static int eth_intel_igc_pcie_msix_setup(const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + pcie_bdf_t bdf; + int ret; + + bdf = eth_intel_get_pcie_bdf(cfg->platform); + if (bdf == PCIE_BDF_NONE) { + LOG_ERR("Failed to get PCIe BDF"); + return -EINVAL; + } + + ret = pcie_msi_vectors_allocate(bdf, CONFIG_ETH_INTEL_IGC_INT_PRIORITY, data->msi_vec, + cfg->num_msix); + if (ret < cfg->num_msix) { + LOG_ERR("Failed to allocate MSI-X vectors"); + return -EIO; + } + + ret = eth_intel_igc_connect_queue_msix_vector(bdf, dev); + if (ret < 0) { + return ret; + } + + if (!pcie_msi_enable(bdf, data->msi_vec, cfg->num_msix, 0)) { + LOG_ERR("Failed to enable MSI-X vectors"); + return -EIO; + } + + return 0; +} + +/** + * @brief This function maps the IGC device interrupts order to MSI-X vectors. 
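+ *
+ * Each 32-bit IVAR register holds the vector assignments of two queues. With
+ * the one-vector-per-queue layout used here, IVAR(0) ends up as:
+ *   bits  7:0  - RX queue 0 vector (plus the interrupt-valid bit)
+ *   bits 15:8  - TX queue 0 vector
+ *   bits 23:16 - RX queue 1 vector
+ *   bits 31:24 - TX queue 1 vector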
+ */ +static void eth_intel_igc_map_intr_to_vector(const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t config, reg_idx; + + /* Set MSI-X capability */ + config = INTEL_IGC_GPIE_NSICR | INTEL_IGC_GPIE_MSIX_MODE | INTEL_IGC_GPIE_EIAME | + INTEL_IGC_GPIE_PBA; + igc_modify(data->base, INTEL_IGC_GPIE, config, true); + + /* Configure IVAR RX and TX for each queue interrupt */ + for (uint8_t msix = 0; msix < cfg->num_queues; msix++) { + reg_idx = msix >> 1; + config = sys_read32(data->base + (INTEL_IGC_IVAR_BASE_ADDR + (reg_idx << 2))); + + if (msix % 2) { + config &= INTEL_IGC_IVAR_MSI_CLEAR_TX1_TX3; + config |= (msix | INTEL_IGC_IVAR_INT_VALID_BIT) << 24; + config &= INTEL_IGC_IVAR_MSI_CLEAR_RX1_RX3; + config |= (msix | INTEL_IGC_IVAR_INT_VALID_BIT) << 16; + } else { + config &= INTEL_IGC_IVAR_MSI_CLEAR_TX0_TX2; + config |= (msix | INTEL_IGC_IVAR_INT_VALID_BIT) << 8; + config &= INTEL_IGC_IVAR_MSI_CLEAR_RX0_RX2; + config |= (msix | INTEL_IGC_IVAR_INT_VALID_BIT); + } + + sys_write32(config, data->base + (INTEL_IGC_IVAR_BASE_ADDR + (reg_idx << 2))); + } +} + +/** + * @brief Helper function to write MAC address to RAL and RAH registers. + */ +static void eth_intel_igc_set_mac_addr(mm_reg_t base, int index, const uint8_t *mac_addr, + uint32_t rah_config) +{ + uint32_t mac_addr_hi = (mac_addr[5] << 8) | mac_addr[4] | rah_config; + uint32_t mac_addr_lo = + (mac_addr[3] << 24) | (mac_addr[2] << 16) | (mac_addr[1] << 8) | mac_addr[0]; + + sys_write32(mac_addr_hi, base + INTEL_IGC_RAH(index)); + igc_reg_refresh(base); + sys_write32(mac_addr_lo, base + INTEL_IGC_RAL(index)); + igc_reg_refresh(base); +} + +/** + * @brief This function configures the MAC address filtering for the IGC, allowing it + * to filter packets based on the MAC address and filter mode. 
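+ *
+ * The queue-select (QSEL) field written into RAH steers frames matching this
+ * address entry to the given queue, and ASEL switches the entry from
+ * destination-address to source-address matching when SRC_ADDR mode is
+ * requested.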
+ */ +static void eth_intel_igc_set_mac_filter(const struct device *dev, + enum eth_igc_mac_filter_mode mode, const uint8_t *mac_addr, + int index, uint8_t queue) +{ + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t config = 0; + + /* Queue number Settings */ + config &= ~RAH_QSEL_MASK; + config |= ((queue << RAH_QSEL_SHIFT) | RAH_QSEL_ENABLE); + + if (mode == SRC_ADDR) { + config |= (config & ~RAH_ASEL_MASK) | RAH_ASEL_SRC_ADDR; + } + + config |= INTEL_IGC_RAH_AV; + eth_intel_igc_set_mac_addr(data->base, index, mac_addr, config); +} + +static void eth_intel_igc_phylink_cb(const struct device *phy, struct phy_link_state *state, + void *user_data) +{ + struct eth_intel_igc_mac_data *data = (struct eth_intel_igc_mac_data *)user_data; + + if (state->is_up) { + net_eth_carrier_on(data->iface); + } else { + net_eth_carrier_off(data->iface); + } +} + +static void eth_intel_igc_intr_enable(const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t config = 0; + + /* Clear pending interrupt */ + sys_read32(data->base + INTEL_IGC_ICR); + + /* Prepare bit mask of Queue interrupt */ + for (uint8_t msix = 0; msix < cfg->num_queues; msix++) { + config |= BIT(msix); + } + + sys_write32(config, data->base + INTEL_IGC_EIAC); + igc_modify(data->base, INTEL_IGC_EIAM, config, true); + sys_write32(config, data->base + INTEL_IGC_EIMS); + igc_reg_refresh(data->base); +} + +static void eth_intel_igc_iface_init(struct net_if *iface) +{ + const struct device *dev = net_if_get_device(iface); + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + + /* Set interface */ + data->iface = iface; + + /* Initialize ethernet L2 */ + ethernet_init(iface); + + /* Set MAC address */ + if (net_if_set_link_addr(data->iface, data->mac_addr, sizeof(data->mac_addr), + NET_LINK_ETHERNET) < 0) { + LOG_ERR("Failed to set mac address"); + return; + } + + eth_intel_igc_set_mac_filter(dev, DEST_ADDR, data->mac_addr, 0, 0); + + if (device_is_ready(cfg->phy)) { + phy_link_callback_set(cfg->phy, eth_intel_igc_phylink_cb, (void *)data); + } else { + LOG_ERR("PHY device is not ready"); + return; + } + + eth_intel_igc_intr_enable(dev); +} + +static enum ethernet_hw_caps eth_intel_igc_get_caps(const struct device *dev) +{ + ARG_UNUSED(dev); + + return ETHERNET_LINK_10BASE | ETHERNET_LINK_100BASE | ETHERNET_LINK_1000BASE; +} + +static int eth_intel_igc_set_config(const struct device *dev, enum ethernet_config_type type, + const struct ethernet_config *eth_cfg) +{ + struct eth_intel_igc_mac_data *data = dev->data; + + if (type == ETHERNET_CONFIG_TYPE_MAC_ADDRESS) { + memcpy(data->mac_addr, eth_cfg->mac_address.addr, sizeof(eth_cfg->mac_address)); + net_if_set_link_addr(data->iface, data->mac_addr, sizeof(data->mac_addr), + NET_LINK_ETHERNET); + eth_intel_igc_set_mac_filter(dev, DEST_ADDR, data->mac_addr, 0, 0); + return 0; + } + + return -ENOTSUP; +} + +static const struct device *eth_intel_igc_get_phy(const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + + return cfg->phy; +} + +#if defined(CONFIG_NET_STATISTICS_ETHERNET) +static struct net_stats_eth *eth_intel_igc_get_stats(const struct device *dev) +{ + struct eth_intel_igc_mac_data *data = dev->data; + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct net_stats_eth *stats = &data->stats; + + stats->bytes.sent += sys_read32(data->base + INTEL_IGC_TOTL); + stats->bytes.received += 
sys_read32(data->base + INTEL_IGC_TORL); + stats->pkts.tx += sys_read32(data->base + INTEL_IGC_TPT); + stats->pkts.rx += sys_read32(data->base + INTEL_IGC_TPR); + stats->broadcast.tx += sys_read32(data->base + INTEL_IGC_BPTC); + stats->broadcast.rx += sys_read32(data->base + INTEL_IGC_BPRC); + stats->multicast.tx += sys_read32(data->base + INTEL_IGC_MPTC); + stats->multicast.rx += sys_read32(data->base + INTEL_IGC_MPRC); + stats->errors.rx += sys_read32(data->base + INTEL_IGC_RERC); + stats->error_details.rx_length_errors += sys_read32(data->base + INTEL_IGC_RLEC); + stats->error_details.rx_crc_errors += sys_read32(data->base + INTEL_IGC_CRCERRS); + stats->error_details.rx_frame_errors += sys_read32(data->base + INTEL_IGC_RJC); + stats->error_details.rx_no_buffer_count += sys_read32(data->base + INTEL_IGC_RNBC); + stats->error_details.rx_long_length_errors += sys_read32(data->base + INTEL_IGC_ROC); + stats->error_details.rx_short_length_errors += sys_read32(data->base + INTEL_IGC_RUC); + stats->error_details.rx_align_errors += sys_read32(data->base + INTEL_IGC_ALGNERRC); + stats->error_details.rx_buf_alloc_failed += sys_read32(data->base + INTEL_IGC_MPC); + stats->error_details.tx_aborted_errors += sys_read32(data->base + INTEL_IGC_DC); + stats->flow_control.rx_flow_control_xon += sys_read32(data->base + INTEL_IGC_XONRXC); + stats->flow_control.rx_flow_control_xoff += sys_read32(data->base + INTEL_IGC_XOFFRXC); + stats->flow_control.tx_flow_control_xon += sys_read32(data->base + INTEL_IGC_XONTXC); + stats->flow_control.tx_flow_control_xoff += sys_read32(data->base + INTEL_IGC_XOFFTXC); + stats->collisions += sys_read32(data->base + INTEL_IGC_COLC); + for (uint8_t queue = 0; queue < cfg->num_queues; queue++) { + stats->tx_dropped += sys_read32(data->base + INTEL_IGC_TQDPC(queue)); + } + + return &data->stats; +} +#endif /* CONFIG_NET_STATISTICS_ETHERNET */ + +/** + * @brief This function releases completed transmit descriptors, cleans up the associated + * net_buf and net_pkt, and frees any allocated resources. + */ +static void eth_intel_igc_tx_clean(struct eth_intel_igc_mac_data *data, uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = data->mac->config; + uint32_t idx = 0, clean_count; + union dma_tx_desc *desc; + + clean_count = eth_intel_igc_completed_txdesc_num(data->mac, queue); + for (uint8_t count = 0; count < clean_count; count++) { + desc = eth_intel_igc_release_tx_desc(data->mac, queue); + if (desc == NULL) { + LOG_ERR("No more transmit descriptor available to release"); + continue; + } + + idx = get_tx_desc_idx(data->mac, desc, queue); + net_pkt_frag_unref(*(data->tx.frag + queue * cfg->num_tx_desc + idx)); + net_pkt_unref(*(data->tx.pkt + queue * cfg->num_tx_desc + idx)); + } +} + +/** + * @brief This function retrieves the next available transmit descriptor from the ring, + * sets up the descriptor with the fragment data, and updates the write pointer. + */ +static int eth_intel_igc_tx_frag(const struct device *dev, struct net_pkt *pkt, + struct net_buf *frag, uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint16_t total_len = net_pkt_get_len(pkt); + union dma_tx_desc *desc; + uint32_t idx = 0; + + desc = eth_intel_igc_get_tx_desc(dev, queue); + if (desc == NULL) { + LOG_ERR("No more transmit descriptors available"); + return -ENOMEM; + } + + idx = get_tx_desc_idx(dev, desc, queue); + /* Store the pkt and header frag, then clear it during transmit clean. 
*/ + *(data->tx.frag + queue * cfg->num_tx_desc + idx) = frag->frags ? 0 : pkt->frags; + *(data->tx.pkt + queue * cfg->num_tx_desc + idx) = frag->frags ? 0 : pkt; + + desc->read.data_buf_addr = (uint64_t)sys_cpu_to_le64((uint64_t)frag->data); + /* Copy the total payload length */ + desc->read.payloadlen = total_len; + /* Copy the particular frag's buffer length */ + desc->read.data_len = frag->len; + desc->read.desc_type = INTEL_IGC_TX_DESC_TYPE; + desc->read.ifcs = true; + desc->read.dext = true; + + /* DMA engine processes the entire packet as a single unit */ + if (frag->frags == NULL) { + desc->read.eop = true; + desc->read.rs = true; + + sys_write32(data->tx.ring_wr_ptr[queue], data->base + INTEL_IGC_TDT(queue)); + } + + return 0; +} + +/** + * @brief This function handles the network packet transmission by processing each + * fragmented frames and sending it through appropriate queue. + */ +static int eth_intel_igc_tx_data(const struct device *dev, struct net_pkt *pkt) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint8_t queue = 0; + int ret = 0; + + if (!net_if_is_up(data->iface)) { + LOG_ERR("Ethernet interface is down"); + return -ENETDOWN; + } + + /* Hold the packet until transmit clean done. */ + net_pkt_ref(pkt); + + queue = net_tx_priority2tc(net_pkt_priority(pkt)); + if (queue >= cfg->num_queues) { + queue = cfg->num_queues - 1; + } + + for (struct net_buf *frag = pkt->frags; frag; frag = frag->frags) { + /* Hold the Header fragment until transmit clean done */ + if (frag == pkt->frags) { + net_pkt_frag_ref(frag); + } + + ret = eth_intel_igc_tx_frag(dev, pkt, frag, queue); + if (ret < 0) { + LOG_ERR("Failed to transmit in queue number: %d", queue); + } + } + + return ret; +} + +/** + * @brief Identify the address family of received packets as per header type. + */ +static sa_family_t eth_intel_igc_get_sa_family(uint8_t *rx_buf) +{ + struct net_eth_hdr *eth_hdr = (struct net_eth_hdr *)rx_buf; + + switch (ntohs(eth_hdr->type)) { + case NET_ETH_PTYPE_IP: + return AF_INET; + case NET_ETH_PTYPE_IPV6: + return AF_INET6; + default: + break; + } + + return AF_UNSPEC; +} + +/** + * @brief This function updates the tail pointer of the RX descriptor ring, retrieves the + * next available RX descriptor, and prepare it for receiving incoming packets by setting + * the packet buffer address. + */ +static void eth_intel_igc_rx_data_hdl(struct eth_intel_igc_mac_data *data, uint8_t queue, + uint32_t idx, union dma_rx_desc *desc) +{ + const struct eth_intel_igc_mac_cfg *cfg = data->mac->config; + + sys_write32(idx, data->base + INTEL_IGC_RDT(queue)); + desc = eth_intel_igc_get_rx_desc(data->mac, queue); + if (desc == NULL) { + LOG_ERR("No more rx descriptor available"); + return; + } + + /* Find descriptor position and prepare it for next DMA cycle */ + idx = get_rx_desc_idx(data->mac, desc, queue); + desc->read.pkt_buf_addr = (uint64_t)sys_cpu_to_le64( + (uint64_t)(data->rx.buf + (queue * cfg->num_rx_desc + idx) * ETH_MAX_FRAME_SZ)); + desc->writeback.dd = 0; +} + +static void eth_intel_igc_rx_data_hdl_err(struct eth_intel_igc_mac_data *data, uint8_t queue, + uint32_t idx, union dma_rx_desc *desc, + struct net_pkt *pkt) +{ + net_pkt_unref(pkt); + eth_intel_igc_rx_data_hdl(data, queue, idx, desc); +} + +/** + * @brief This function retrieves completed receive descriptors, allocates net_pkt + * buffers, copies the received data into the buffers, and submits the packets to + * the network stack. 
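+ *
+ * The payload of each completed descriptor is copied out of the fixed DMA
+ * buffer into a freshly allocated net_pkt before the descriptor is re-armed,
+ * so the DMA buffers can be reused immediately.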
+ */ +static void eth_intel_igc_rx_data(struct eth_intel_igc_mac_data *data, uint8_t queue) +{ + const struct eth_intel_igc_mac_cfg *cfg = data->mac->config; + uint32_t count, idx = 0, completed_count = 0; + uint8_t *rx_buf; + union dma_rx_desc *desc; + struct net_pkt *pkt; + int ret; + + completed_count = eth_intel_igc_completed_rxdesc_num(data->mac, queue); + for (count = 0; count < completed_count; count++) { + /* Retrieve the position of next descriptor to be processed */ + idx = data->rx.ring_rd_ptr[queue]; + desc = eth_intel_igc_release_rx_desc(data->mac, queue); + if (desc == NULL) { + LOG_ERR("RX descriptor is NULL"); + continue; + } + + if (!net_if_is_up(data->iface) || !desc->writeback.pkt_len) { + LOG_ERR("RX interface is down or pkt_len is %d", desc->writeback.pkt_len); + eth_intel_igc_rx_data_hdl_err(data, queue, idx, desc, NULL); + continue; + } + + /* Get the DMA buffer pointer as per index */ + rx_buf = data->rx.buf + ((queue * cfg->num_rx_desc + idx) * ETH_MAX_FRAME_SZ); + + /* Allocate packet buffer as per address family */ + pkt = net_pkt_rx_alloc_with_buffer(data->iface, desc->writeback.pkt_len, + eth_intel_igc_get_sa_family(rx_buf), 0, + K_MSEC(200)); + if (pkt == NULL) { + LOG_ERR("Failed to allocate Receive buffer"); + eth_intel_igc_rx_data_hdl_err(data, queue, idx, desc, NULL); + continue; + } + + /* Write DMA buffer to packet */ + ret = net_pkt_write(pkt, (void *)rx_buf, desc->writeback.pkt_len); + if (ret < 0) { + LOG_ERR("Failed to write Receive buffer to packet"); + eth_intel_igc_rx_data_hdl_err(data, queue, idx, desc, pkt); + continue; + } + + /* Process received packet */ + ret = net_recv_data(data->iface, pkt); + if (ret < 0) { + LOG_ERR("Failed to enqueue the Receive packet: %d", queue); + eth_intel_igc_rx_data_hdl_err(data, queue, idx, desc, pkt); + continue; + } + + eth_intel_igc_rx_data_hdl(data, queue, idx, desc); + } +} + +/** + * @brief Configure and Enable the Transmit Control Register. + */ +static void eth_intel_igc_tctl_setup(mm_reg_t tctl) +{ + uint32_t config = sys_read32(tctl); + + config &= ~INTEL_IGC_TCTL_CT; + config |= INTEL_IGC_TCTL_EN | INTEL_IGC_TCTL_PSP | INTEL_IGC_TCTL_RTLC | + INTEL_IGC_COLLISION_THRESHOLD << INTEL_IGC_TCTL_CT_SHIFT; + + sys_write32(config, tctl); +} + +/** + * @brief IGC expects the DMA descriptors to be aligned to 128-byte boundaries for efficient + * access and to avoid potential data corruption or performance issues. + */ +static void *eth_intel_igc_aligned_alloc(uint32_t size) +{ + void *desc_base = k_aligned_alloc(4096, size); + + if (desc_base == NULL) { + return NULL; + } + memset(desc_base, 0, size); + + return desc_base; +} + +/** + * @brief This function initializes the transmit DMA descriptor ring for all queues. 
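+ *
+ * For each queue this programs the ring length (TDLEN) and 64-bit base
+ * address (TDBAH/TDBAL), resets the head/tail pointers (TDH/TDT) and then
+ * re-enables the ring in TXDCTL together with its prefetch, host and
+ * writeback thresholds.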
+ */ +static void eth_intel_igc_init_tx_desc_ring(const struct eth_intel_igc_mac_cfg *cfg, + struct eth_intel_igc_mac_data *data) +{ + uint64_t desc_addr; + uint32_t config; + + for (uint8_t queue = 0; queue < cfg->num_queues; queue++) { + k_sem_init(&data->tx.sem[queue], 1, 1); + + /* Disable the transmit descriptor ring */ + sys_write32(0, data->base + INTEL_IGC_TXDCTL(queue)); + igc_reg_refresh(data->base); + + /* Program the transmit descriptor ring total length */ + sys_write32(cfg->num_tx_desc * sizeof(union dma_tx_desc), + data->base + INTEL_IGC_TDLEN(queue)); + + /* Program the descriptor base address */ + desc_addr = (uint64_t)(data->tx.desc + (queue * cfg->num_tx_desc)); + sys_write32((uint32_t)(desc_addr >> 32), data->base + INTEL_IGC_TDBAH(queue)); + sys_write32((uint32_t)desc_addr, data->base + INTEL_IGC_TDBAL(queue)); + + /* Reset Head and Tail Descriptor Pointers */ + sys_write32(0, data->base + INTEL_IGC_TDH(queue)); + sys_write32(0, data->base + INTEL_IGC_TDT(queue)); + + /* Configure TX DMA interrupt trigger thresholds */ + config = INTEL_IGC_TX_PTHRESH_VAL << INTEL_IGC_TX_PTHRESH_SHIFT | + INTEL_IGC_TX_HTHRESH_VAL << INTEL_IGC_TX_HTHRESH_SHIFT | + INTEL_IGC_TX_WTHRESH_VAL << INTEL_IGC_TX_WTHRESH_SHIFT; + /* Enable the transmit descriptor ring */ + config |= INTEL_IGC_TXDCTL_QUEUE_ENABLE; + sys_write32(config, data->base + INTEL_IGC_TXDCTL(queue)); + } +} + +/** + * @brief This function initializes the transmit DMA descriptor ring. + * It sets up the descriptor base addresses, lengths, and control registers. + */ +static int eth_intel_igc_tx_dma_init(const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t size; + + /* Calculate the total size of the TX descriptor buffer */ + size = cfg->num_queues * cfg->num_tx_desc * sizeof(union dma_tx_desc); + /* Allocate memory for the TX descriptor buffer */ + size = ROUND_UP(size, sizeof(union dma_tx_desc)); + data->tx.desc = (union dma_tx_desc *)eth_intel_igc_aligned_alloc(size); + if (data->tx.desc == NULL) { + LOG_ERR("Transmit descriptor buffer alloc failed"); + return -ENOBUFS; + } + + eth_intel_igc_init_tx_desc_ring(cfg, data); + + /* Configure internal transmit descriptor buffer size */ + sys_write32(INTEL_IGC_TXPBS_TXPBSIZE_DEFAULT, data->base + INTEL_IGC_TXPBS); + eth_intel_igc_tctl_setup(data->base + INTEL_IGC_TCTL); + + return 0; +} + +/** + * @brief Configure and Enable the Receive Control Register. + */ +static void eth_intel_igc_rctl_setup(mm_reg_t rctl) +{ + uint32_t config = sys_read32(rctl); + + /* Multicast / Unicast Table Offset */ + config &= ~(0x3 << INTEL_IGC_RCTL_MO_SHIFT); + /* Do not store bad packets */ + config &= ~INTEL_IGC_RCTL_SBP; + /* Turn off VLAN filters */ + config &= ~INTEL_IGC_RCTL_VFE; + config |= INTEL_IGC_RCTL_EN | INTEL_IGC_RCTL_BAM | + /* Strip the CRC */ + INTEL_IGC_RCTL_SECRC | INTEL_IGC_RCTL_SZ_2048; + + sys_write32(config, rctl); +} + +/** + * @brief This function ensures that each Receive DMA descriptor is populated with + * a pre-allocated packet buffer, enabling the device to receive and store incoming + * packets efficiently. 
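+ *
+ * One ETH_MAX_FRAME_SZ buffer is carved out of a single allocation per
+ * descriptor, and the tail pointer (RDT) is left at num_rx_desc - 1 so that
+ * all but one descriptor are immediately available to hardware.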
+ */ +static int eth_intel_igc_rx_desc_prepare(const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + union dma_rx_desc *desc; + uint8_t *buf; + + /* Allocate memory for receive DMA buffer */ + data->rx.buf = (uint8_t *)k_calloc(cfg->num_queues * cfg->num_rx_desc, ETH_MAX_FRAME_SZ); + if (data->rx.buf == NULL) { + LOG_ERR("Receive DMA buffer alloc failed"); + return -ENOBUFS; + } + + /* Assign allocated memory to each descriptor as per it's index */ + for (uint8_t queue = 0; queue < cfg->num_queues; queue++) { + desc = &data->rx.desc[queue * cfg->num_rx_desc]; + + for (uint32_t desc_idx = 0; desc_idx < cfg->num_rx_desc; desc_idx++) { + /* Set the pkt buffer address */ + buf = data->rx.buf + + ((queue * cfg->num_rx_desc + desc_idx) * ETH_MAX_FRAME_SZ); + + desc[desc_idx].read.pkt_buf_addr = (uint64_t)sys_cpu_to_le64((uint64_t)buf); + desc[desc_idx].read.hdr_buf_addr = (uint64_t)&desc[desc_idx]; + } + + /* Update tail pointer in hardware and copy the same for driver reference */ + sys_write32(cfg->num_rx_desc - 1, data->base + INTEL_IGC_RDT(queue)); + data->rx.ring_wr_ptr[queue] = cfg->num_rx_desc - 1; + } + + return 0; +} + +/** + * @brief This function initializes the receive DMA descriptor ring for all queues. + */ +static void eth_intel_igc_init_rx_desc_ring(const struct eth_intel_igc_mac_cfg *cfg, + struct eth_intel_igc_mac_data *data) +{ + uint64_t desc_addr; + uint32_t config; + + for (uint8_t queue = 0; queue < cfg->num_queues; queue++) { + k_sem_init(&data->rx.sem[queue], 1, 1); + + /* Disable the receive descriptor ring */ + sys_write32(0, data->base + INTEL_IGC_RXDCTL(queue)); + igc_reg_refresh(data->base); + + /* Program the receive descriptor ring total length */ + sys_write32(cfg->num_rx_desc * sizeof(union dma_rx_desc), + data->base + INTEL_IGC_RDLEN(queue)); + + /* Program the descriptor base address */ + desc_addr = (uint64_t)(data->rx.desc + (queue * cfg->num_rx_desc)); + sys_write32((uint32_t)(desc_addr >> 32), data->base + INTEL_IGC_RDBAH(queue)); + sys_write32((uint32_t)desc_addr, data->base + INTEL_IGC_RDBAL(queue)); + + /* Configure the receive descriptor control */ + config = INTEL_IGC_SRRCTL_BSIZEPKT(ETH_MAX_FRAME_SZ); + config |= INTEL_IGC_SRRCTL_BSIZEHDR(INTEL_IGC_RXBUFFER_256); + config |= INTEL_IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; + config |= INTEL_IGC_SRRCTL_DROP_EN; + sys_write32(config, data->base + INTEL_IGC_SRRCTL(queue)); + + /* Reset Head and Tail Descriptor Pointers */ + sys_write32(0, data->base + INTEL_IGC_RDH(queue)); + sys_write32(0, data->base + INTEL_IGC_RDT(queue)); + + config = sys_read32(data->base + INTEL_IGC_RXDCTL(queue)); + config &= INTEL_IGC_RX_THRESH_RESET; + /* Configure RX DMA interrupt trigger thresholds */ + config |= INTEL_IGC_RX_PTHRESH_VAL << INTEL_IGC_RX_PTHRESH_SHIFT | + INTEL_IGC_RX_HTHRESH_VAL << INTEL_IGC_RX_HTHRESH_SHIFT | + INTEL_IGC_RX_WTHRESH_VAL << INTEL_IGC_RX_WTHRESH_SHIFT; + /* Enable the receive descriptor ring */ + config |= INTEL_IGC_RXDCTL_QUEUE_ENABLE; + sys_write32(config, data->base + INTEL_IGC_RXDCTL(queue)); + igc_reg_refresh(data->base); + } +} + +/** + * @brief This function initializes the receive descriptor ring. + * It sets up the descriptor base address, length, and control registers. + */ +static int eth_intel_igc_rx_dma_init(const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + uint32_t size; + int ret; + + /* + * TODO: enable RSS mapping. 
+ * TODO: enable interrupt moderation here. + * TODO: enable Checksum offload here. + * TODO: enable VLAN offload here. + */ + + /* Disable receive logic until descriptor setup */ + igc_modify(data->base, INTEL_IGC_RCTL, INTEL_IGC_RCTL_EN, false); + sys_write32(0, data->base + INTEL_IGC_RXCSUM); + + /* Calculate the total size of the RX descriptor buffer */ + size = cfg->num_queues * cfg->num_rx_desc * sizeof(union dma_rx_desc); + size = ROUND_UP(size, sizeof(union dma_rx_desc)); + + /* Allocate memory for the RX descriptor buffer */ + data->rx.desc = (union dma_rx_desc *)eth_intel_igc_aligned_alloc(size); + if (data->rx.desc == NULL) { + LOG_ERR("Receive descriptor buffer alloc failed"); + return -ENOBUFS; + } + + eth_intel_igc_init_rx_desc_ring(cfg, data); + + /* Configure internal receive descriptor buffer size */ + sys_write32(INTEL_IGC_RXPBS_RXPBSIZE_DEFAULT, data->base + INTEL_IGC_RXPBS); + + ret = eth_intel_igc_rx_desc_prepare(dev); + if (ret < 0) { + LOG_ERR("Receive descriptor prepare failed"); + return ret; + } + eth_intel_igc_rctl_setup(data->base + INTEL_IGC_RCTL); + + return ret; +} + +/** + * @brief This function validates the MAC address and returns true on success. + */ +static bool eth_intel_igc_is_valid_mac_addr(uint8_t *mac_addr) +{ + if (UNALIGNED_GET((uint32_t *)mac_addr) == INTEL_IGC_DEF_MAC_ADDR) { + return false; + } + + if (UNALIGNED_GET((uint32_t *)(mac_addr + 0)) == 0x00000000 && + UNALIGNED_GET((uint16_t *)(mac_addr + 4)) == 0x0000) { + LOG_DBG("Invalid Mac Address"); + return false; + } + + if (mac_addr[0] & 0x01) { + LOG_DBG("Multicast MAC address"); + return false; + } + + return true; +} + +/* @brief When the device is configured to use MAC address from EEPROM, i226 firmware + * will populate both RAL and RAH with the user provided MAC address. 
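+ *
+ * RAL(0) holds the first four octets of the MAC address and the low 16 bits
+ * of RAH(0) hold the remaining two, so the preloaded address is read back
+ * from receive address entry 0.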
+ */ +static void eth_intel_igc_get_preloaded_mac_addr(mm_reg_t base, uint8_t *mac_addr) +{ + uint32_t mac_addr_hi, mac_addr_lo; + + mac_addr_lo = sys_read32(base + INTEL_IGC_RAL(0)); + mac_addr_hi = sys_read32(base + INTEL_IGC_RAH(0)); + + mac_addr[0] = (uint8_t)(mac_addr_lo & 0xFF); + mac_addr[1] = (uint8_t)((mac_addr_lo >> 8) & 0xFF); + mac_addr[2] = (uint8_t)((mac_addr_lo >> 16) & 0xFF); + mac_addr[3] = (uint8_t)((mac_addr_lo >> 24) & 0xFF); + + mac_addr[4] = (uint8_t)(mac_addr_hi & 0xFF); + mac_addr[5] = (uint8_t)((mac_addr_hi >> 8) & 0xFF); +} + +static void eth_intel_igc_get_mac_addr(const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + + if (cfg->random_mac_address) { + LOG_INF("Assign Random MAC address"); + gen_random_mac(data->mac_addr, 0, 0xA0, 0xC9); + return; + } + + if (eth_intel_igc_is_valid_mac_addr(data->mac_addr)) { + LOG_INF("Assign MAC address from Device Tree"); + return; + } + + eth_intel_igc_get_preloaded_mac_addr(data->base, data->mac_addr); + if (eth_intel_igc_is_valid_mac_addr(data->mac_addr)) { + LOG_INF("Assign MAC address from EEPROM"); + return; + } +} + +static void eth_intel_igc_rx_addrs_init(const struct device *dev) +{ + struct eth_intel_igc_mac_data *data = dev->data; + uint8_t reset_addr[NET_ETH_ADDR_LEN] = {0}; + uint16_t rar_count = 128; + + eth_intel_igc_get_mac_addr(dev); + LOG_INF("IGC MAC addr %02X:%02X:%02X:%02X:%02X:%02X", data->mac_addr[0], data->mac_addr[1], + data->mac_addr[2], data->mac_addr[3], data->mac_addr[4], data->mac_addr[5]); + + /* Program valid MAC address in index 0 */ + eth_intel_igc_set_mac_addr(data->base, 0, data->mac_addr, INTEL_IGC_RAH_AV); + + /* Program reset_addr to unused rar index */ + for (uint8_t rar = 1; rar < rar_count; rar++) { + eth_intel_igc_set_mac_addr(data->base, rar, reset_addr, INTEL_IGC_RAH_AV); + } +} + +/** + * @brief This function disables the PCIe master access to the device, ensuring that + * the device is ready to be controlled by the driver. 
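+ *
+ * The GIO Master Disable bit is set in CTRL and STATUS.GIO_MASTER_ENABLE is
+ * then polled until outstanding PCIe requests have drained or the timeout
+ * expires.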
+ */ +static int eth_intel_igc_disable_pcie_master(mm_reg_t base) +{ + uint32_t timeout = INTEL_IGC_GIO_MASTER_DISABLE_TIMEOUT; + mm_reg_t igc_stat = base + INTEL_IGC_STATUS; + + igc_modify(base, INTEL_IGC_CTRL, INTEL_IGC_CTRL_GIO_MASTER_DISABLE, true); + /* Wait for the INTEL_IGC_STATUS_GIO_MASTER_ENABLE bit to clear */ + if (WAIT_FOR((sys_read32(igc_stat) & INTEL_IGC_STATUS_GIO_MASTER_ENABLE) == 0, timeout, + k_msleep(1))) { + return 0; + } + + LOG_ERR("Timeout waiting for GIO Master Request to complete"); + return -ETIMEDOUT; +} + +static void eth_intel_igc_init_speed(struct eth_intel_igc_mac_data *data) +{ + mm_reg_t base = data->base; + + igc_modify(base, INTEL_IGC_CTRL, INTEL_IGC_CTRL_FRCSPD | INTEL_IGC_CTRL_FRCDPX, false); + igc_modify(base, INTEL_IGC_CTRL, INTEL_IGC_CTRL_SLU, true); +} + +static void eth_intel_igc_get_dev_ownership(struct eth_intel_igc_mac_data *data) +{ + igc_modify(data->base, INTEL_IGC_CTRL_EXT, INTEL_IGC_CTRL_EXT_DRV_LOAD, true); +} + +static int eth_intel_igc_init_mac_hw(const struct device *dev) +{ + struct eth_intel_igc_mac_data *data = dev->data; + int ret = 0; + + ret = eth_intel_igc_disable_pcie_master(data->base); + if (ret < 0) { + return ret; + } + + sys_write32(UINT32_MAX, data->base + INTEL_IGC_IMC); + sys_write32(0, data->base + INTEL_IGC_RCTL); + sys_write32(INTEL_IGC_TCTL_PSP, data->base + INTEL_IGC_TCTL); + igc_reg_refresh(data->base); + + /* MAC Reset */ + igc_modify(data->base, INTEL_IGC_CTRL, INTEL_IGC_CTRL_DEV_RST, true); + k_msleep(INTEL_IGC_RESET_DELAY); + + /* MAC receive address Init */ + eth_intel_igc_rx_addrs_init(dev); + + eth_intel_igc_get_dev_ownership(data); + eth_intel_igc_map_intr_to_vector(dev); + eth_intel_igc_init_speed(data); + + return ret; +} + +static int eth_intel_igc_init(const struct device *dev) +{ + const struct eth_intel_igc_mac_cfg *cfg = dev->config; + struct eth_intel_igc_mac_data *data = dev->data; + int ret = 0; + + data->mac = dev; + data->base = DEVICE_MMIO_GET(cfg->platform); + if (!data->base) { + LOG_ERR("Failed to get MMIO base address"); + return -ENODEV; + } + + ret = eth_intel_igc_init_mac_hw(dev); + if (ret < 0) { + return ret; + } + + ret = eth_intel_igc_pcie_msix_setup(dev); + if (ret < 0) { + return ret; + } + + ret = eth_intel_igc_tx_dma_init(dev); + if (ret < 0) { + return ret; + } + + ret = eth_intel_igc_rx_dma_init(dev); + if (ret < 0) { + return ret; + } + + cfg->config_func(dev); + + return 0; +} + +static const struct ethernet_api eth_api = { + .iface_api.init = eth_intel_igc_iface_init, + .get_capabilities = eth_intel_igc_get_caps, + .set_config = eth_intel_igc_set_config, + .send = eth_intel_igc_tx_data, + .get_phy = eth_intel_igc_get_phy, +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + .get_stats = eth_intel_igc_get_stats, +#endif +}; + +#define NUM_QUEUES(n) DT_INST_PROP(n, num_queues) +#define NUM_TX_DESC(n) DT_INST_PROP(n, num_tx_desc) +#define NUM_RX_DESC(n) DT_INST_PROP(n, num_rx_desc) +#define NUM_MSIX(n) NUM_QUEUES(n) + ETH_IGC_NUM_MISC + +/** + * @brief Generate TX and RX interrupt handling functions as per queue. 
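+ *
+ * The queue ISR only acknowledges the interrupt and re-arms its vector; the
+ * actual descriptor processing runs in these per-queue work items (a k_work
+ * for TX completion cleanup and a delayable k_work for RX), keeping each
+ * bottom half exclusive to its queue.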
+ */ +#define INTEL_IGC_SETUP_QUEUE_WORK_EXP(n, queue) \ + static void eth_tx_irq_queue##n##_##queue(struct k_work *work) \ + { \ + struct eth_intel_igc_mac_data *data = \ + CONTAINER_OF(work, struct eth_intel_igc_mac_data, tx_work[queue]); \ + eth_intel_igc_tx_clean(data, queue); \ + } \ + static void eth_rx_irq_queue##n##_##queue(struct k_work *work) \ + { \ + struct k_work_delayable *dwork = k_work_delayable_from_work(work); \ + struct eth_intel_igc_mac_data *data = \ + CONTAINER_OF(dwork, struct eth_intel_igc_mac_data, rx_work[queue]); \ + eth_intel_igc_rx_data(data, queue); \ + } + +#define INTEL_IGC_SETUP_QUEUE_WORK(n) \ + INTEL_IGC_SETUP_QUEUE_WORK_EXP(n, 3) \ + INTEL_IGC_SETUP_QUEUE_WORK_EXP(n, 2) \ + INTEL_IGC_SETUP_QUEUE_WORK_EXP(n, 1) \ + INTEL_IGC_SETUP_QUEUE_WORK_EXP(n, 0) + +#define INTEL_IGC_INIT_QUEUE_WORK_EXP(n, queue) \ + k_work_init(&data->tx_work[queue], eth_tx_irq_queue##n##_##queue); \ + k_work_init_delayable(&data->rx_work[queue], eth_rx_irq_queue##n##_##queue); + +/** + * @brief Initialize deferred work for each hardware queue. + */ +#define INTEL_IGC_MAC_CONFIG_IRQ(n) \ + static void eth##n##_cfg_irq(const struct device *dev) \ + { \ + struct eth_intel_igc_mac_data *data = dev->data; \ + uint8_t queue = NUM_QUEUES(n); \ + if (queue > 3) { \ + INTEL_IGC_INIT_QUEUE_WORK_EXP(n, 3); \ + } \ + if (queue > 2) { \ + INTEL_IGC_INIT_QUEUE_WORK_EXP(n, 2); \ + } \ + if (queue > 1) { \ + INTEL_IGC_INIT_QUEUE_WORK_EXP(n, 1); \ + } \ + if (queue > 0) { \ + INTEL_IGC_INIT_QUEUE_WORK_EXP(n, 0); \ + } \ + } + +/** + * @brief Allocate various global objects required for managing tx and rx operations. + */ +#define INTEL_IGC_ALLOC_GLOBAL_OBJECTS(n) \ + struct k_sem tx_ring_lock_##n[NUM_QUEUES(n)]; \ + struct k_sem rx_ring_lock_##n[NUM_QUEUES(n)]; \ + static unsigned int tx_ring_wr_ptr_##n[NUM_QUEUES(n)]; \ + static unsigned int rx_ring_wr_ptr_##n[NUM_QUEUES(n)]; \ + static unsigned int tx_ring_rd_ptr_##n[NUM_QUEUES(n)]; \ + static unsigned int rx_ring_rd_ptr_##n[NUM_QUEUES(n)]; \ + static struct net_buf tx_frag_##n[NUM_QUEUES(n)][NUM_TX_DESC(n)]; \ + static struct net_pkt tx_pkt_##n[NUM_QUEUES(n)][NUM_TX_DESC(n)]; \ + static struct eth_intel_igc_intr_info intr_info_##n[NUM_MSIX(n)]; \ + static msi_vector_t msi_vec_##n[NUM_MSIX(n)]; + +#define INTEL_IGC_MAC_DATA(n) \ + static struct eth_intel_igc_mac_data eth_data_##n = { \ + .tx = \ + { \ + .sem = tx_ring_lock_##n, \ + .ring_wr_ptr = tx_ring_wr_ptr_##n, \ + .ring_rd_ptr = tx_ring_rd_ptr_##n, \ + .pkt = (struct net_pkt **)tx_pkt_##n, \ + .frag = (struct net_buf **)tx_frag_##n, \ + }, \ + .rx = \ + { \ + .sem = rx_ring_lock_##n, \ + .ring_wr_ptr = rx_ring_wr_ptr_##n, \ + .ring_rd_ptr = rx_ring_rd_ptr_##n, \ + }, \ + .intr_info = intr_info_##n, \ + .msi_vec = msi_vec_##n, \ + .mac = DEVICE_DT_GET(DT_DRV_INST(n)), \ + .mac_addr = DT_INST_PROP_OR(n, local_mac_address, {0}), \ + }; + +/** + * @brief Initializes the configuration structure of each driver instance. 
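+ *
+ * The parent platform device is captured so that its MMIO mapping can be
+ * retrieved with DEVICE_MMIO_GET() during eth_intel_igc_init(), and the PHY
+ * device is resolved from the phy-handle devicetree property.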
+ */
+#define INTEL_IGC_MAC_CONFIG(n) \
+	static void eth##n##_cfg_irq(const struct device *dev); \
+	static const struct eth_intel_igc_mac_cfg eth_cfg_##n = { \
+		.platform = DEVICE_DT_GET(DT_INST_PARENT(n)), \
+		.phy = DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle)), \
+		.random_mac_address = DT_INST_PROP(n, zephyr_random_mac_address), \
+		.config_func = eth##n##_cfg_irq, \
+		.num_queues = NUM_QUEUES(n), \
+		.num_msix = NUM_MSIX(n), \
+		.num_tx_desc = NUM_TX_DESC(n), \
+		.num_rx_desc = NUM_RX_DESC(n), \
+	};
+
+#define INTEL_IGC_MAC_INIT(n) \
+	DEVICE_PCIE_INST_DECLARE(n); \
+	INTEL_IGC_MAC_CONFIG(n) \
+	INTEL_IGC_ALLOC_GLOBAL_OBJECTS(n) \
+	INTEL_IGC_MAC_DATA(n) \
+	INTEL_IGC_SETUP_QUEUE_WORK(n); \
+	ETH_NET_DEVICE_DT_INST_DEFINE(n, eth_intel_igc_init, NULL, &eth_data_##n, &eth_cfg_##n, \
+				      CONFIG_ETH_INIT_PRIORITY, &eth_api, \
+				      CONFIG_ETH_INTEL_IGC_NET_MTU); \
+	INTEL_IGC_MAC_CONFIG_IRQ(n)
+
+DT_INST_FOREACH_STATUS_OKAY(INTEL_IGC_MAC_INIT)
diff --git a/drivers/ethernet/intel/eth_intel_igc_priv.h b/drivers/ethernet/intel/eth_intel_igc_priv.h
new file mode 100644
index 000000000000..d4ff264b77ae
--- /dev/null
+++ b/drivers/ethernet/intel/eth_intel_igc_priv.h
@@ -0,0 +1,500 @@
+/*
+ * Copyright (c) 2025 Intel Corporation.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_INTEL_IGC_PRIV_H_
+#define ZEPHYR_DRIVERS_ETHERNET_ETH_INTEL_IGC_PRIV_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define ETH_MAX_FRAME_SZ 2048
+#define INTEL_IGC_MAX_QCNT 4
+#define ETH_IGC_NUM_MISC 1
+#define RAH_QSEL_SHIFT 18
+#define RAH_QSEL_ENABLE BIT(28)
+#define RAH_QSEL_MASK GENMASK(19, 18)
+#define RAH_ASEL_MASK GENMASK(17, 16)
+#define RAH_ASEL_SRC_ADDR BIT(16)
+#define INTEL_IGC_RAH_AV BIT(31)
+#define INTEL_IGC_DEF_MAC_ADDR 0xC9A000
+
+/* Device Control Register */
+#define INTEL_IGC_CTRL 0x00000
+#define INTEL_IGC_CTRL_GIO_MASTER_DISABLE BIT(2)
+#define INTEL_IGC_CTRL_SLU BIT(6)
+#define INTEL_IGC_CTRL_FRCSPD BIT(11)
+#define INTEL_IGC_CTRL_FRCDPX BIT(12)
+#define INTEL_IGC_CTRL_RST BIT(26)
+#define INTEL_IGC_CTRL_RFCE BIT(27)
+#define INTEL_IGC_CTRL_TFCE BIT(28)
+#define INTEL_IGC_CTRL_EXT_DRV_LOAD BIT(28)
+#define INTEL_IGC_CTRL_DEV_RST BIT(29)
+#define INTEL_IGC_CTRL_VME BIT(30)
+#define INTEL_IGC_CTRL_PHY_RST BIT(31)
+
+/* Device Status Register */
+#define INTEL_IGC_STATUS 0x00008
+#define INTEL_IGC_STATUS_FD BIT(0)
+#define INTEL_IGC_STATUS_LU BIT(1)
+#define INTEL_IGC_STATUS_TXOFF BIT(4)
+#define INTEL_IGC_STATUS_SPEED_100 BIT(6)
+#define INTEL_IGC_STATUS_SPEED_1000 BIT(7)
+#define INTEL_IGC_STATUS_GIO_MASTER_ENABLE BIT(19)
+#define INTEL_IGC_STATUS_SPEED_MASK GENMASK(7, 6)
+
+/* Extended Device Control Register */
+#define INTEL_IGC_CTRL_EXT 0x00018
+#define INTEL_IGC_CTRL_EXT_DRV_LOAD BIT(28)
+
+/* Internal Rx Packet Buffer Size */
+#define INTEL_IGC_RXPBS 0x02404
+#define INTEL_IGC_RXPBS_RXPBSIZE_MASK GENMASK(5, 0)
+#define INTEL_IGC_RXPBS_RXPBSIZE_DEFAULT 0x000000A2
+
+/* Internal Tx Packet Buffer Size */
+#define INTEL_IGC_TXPBS 0x03404
+#define INTEL_IGC_TXPBS_TXPBSIZE_DEFAULT 0x04000014
+
+/* Interrupt Cause Read */
+#define INTEL_IGC_ICR 0x01500
+
+/* Interrupt Cause Set */
+#define INTEL_IGC_ICS 0x01504
+
+/* Interrupt Mask Set/Read */
+#define INTEL_IGC_IMS 0x01508
+
+/* Interrupt Mask Clear */
+#define INTEL_IGC_IMC 0x0150C
+
+#define INTEL_IGC_TXDW BIT(0)
+#define INTEL_IGC_LSC BIT(2)
+#define INTEL_IGC_RXDMT0 BIT(4)
+#define INTEL_IGC_RX_MISS BIT(6)
+#define INTEL_IGC_RXDW BIT(7)
+#define INTEL_IGC_TIME_SYNC BIT(19)
+#define INTEL_IGC_DRSTA BIT(30)
+#define INTEL_IGC_INTA BIT(31)
+
+/* General Purpose Interrupt Enable */
+#define INTEL_IGC_GPIE 0x01514
+#define INTEL_IGC_GPIE_NSICR BIT(0)
+#define INTEL_IGC_GPIE_MSIX_MODE BIT(4)
+#define INTEL_IGC_GPIE_EIAME BIT(30)
+#define INTEL_IGC_GPIE_PBA BIT(31)
+
+/* Extended Interrupt Cause Set */
+#define INTEL_IGC_EICS 0x01520
+
+/* Extended Interrupt Mask Set/Read */
+#define INTEL_IGC_EIMS 0x01524
+
+/* Extended Interrupt Mask Clear */
+#define INTEL_IGC_EIMC 0x01528
+
+/* Extended Interrupt Auto Clear */
+#define INTEL_IGC_EIAC 0x0152C
+
+/* Extended Interrupt Auto Mask */
+#define INTEL_IGC_EIAM 0x01530
+
+/* Extended Interrupt Cause Read */
+#define INTEL_IGC_EICR 0x01580
+
+/* Interrupt Throttle */
+#define INTEL_IGC_EITR_BASE_ADDR 0x01680
+#define INTEL_IGC_EITR(n) (INTEL_IGC_EITR_BASE_ADDR + (n * 4))
+
+/* Interrupt Vector Allocation */
+#define INTEL_IGC_IVAR_BASE_ADDR 0x01700
+#define INTEL_IGC_IVAR(n) (INTEL_IGC_IVAR_BASE_ADDR + (n * 4))
+
+/* Interrupt Vector Allocation MISC */
+#define INTEL_IGC_IVAR_MISC 0x01740
+#define INTEL_IGC_IVAR_INT_VALID_BIT BIT(7)
+#define INTEL_IGC_IVAR_MSI_CLEAR_RX0_RX2 0xFFFFFF00
+#define INTEL_IGC_IVAR_MSI_CLEAR_TX0_TX2 0xFFFF00FF
+#define INTEL_IGC_IVAR_MSI_CLEAR_RX1_RX3 0xFF00FFFF
+#define INTEL_IGC_IVAR_MSI_CLEAR_TX1_TX3 0x00FFFFFF
+
+/* Receive Control */
+#define INTEL_IGC_RCTL 0x00100
+#define INTEL_IGC_RCTL_EN BIT(1)
+#define INTEL_IGC_RCTL_SBP BIT(2)
+#define INTEL_IGC_RCTL_UPE BIT(3)
+#define INTEL_IGC_RCTL_MPE BIT(4)
+#define INTEL_IGC_RCTL_LPE BIT(5)
+#define INTEL_IGC_RCTL_LBM_MAC BIT(6)
+#define INTEL_IGC_RCTL_BAM BIT(15)
+#define INTEL_IGC_RCTL_VFE BIT(18)
+#define INTEL_IGC_RCTL_CFIEN BIT(19)
+#define INTEL_IGC_RCTL_PADSMALL BIT(21)
+#define INTEL_IGC_RCTL_DPF BIT(22)
+#define INTEL_IGC_RCTL_PMCF BIT(23)
+#define INTEL_IGC_RCTL_SECRC BIT(26)
+#define INTEL_IGC_RCTL_MO_SHIFT 12
+#define INTEL_IGC_RCTL_SZ_2048 0x0
+#define INTEL_IGC_RCTL_SZ_1024 GENMASK(16, 16)
+#define INTEL_IGC_RCTL_SZ_512 GENMASK(17, 17)
+#define INTEL_IGC_RCTL_SZ_256 GENMASK(17, 16)
+#define INTEL_IGC_RCTL_LBM_TCVR GENMASK(7, 6)
+
+/* Split and Replication Receive Control */
+#define INTEL_IGC_SRRCTL_BASE 0x0C00C
+#define INTEL_IGC_SRRCTL_OFFSET 0x40
+#define INTEL_IGC_SRRCTL(n) (INTEL_IGC_SRRCTL_BASE + (INTEL_IGC_SRRCTL_OFFSET * (n)))
+#define INTEL_IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0)
+
+/* Packet buffer size is programmed in 1024-byte resolution */
+#define INTEL_IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(INTEL_IGC_SRRCTL_BSIZEPKT_MASK, (x) / 1024)
+#define INTEL_IGC_SRRCTL_BSIZEHDR_MASK GENMASK(13, 8)
+
+/* Header buffer size is programmed in 64-byte resolution */
+#define INTEL_IGC_SRRCTL_BSIZEHDR(x) FIELD_PREP(INTEL_IGC_SRRCTL_BSIZEHDR_MASK, (x) / 64)
+#define INTEL_IGC_RXBUFFER_256 256
+#define INTEL_IGC_SRRCTL_DESCTYPE_ADV_ONEBUF BIT(25)
+#define INTEL_IGC_SRRCTL_DROP_EN BIT(31)
+
+/* Receive Descriptor Base Address Low */
+#define INTEL_IGC_RDBAL_BASE_ADDR 0x0C000
+#define INTEL_IGC_RDBAL_OFFSET 0x40
+#define INTEL_IGC_RDBAL(n) (INTEL_IGC_RDBAL_BASE_ADDR + (INTEL_IGC_RDBAL_OFFSET * (n)))
+
+/* Receive Descriptor Base Address High */
+#define INTEL_IGC_RDBAH_BASE_ADDR 0x0C004
+#define INTEL_IGC_RDBAH_OFFSET 0x40
+#define INTEL_IGC_RDBAH(n) (INTEL_IGC_RDBAH_BASE_ADDR + (INTEL_IGC_RDBAH_OFFSET * (n)))
+
+/* Receive Descriptor Ring Length */
+#define INTEL_IGC_RDLEN_BASE_ADDR 0x0C008
+#define INTEL_IGC_RDLEN_OFFSET 0x40
+#define INTEL_IGC_RDLEN(n) (INTEL_IGC_RDLEN_BASE_ADDR + (INTEL_IGC_RDLEN_OFFSET * (n)))
+
+/* Receive Descriptor Head */
+#define INTEL_IGC_RDH_BASE_ADDR 0x0C010
+#define 
INTEL_IGC_RDH_OFFSET 0x40 +#define INTEL_IGC_RDH(n) (INTEL_IGC_RDH_BASE_ADDR + (INTEL_IGC_RDH_OFFSET * (n))) + +/* Receive Descriptor Tail */ +#define INTEL_IGC_RDT_BASE_ADDR 0x0C018 +#define INTEL_IGC_RDT_OFFSET 0x40 +#define INTEL_IGC_RDT(n) (INTEL_IGC_RDT_BASE_ADDR + (INTEL_IGC_RDT_OFFSET * (n))) + +/* Receive Descriptor Control */ +#define INTEL_IGC_RXDCTL_BASE_ADDR 0x0C028 +#define INTEL_IGC_RXDCTL_OFFSET 0x40 +#define INTEL_IGC_RXDCTL(n) (INTEL_IGC_RXDCTL_BASE_ADDR + (INTEL_IGC_RXDCTL_OFFSET * (n))) +#define INTEL_IGC_RXDCTL_QUEUE_ENABLE BIT(25) +#define INTEL_IGC_RXDCTL_SWFLUSH BIT(26) + +#define INTEL_IGC_RX_THRESH_RESET GENMASK(31, 21) +#define INTEL_IGC_RX_PTHRESH_VAL 8 +#define INTEL_IGC_RX_HTHRESH_VAL 8 +#define INTEL_IGC_RX_WTHRESH_VAL 8 +#define INTEL_IGC_RX_PTHRESH_SHIFT 0 +#define INTEL_IGC_RX_HTHRESH_SHIFT 8 +#define INTEL_IGC_RX_WTHRESH_SHIFT 16 + +/* Receive Queue Drop Packet Count */ +#define INTEL_IGC_RQDPC_BASE_ADDR 0x0C030 +#define INTEL_IGC_RQDPC_OFFSET 0x40 +#define INTEL_IGC_RQDPC(n) (INTEL_IGC_RQDPC_BASE_ADDR + (INTEL_IGC_RQDPC_OFFSET * (n))) + +/* Receive Checksum Control */ +#define INTEL_IGC_RXCSUM 0x05000 +#define INTEL_IGC_RXCSUM_CRCOFL BIT(11) +#define INTEL_IGC_RXCSUM_PCSD BIT(13) + +/* Receive Long Packet Maximum Length */ +#define INTEL_IGC_RLPML 0x05004 + +/* Receive Filter Control */ +#define INTEL_IGC_RFCTL 0x05008 +#define INTEL_IGC_RFCTL_IPV6_EX_DIS BIT(16) +#define INTEL_IGC_RFCTL_LEF BIT(18) + +/* Collision related config parameters */ +#define INTEL_IGC_TCTL_CT_SHIFT 4 +#define INTEL_IGC_COLLISION_THRESHOLD 15 + +/* Transmit Control Register */ +#define INTEL_IGC_TCTL 0x00400 +#define INTEL_IGC_TCTL_EN BIT(1) +#define INTEL_IGC_TCTL_PSP BIT(3) +#define INTEL_IGC_TCTL_RTLC BIT(24) +#define INTEL_IGC_TCTL_CT GENMASK(11, 4) +#define INTEL_IGC_TCTL_COLD GENMASK(21, 12) + +/* Transmit Descriptor Base Address Low */ +#define INTEL_TDBAL_BASE_ADDR 0x0E000 +#define INTEL_TDBAL_OFFSET 0x40 +#define INTEL_IGC_TDBAL(n) (INTEL_TDBAL_BASE_ADDR + (INTEL_TDBAL_OFFSET * (n))) + +/* Transmit Descriptor Base Address High */ +#define INTEL_TDBAH_BASE_ADDR 0x0E004 +#define INTEL_TDBAH_OFFSET 0x40 +#define INTEL_IGC_TDBAH(n) (INTEL_TDBAH_BASE_ADDR + (INTEL_TDBAH_OFFSET * (n))) + +/* Transmit Descriptor Ring Length */ +#define INTEL_TDLEN_BASE_ADDR 0x0E008 +#define INTEL_TDLEN_OFFSET 0x40 +#define INTEL_IGC_TDLEN(n) (INTEL_TDLEN_BASE_ADDR + (INTEL_TDLEN_OFFSET * (n))) + +/* Transmit Descriptor Head */ +#define INTEL_TDH_BASE_ADDR 0x0E010 +#define INTEL_TDH_OFFSET 0x40 +#define INTEL_IGC_TDH(n) (INTEL_TDH_BASE_ADDR + (INTEL_TDH_OFFSET * (n))) + +/* Transmit Descriptor Tail */ +#define INTEL_TDT_BASE_ADDR 0x0E018 +#define INTEL_TDT_OFFSET 0x40 +#define INTEL_IGC_TDT(n) (INTEL_TDT_BASE_ADDR + (INTEL_TDT_OFFSET * (n))) + +/* Transmit Descriptor Control */ +#define INTEL_TXDCTL_BASE_ADDR 0x0E028 +#define INTEL_TXDCTL_OFFSET 0x40 +#define INTEL_IGC_TXDCTL(n) (INTEL_TXDCTL_BASE_ADDR + (INTEL_TXDCTL_OFFSET * (n))) +#define INTEL_IGC_TXDCTL_QUEUE_ENABLE BIT(25) + +#define INTEL_IGC_TX_PTHRESH_VAL 8 +#define INTEL_IGC_TX_HTHRESH_VAL 8 +#define INTEL_IGC_TX_WTHRESH_VAL 8 +#define INTEL_IGC_TX_PTHRESH_SHIFT 0 +#define INTEL_IGC_TX_HTHRESH_SHIFT 8 +#define INTEL_IGC_TX_WTHRESH_SHIFT 16 +#define INTEL_IGC_TX_DESC_TYPE 0x3 + +/* Statistics Register Descriptions */ +#define INTEL_IGC_CRCERRS 0x04000 +#define INTEL_IGC_ALGNERRC 0x04004 +#define INTEL_IGC_RXERRC 0x0400C +#define INTEL_IGC_MPC 0x04010 +#define INTEL_IGC_SCC 0x04014 +#define INTEL_IGC_ECOL 0x04018 +#define INTEL_IGC_MCC 
0x0401C +#define INTEL_IGC_LATECOL 0x04020 +#define INTEL_IGC_COLC 0x04028 +#define INTEL_IGC_RERC 0x0402C +#define INTEL_IGC_DC 0x04030 +#define INTEL_IGC_TNCRS 0x04034 +#define INTEL_IGC_HTDPMC 0x0403C +#define INTEL_IGC_RLEC 0x04040 +#define INTEL_IGC_XONRXC 0x04048 +#define INTEL_IGC_XONTXC 0x0404C +#define INTEL_IGC_XOFFRXC 0x04050 +#define INTEL_IGC_XOFFTXC 0x04054 +#define INTEL_IGC_FCRUC 0x04058 +#define INTEL_IGC_PRC64 0x0405C +#define INTEL_IGC_PRC127 0x04060 +#define INTEL_IGC_PRC255 0x04064 +#define INTEL_IGC_PRC511 0x04068 +#define INTEL_IGC_PRC1023 0x0406C +#define INTEL_IGC_PRC1522 0x04070 +#define INTEL_IGC_GPRC 0x04074 +#define INTEL_IGC_BPRC 0x04078 +#define INTEL_IGC_MPRC 0x0407C +#define INTEL_IGC_GPTC 0x04080 +#define INTEL_IGC_GORCL 0x04088 +#define INTEL_IGC_GORCH 0x0408C +#define INTEL_IGC_GOTCL 0x04090 +#define INTEL_IGC_GOTCH 0x04094 +#define INTEL_IGC_RNBC 0x040A0 +#define INTEL_IGC_RUC 0x040A4 +#define INTEL_IGC_RFC 0x040A8 +#define INTEL_IGC_ROC 0x040AC +#define INTEL_IGC_RJC 0x040B0 +#define INTEL_IGC_MGTPRC 0x040B4 +#define INTEL_IGC_MGTPDC 0x040B8 +#define INTEL_IGC_MGTPTC 0x040BC +#define INTEL_IGC_TORL 0x040C0 +#define INTEL_IGC_TORH 0x040C4 +#define INTEL_IGC_TOTL 0x040C8 +#define INTEL_IGC_TOTH 0x040CC +#define INTEL_IGC_TPR 0x040D0 +#define INTEL_IGC_TPT 0x040D4 +#define INTEL_IGC_PTC64 0x040D8 +#define INTEL_IGC_PTC127 0x040DC +#define INTEL_IGC_PTC255 0x040E0 +#define INTEL_IGC_PTC511 0x040E4 +#define INTEL_IGC_PTC1023 0x040E8 +#define INTEL_IGC_PTC1522 0x040EC +#define INTEL_IGC_MPTC 0x040F0 +#define INTEL_IGC_BPTC 0x040F4 +#define INTEL_IGC_TSCTC 0x040F8 +#define INTEL_IGC_IAC 0x04100 +#define INTEL_IGC_RPTHC 0x04104 +#define INTEL_IGC_TLPIC 0x04148 +#define INTEL_IGC_RLPIC 0x0414C +#define INTEL_IGC_HGPTC 0x04118 +#define INTEL_IGC_RXDMTC 0x04120 +#define INTEL_IGC_HGORCL 0x04128 +#define INTEL_IGC_HGORCH 0x0412C +#define INTEL_IGC_HGOTCL 0x04130 +#define INTEL_IGC_HGOTCH 0x04134 +#define INTEL_IGC_LENERRS 0x04138 +#define INTEL_IGC_TQDPC_BASE 0x0E030 +#define INTEL_IGC_TQDPC_OFFSET 0x40 +#define INTEL_IGC_TQDPC(n) (INTEL_IGC_TQDPC_BASE + (INTEL_IGC_TQDPC_OFFSET * (n))) + +#define INTEL_IGC_GIO_MASTER_DISABLE_TIMEOUT 800 +#define INTEL_IGC_RESET_DELAY 1 + +#define INTEL_IGC_RAL(i) (((i) <= 15) ? (0x05400 + ((i) * 8)) : (0x054E0 + ((i - 16) * 8))) +#define INTEL_IGC_RAH(i) (((i) <= 15) ? (0x05404 + ((i) * 8)) : (0x054E4 + ((i - 16) * 8))) + +typedef void (*eth_config_irq_t)(const struct device *); + +struct eth_intel_igc_intr_info { + const struct device *mac; + msi_vector_t msi_vector; + uint8_t id; +}; + +enum eth_igc_mac_filter_mode { + DEST_ADDR, /* normal mode */ + SRC_ADDR +}; + +/** + * @brief This Advanced transmit descriptor format is crucial for the transmit DMA. + * Field misalignment or size change will break the DMA operation. Modify this + * structure with caution. 
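+ *
+ * The 'read' layout is what software fills in when posting a buffer to the
+ * ring, while the 'writeback' layout is what hardware writes back (for example
+ * the dd and ts_stat status bits) once the descriptor has been processed.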
+ */ +union dma_tx_desc { + struct { + uint64_t data_buf_addr; + + unsigned int data_len: 16; + unsigned int ptp1: 4; + unsigned int desc_type: 4; + unsigned int eop: 1; + unsigned int ifcs: 1; + unsigned int reserved1: 1; + unsigned int rs: 1; + unsigned int reserved2: 1; + unsigned int dext: 1; + unsigned int vle: 1; + unsigned int tse: 1; + unsigned int dd: 1; + unsigned int ts_stat: 1; + unsigned int reserved3: 2; + unsigned int idx: 1; + unsigned int ptp2: 3; + unsigned int popts: 6; + unsigned int payloadlen: 18; + } read; + + struct { + uint64_t dma_time_stamp; + + unsigned int reserved1: 32; + unsigned int dd: 1; + unsigned int ts_stat: 1; + unsigned int reserved2: 2; + unsigned int reserved3: 28; + } writeback; +}; + +/** + * @brief This Advanced receive descriptor format is crucial for the receive DMA. + * Field misalignment or size change will break the DMA operation. Modify this + * structure with caution. + */ +union dma_rx_desc { + struct { + uint64_t pkt_buf_addr; + uint64_t hdr_buf_addr; + } read; + + struct { + unsigned int rss_type: 4; + unsigned int pkt_type: 13; + unsigned int reserved1: 2; + unsigned int hdr_len: 12; + unsigned int sph: 1; + unsigned int rss_has_val: 32; + + /* extended status */ + unsigned int dd: 1; + unsigned int eop: 1; + unsigned int reserved2: 1; + unsigned int vp: 1; + unsigned int udpcs: 1; + unsigned int l4cs: 1; + unsigned int ipcs: 1; + unsigned int pif: 1; + unsigned int reserved3: 1; + unsigned int vext: 1; + unsigned int udpv: 1; + unsigned int llint: 1; + unsigned int crc_strip: 1; + unsigned int smd_type: 2; + unsigned int tsip: 1; + unsigned int reserved4: 3; + unsigned int mc: 1; + + /* extended error */ + unsigned int reserved5: 3; + unsigned int hbo: 1; + unsigned int reserved6: 5; + unsigned int l4e: 1; + unsigned int ipe: 1; + unsigned int rxe: 1; + + unsigned int pkt_len: 16; + unsigned int vlan_tag: 16; + } writeback; +}; + +struct eth_intel_igc_mac_cfg { + const struct device *const platform; + const struct device *const phy; + eth_config_irq_t config_func; + uint32_t num_tx_desc; + uint32_t num_rx_desc; + uint8_t num_queues; + uint8_t num_msix; + bool random_mac_address; +}; + +struct eth_intel_igc_mac_tx { + union dma_tx_desc *desc; + struct k_sem *sem; + uint32_t *ring_wr_ptr; + uint32_t *ring_rd_ptr; + struct net_buf **frag; + struct net_pkt **pkt; +}; + +struct eth_intel_igc_mac_rx { + union dma_rx_desc *desc; + struct k_sem *sem; + uint32_t *ring_wr_ptr; + uint32_t *ring_rd_ptr; + uint8_t *buf; +}; + +struct eth_intel_igc_mac_data { + struct net_if *iface; + const struct device *mac; + struct eth_intel_igc_mac_tx tx; + struct eth_intel_igc_mac_rx rx; + struct k_work tx_work[INTEL_IGC_MAX_QCNT]; + struct k_work_delayable rx_work[INTEL_IGC_MAX_QCNT]; + struct eth_intel_igc_intr_info *intr_info; + uint8_t mac_addr[NET_ETH_ADDR_LEN]; + msi_vector_t *msi_vec; +#if defined(CONFIG_NET_STATISTICS_ETHERNET) + struct net_stats_eth stats; +#endif + mm_reg_t base; +}; + +#endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_INTEL_IGC_PRIV_H_*/ diff --git a/dts/bindings/ethernet/intel,igc-mac.yaml b/dts/bindings/ethernet/intel,igc-mac.yaml new file mode 100644 index 000000000000..33185357896e --- /dev/null +++ b/dts/bindings/ethernet/intel,igc-mac.yaml @@ -0,0 +1,26 @@ +# Copyright 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +description: Intel IGC MAC bindings. 
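+# The MAC node is expected to be a child of an "intel,eth-plat" parent node; see
+# the dts/x86/intel/alder_lake.dtsi change later in this series for a reference
+# layout.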
+
+compatible: "intel,igc-mac"
+
+include: [base.yaml, ethernet-controller.yaml]
+
+properties:
+  num-queues:
+    type: int
+    default: 4
+    description: Number of queues supported by the IGC MAC.
+
+  num-tx-desc:
+    type: int
+    default: 512
+    description: The number of transmit descriptors per queue can range from 128 to 4096,
+      depending on the configuration.
+
+  num-rx-desc:
+    type: int
+    default: 512
+    description: The number of receive descriptors per queue can range from 128 to 4096,
+      depending on the configuration.

From 32849f1a850a24efee554ab63de3da84a2b51a11 Mon Sep 17 00:00:00 2001
From: Vijayakannan Ayyathurai
Date: Thu, 10 Jul 2025 21:55:54 +0800
Subject: [PATCH 4/5] dts: x86: Add i226 device-tree config for Intel Alderlake

The Foxville LM (0x125B) i226 variant and the Intel Alder Lake platform were
used for developing and stabilizing the i226 Ethernet device driver. However,
users can reuse the provided device tree model as a reference when enabling
support for other i226 variants and platforms.

This device-tree model includes essential configurations for the i226 Ethernet
controller, such as PCIe settings, interrupt mappings, PHY MDIO, and DMA
descriptor configurations.

Signed-off-by: Vijayakannan Ayyathurai
---
 dts/x86/intel/alder_lake.dtsi | 27 +++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/dts/x86/intel/alder_lake.dtsi b/dts/x86/intel/alder_lake.dtsi
index 110cc2387291..ba0312f4ec83 100644
--- a/dts/x86/intel/alder_lake.dtsi
+++ b/dts/x86/intel/alder_lake.dtsi
@@ -362,6 +362,33 @@
 			status = "okay";
 		};
+
+		ethernet0: ethernet0 {
+			compatible = "intel,eth-plat";
+			interrupt-parent = <&intc>;
+			vendor-id = <0x8086>;
+			device-id = <0x125b>;
+
+			igc0: igc0 {
+				compatible = "intel,igc-mac";
+				local-mac-address = [aa 14 04 85 11 22];
+				phy-handle = <&ethphy0>;
+				status = "okay";
+			};
+
+			mdio0: mdio0 {
+				compatible = "intel,igc-mdio";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "okay";
+
+				ethphy0: ethernet-phy@0 {
+					compatible = "ethernet-phy";
+					reg = <0x0>;
+					status = "okay";
+				};
+			};
+		};
 	};
 
 	soc {

From 884a682d3e8635fbf7e71330dd358b18a3491d80 Mon Sep 17 00:00:00 2001
From: Vijayakannan Ayyathurai
Date: Thu, 10 Jul 2025 21:59:27 +0800
Subject: [PATCH 5/5] samples: net: zperf: Enable i226 config with Intel Alderlake

The Zperf sample application was chosen to demonstrate basic network
functionality and high-performance use cases. This application serves as a
reference for users who wish to enable other network sample applications.

In addition to the essential configurations for the Intel i226 Ethernet
controller, stack-specific configurations were added to ensure stability under
heavy network loads. These configurations include adjustments to buffer sizes,
interrupt handling, and DMA descriptor management.
Signed-off-by: Vijayakannan Ayyathurai
---
 samples/net/zperf/boards/intel_adl_crb.conf | 35 +++++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 samples/net/zperf/boards/intel_adl_crb.conf

diff --git a/samples/net/zperf/boards/intel_adl_crb.conf b/samples/net/zperf/boards/intel_adl_crb.conf
new file mode 100644
index 000000000000..681f0605a860
--- /dev/null
+++ b/samples/net/zperf/boards/intel_adl_crb.conf
@@ -0,0 +1,35 @@
+#
+# Ethernet Drivers
+#
+CONFIG_ETH_INTEL_PLAT=y
+CONFIG_ETH_INTEL_IGC=y
+
+#
+# Link layer options
+#
+CONFIG_NET_L2_ETHERNET=y
+
+CONFIG_NET_PKT_RX_COUNT=512
+CONFIG_NET_PKT_TX_COUNT=512
+CONFIG_NET_BUF_RX_COUNT=512
+CONFIG_NET_BUF_TX_COUNT=512
+CONFIG_NET_BUF_DATA_SIZE=1024
+
+#
+# Increase stack sizes for stability
+#
+CONFIG_NET_TX_STACK_SIZE=8192
+CONFIG_NET_RX_STACK_SIZE=8192
+CONFIG_NET_TCP_WORKQ_STACK_SIZE=8192
+CONFIG_NET_MGMT_EVENT_STACK_SIZE=8192
+CONFIG_NET_SOCKETS_SERVICE=y
+CONFIG_NET_SOCKETS_SERVICE_STACK_SIZE=8192
+
+#
+# IP stack
+#
+CONFIG_NET_IPV4_FRAGMENT=y
+CONFIG_NET_STATISTICS_ETHERNET=n
+
+# Disable VTD_ICTL until IOMMU support is available.
+CONFIG_INTEL_VTD_ICTL=n
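
For reference, one way to exercise this configuration (the commands below show
the usual Zephyr workflow and are given only as an illustration, assuming the
intel_adl_crb board target):

  west build -b intel_adl_crb samples/net/zperf

Once the target is running, a receiver can be started from the Zephyr shell
with, for example, "zperf tcp download 5001", with a matching iperf client on
the host pointed at the board's IP address.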