Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c    49
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.h     4
-rw-r--r--  drivers/net/ixgbe/ixgbe_flow.c      14
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c        11
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c       7
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.c    6
6 files changed, 48 insertions, 43 deletions
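
The diff below is a pure rename: every removed/added pair swaps an unprefixed ETHER_* macro for its RTE_ETHER_* counterpart from rte_ether.h, with values unchanged. A minimal sketch of the allocation pattern touched in eth_ixgbe_dev_init(); this is illustrative only, and alloc_mac_table() plus its argument are hypothetical names, not part of the commit:

    #include <rte_ether.h>   /* RTE_ETHER_ADDR_LEN == 6 */
    #include <rte_malloc.h>  /* rte_zmalloc() */

    /* One zeroed RTE_ETHER_ADDR_LEN slot per receive-address register,
     * as the PMD does for eth_dev->data->mac_addrs. */
    static struct rte_ether_addr *
    alloc_mac_table(unsigned int num_rar_entries)
    {
        return rte_zmalloc("example",
                           RTE_ETHER_ADDR_LEN * num_rar_entries, 0);
    }
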
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index f1bae7b..a9203fa 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1213,13 +1213,13 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
ixgbe_reset_qstat_mappings(hw);
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %u bytes needed to store "
"MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Copy the permanent MAC address */
@@ -1227,12 +1227,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
&eth_dev->data->mac_addrs[0]);
/* Allocate memory for storing hash filter MAC addresses */
- eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- IXGBE_VMDQ_NUM_UC_MAC, 0);
+ eth_dev->data->hash_mac_addrs = rte_zmalloc(
+ "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
if (eth_dev->data->hash_mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
return -ENOMEM;
}
@@ -1502,7 +1502,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
l2_tn_info->e_tag_en = FALSE;
l2_tn_info->e_tag_fwd_en = FALSE;
- l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
+ l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
return 0;
}
@@ -1543,7 +1543,7 @@ generate_random_mac_addr(struct rte_ether_addr *mac_addr)
mac_addr->addr_bytes[1] = 0x09;
mac_addr->addr_bytes[2] = 0xC0;
/* Force indication of locally assigned MAC address. */
- mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
+ mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
/* Generate the last 3 bytes of the MAC address with a random number. */
random = rte_rand();
memcpy(&mac_addr->addr_bytes[3], &random, 3);
@@ -1650,13 +1650,13 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
ixgbevf_get_queues(hw, &tcs, &tc);
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
+ eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN *
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %u bytes needed to store "
"MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
@@ -3055,7 +3055,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
hw_stats->qbrc[i] +=
((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
if (crc_strip == 0)
- hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
+ hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN;
hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
hw_stats->qbtc[i] +=
@@ -3100,12 +3100,12 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
if (crc_strip == 0)
- hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
+ hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN;
uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
hw_stats->gptc += delta_gptc;
- hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
- hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
+ hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN;
+ hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
/*
* Workaround: mprc hardware is incorrectly counting
@@ -3135,7 +3135,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
hw_stats->gptc -= total;
hw_stats->mptc -= total;
hw_stats->ptc64 -= total;
- hw_stats->gotc -= total * ETHER_MIN_LEN;
+ hw_stats->gotc -= total * RTE_ETHER_MIN_LEN;
hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
@@ -3757,7 +3757,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
@@ -4558,7 +4558,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* At least reserve one Ethernet frame for watermark
* high_water/low_water in kilo bytes for ixgbe
*/
- max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+ max_high_water = (rx_buf_size -
+ RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
@@ -4779,7 +4780,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
* At least reserve one Ethernet frame for watermark
* high_water/low_water in kilo bytes for ixgbe
*/
- max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+ max_high_water = (rx_buf_size -
+ RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
if ((pfc_conf->fc.high_water > max_high_water) ||
(pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
@@ -4960,7 +4962,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
ixgbe_dev_info_get(dev, &dev_info);
/* check that mtu is within the allowed range */
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
/* If device is started, refuse mtu that requires the support of
@@ -4977,7 +4979,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* switch to jumbo mode if needed */
- if (frame_size > ETHER_MAX_LEN) {
+ if (frame_size > RTE_ETHER_MAX_LEN) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -6366,7 +6368,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ if (mtu < RTE_ETHER_MIN_MTU ||
+ max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
return -EINVAL;
/* If device is started, refuse mtu that requires the support of
@@ -6663,8 +6666,8 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPv6) {
PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
" ethertype filter.", filter->ether_type);
return -EINVAL;
@@ -7072,7 +7075,7 @@ ixgbe_timesync_enable(struct rte_eth_dev *dev)
/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
- (ETHER_TYPE_1588 |
+ (RTE_ETHER_TYPE_1588 |
IXGBE_ETQF_FILTER_EN |
IXGBE_ETQF_1588));
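
The ixgbe_dev_mtu_set() hunks above keep the same bounds and jumbo checks, only with renamed constants. A hedged sketch of that logic; mtu_needs_jumbo() is a hypothetical helper, while the RTE_ETHER_* macros are the real ones from rte_ether.h:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_ether.h>

    /* A frame carries mtu bytes of payload plus the 14-byte header and
     * 4-byte CRC; RTE_ETHER_MAX_LEN (1518) is the largest frame a port
     * accepts without the jumbo-frame offload. */
    static bool
    mtu_needs_jumbo(uint16_t mtu)
    {
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        return frame_size > RTE_ETHER_MAX_LEN;
    }
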
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index d1f61e8..fdad94d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -102,7 +102,7 @@
#define IXGBE_5TUPLE_MIN_PRI 1
/* The overhead from MTU to max frame size. */
-#define IXGBE_ETH_OVERHEAD (ETHER_HDR_LEN + ETHER_CRC_LEN)
+#define IXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/
#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000
@@ -258,7 +258,7 @@ struct ixgbe_mirror_info {
};
struct ixgbe_vf_info {
- uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
+ uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
uint16_t num_vf_mc_hashes;
uint16_t default_vf_vlan_id;
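
IXGBE_ETH_OVERHEAD above is the fixed gap between MTU and frame size: RTE_ETHER_HDR_LEN (14) plus RTE_ETHER_CRC_LEN (4), so the standard 1500-byte MTU maps to RTE_ETHER_MAX_LEN (1518). A one-line compile-time check of that arithmetic, illustrative only:

    #include <rte_ether.h>

    /* 1500 + 14 + 4 == 1518 */
    _Static_assert(RTE_ETHER_MTU + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
                       == RTE_ETHER_MAX_LEN,
                   "MTU plus L2 overhead must equal the max frame size");
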
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 7024354..23aba0a 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -887,8 +887,8 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPv6) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1705,7 +1705,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
eth_spec = item->spec;
/* Get the dst MAC. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
eth_spec->dst.addr_bytes[j];
}
@@ -1734,7 +1734,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
* src MAC address must be masked,
* and don't support dst MAC address mask.
*/
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
if (eth_mask->src.addr_bytes[j] ||
eth_mask->dst.addr_bytes[j] != 0xFF) {
memset(rule, 0,
@@ -2660,7 +2660,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* src MAC address should be masked. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
if (eth_mask->src.addr_bytes[j]) {
memset(rule, 0,
sizeof(struct ixgbe_fdir_rule));
@@ -2671,7 +2671,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
}
rule->mask.mac_addr_byte_mask = 0;
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
/* It's a per byte mask. */
if (eth_mask->dst.addr_bytes[j] == 0xFF) {
rule->mask.mac_addr_byte_mask |= 0x1 << j;
@@ -2692,7 +2692,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
eth_spec = item->spec;
/* Get the dst MAC. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
eth_spec->dst.addr_bytes[j];
}
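
The flow-director hunks above walk the six MAC bytes with RTE_ETHER_ADDR_LEN and build mac_addr_byte_mask with one bit per fully-masked destination byte. A minimal sketch of that loop, assuming dst_mask stands in for eth_mask->dst.addr_bytes from the flow pattern:

    #include <stdint.h>
    #include <rte_ether.h>

    /* Bit j is set when destination-MAC byte j is fully masked (0xFF);
     * partial per-byte masks are not representable in hardware. */
    static uint8_t
    fdir_mac_byte_mask(const uint8_t dst_mask[RTE_ETHER_ADDR_LEN])
    {
        uint8_t byte_mask = 0;
        int j;

        for (j = 0; j < RTE_ETHER_ADDR_LEN; j++)
            if (dst_mask[j] == 0xFF)
                byte_mask |= 1u << j;
        return byte_mask;
    }
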
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index a2ae703..c88d56e 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -40,7 +40,7 @@ dev_num_vf(struct rte_eth_dev *eth_dev)
static inline
int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
- unsigned char vf_mac_addr[ETHER_ADDR_LEN];
+ unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
uint16_t vfn;
@@ -49,7 +49,7 @@ int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
rte_eth_random_addr(vf_mac_addr);
/* keep the random address as default */
memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
}
return 0;
@@ -443,7 +443,7 @@ ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
+ rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
@@ -547,7 +547,7 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t new_mtu = msgbuf[1];
uint32_t max_frs;
- int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
/* X540 and X550 support jumbo frames in IOV mode */
if (hw->mac.type != ixgbe_mac_X540 &&
@@ -556,7 +556,8 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms
hw->mac.type != ixgbe_mac_X550EM_a)
return -1;
- if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ if (max_frame < RTE_ETHER_MIN_LEN ||
+ max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
return -1;
max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
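
ixgbe_set_vf_lpe() above converts the MTU a VF requests over the mailbox into a frame size before range-checking it. A hedged sketch of that conversion, where vf_frame_ok() is hypothetical and vf_mtu stands in for msgbuf[1]:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_ether.h>

    /* Frame = MTU + L2 header + CRC; it must fit between the 64-byte
     * Ethernet minimum and the 9728-byte jumbo ceiling. */
    static bool
    vf_frame_ok(uint32_t vf_mtu)
    {
        uint32_t max_frame = vf_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        return max_frame >= RTE_ETHER_MIN_LEN &&
               max_frame <= RTE_ETHER_MAX_JUMBO_FRAME_LEN;
    }
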
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 3072bc1..95dae42 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2955,7 +2955,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
@@ -3980,7 +3980,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
struct ixgbe_dcb_tc_config *tc;
- uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t max_frame = dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_bw_conf *bw_conf =
@@ -4963,7 +4964,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* call to configure.
*/
if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
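
The rxtx hunks above only touch CRC accounting: with DEV_RX_OFFLOAD_KEEP_CRC the hardware leaves the 4-byte FCS in the buffer, so the queue records RTE_ETHER_CRC_LEN for later length corrections. A sketch of how such a crc_len would be applied; rx_payload_len() and its parameters are assumptions, not PMD API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_ether.h>

    /* Trim the FCS from the descriptor-reported length when it was kept. */
    static uint16_t
    rx_payload_len(uint16_t desc_len, bool keep_crc)
    {
        uint16_t crc_len = keep_crc ? RTE_ETHER_CRC_LEN : 0;

        return desc_len - crc_len;
    }
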
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index db21918..077afab 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -38,7 +38,7 @@ rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
if (rte_is_valid_assigned_ether_addr(
(struct rte_ether_addr *)new_mac)) {
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
IXGBE_RAH_AV);
}
@@ -155,7 +155,7 @@ rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
if (vf >= pci_dev->max_vfs)
return -EINVAL;
- if (vlan_id > ETHER_MAX_VLAN_ID)
+ if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
return -EINVAL;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -477,7 +477,7 @@ rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
if (!is_ixgbe_supported(dev))
return -ENOTSUP;
- if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+ if (vlan > RTE_ETHER_MAX_VLAN_ID || vf_mask == 0)
return -EINVAL;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
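
The rte_pmd_ixgbe.c hunks above bound-check VLAN IDs against RTE_ETHER_MAX_VLAN_ID (4095, the largest value of the 12-bit 802.1Q VID field). A standalone sketch of that validation; vlan_id_is_valid() is a hypothetical helper:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_ether.h>

    /* The 802.1Q VID field is 12 bits wide, so 4095 is the ceiling. */
    static bool
    vlan_id_is_valid(uint16_t vlan_id)
    {
        return vlan_id <= RTE_ETHER_MAX_VLAN_ID;
    }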