author    Wei Dai <wei.dai@intel.com>    2018-05-10 19:56:55 +0800
committer Ferruh Yigit <ferruh.yigit@intel.com>    2018-05-14 22:31:51 +0100
commit    a4996bd89c42590f8886454a06a994f71acf89dd (patch)
tree      473add5786efaaa017690953bf7f3994a2a5e9e0 /drivers/net/e1000/em_rxtx.c
parent    df428ceef4fdbceacda8d50341c25ddd46a76a39 (diff)
ethdev: new Rx/Tx offloads API
This patch checks whether an input requested offload is valid or not. Any requested offload must be supported in the device capabilities. Any offload is disabled by default if it is not set in the parameter dev_conf->[rt]xmode.offloads to rte_eth_dev_configure() and [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().

If any offload is enabled in rte_eth_dev_configure() by the application, it is enabled on all queues, no matter whether it is of per-queue or per-port type and no matter whether it is set or cleared in [rt]x_conf->offloads to rte_eth_[rt]x_queue_setup().

If a per-queue offload hasn't been enabled in rte_eth_dev_configure(), it can be enabled or disabled for an individual queue in rte_eth_[rt]x_queue_setup(). A newly added offload is one which hasn't been enabled in rte_eth_dev_configure() and is requested to be enabled in rte_eth_[rt]x_queue_setup(); it must be of per-queue type, otherwise an error log is triggered.

The underlying PMD must be aware that the offloads passed to the PMD-specific queue_setup() function only carry those newly added per-queue offloads. This patch performs the above checking in a common way in the rte_ethdev layer, to avoid duplicating the same checking in every underlying PMD.

This patch assumes that all PMDs in 18.05-rc2 have already been converted to the offload API defined in 17.11. It also assumes that all PMDs can return correct offload capabilities in rte_eth_dev_infos_get().

In the beginning of [rt]x_queue_setup() of the underlying PMD, add

    offloads = [rt]xconf->offloads | dev->data->dev_conf.[rt]xmode.offloads;

to keep the same behavior as the offload API defined in 17.11, so that upper applications are not broken by the offload API change. A PMD can use the fact that the input [rt]xconf->offloads only carries the newly added per-queue offloads to do some optimization or code changes on the basis of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
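For context, a minimal application-side sketch of the new API is shown below. The helper name, port id, queue size and the chosen offload flags are assumptions for illustration only, not part of this patch. A per-port offload requested in rte_eth_dev_configure() applies to every queue; a per-queue offload may additionally be requested in rte_eth_rx_queue_setup() only if the PMD reports it in rx_queue_offload_capa, otherwise the ethdev layer now rejects the request.

#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_lcore.h>

/* Hypothetical helper for illustration: enable one per-port and one
 * per-queue Rx offload on a port with a single Rx queue.
 */
static int
setup_port_with_offloads(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
    struct rte_eth_conf port_conf = { 0 };
    struct rte_eth_dev_info dev_info;
    struct rte_eth_rxconf rxq_conf;
    int ret;

    rte_eth_dev_info_get(port_id, &dev_info);

    /* Per-port offload: once enabled here, it is on for all queues. */
    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM)
        port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;

    ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
    if (ret < 0)
        return ret;

    /* Per-queue offload: may be added at queue setup only if it is
     * reported in rx_queue_offload_capa; otherwise the ethdev layer
     * rejects the request before the PMD queue_setup() is called.
     */
    rxq_conf = dev_info.default_rxconf;
    rxq_conf.offloads = port_conf.rxmode.offloads;
    if (dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_SCATTER)
        rxq_conf.offloads |= DEV_RX_OFFLOAD_SCATTER;

    return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                  &rxq_conf, mbuf_pool);
}

The Tx path follows the same pattern with txmode.offloads, tx_offload_capa and tx_queue_offload_capa.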
Diffstat (limited to 'drivers/net/e1000/em_rxtx.c')
-rw-r--r--  drivers/net/e1000/em_rxtx.c  64
1 file changed, 6 insertions(+), 58 deletions(-)
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 2b3c63e..a6b3e92 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1183,22 +1183,6 @@ em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
return tx_queue_offload_capa;
}
-static int
-em_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t queue_supported = em_get_tx_queue_offloads_capa(dev);
- uint64_t port_supported = em_get_tx_port_offloads_capa(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
int
eth_em_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1211,21 +1195,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
struct e1000_hw *hw;
uint32_t tsize;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!em_check_tx_queue_offloads(dev, tx_conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev,
- tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- em_get_tx_port_offloads_capa(dev),
- em_get_tx_queue_offloads_capa(dev));
- return -ENOTSUP;
- }
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
/*
* Validate number of transmit descriptors.
@@ -1330,7 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
em_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = offloads;
return 0;
}
@@ -1412,22 +1386,6 @@ em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
return rx_queue_offload_capa;
}
-static int
-em_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supported = em_get_rx_queue_offloads_capa(dev);
- uint64_t port_supported = em_get_rx_port_offloads_capa(dev);
-
- if ((requested & (queue_supported | port_supported)) != requested)
- return 0;
-
- if ((port_offloads ^ requested) & port_supported)
- return 0;
-
- return 1;
-}
-
int
eth_em_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1440,21 +1398,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
struct em_rx_queue *rxq;
struct e1000_hw *hw;
uint32_t rsize;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!em_check_rx_queue_offloads(dev, rx_conf->offloads)) {
- PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported port offloads 0x%" PRIx64
- " or supported queue offloads 0x%" PRIx64,
- (void *)dev,
- rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- em_get_rx_port_offloads_capa(dev),
- em_get_rx_queue_offloads_capa(dev));
- return -ENOTSUP;
- }
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
/*
* Validate number of receive descriptors.
@@ -1523,7 +1471,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
dev->data->rx_queues[queue_idx] = rxq;
em_reset_rx_queue(rxq);
- rxq->offloads = rx_conf->offloads;
+ rxq->offloads = offloads;
return 0;
}