author     Wei Dai <wei.dai@intel.com>                2018-05-10 19:56:55 +0800
committer  Ferruh Yigit <ferruh.yigit@intel.com>      2018-05-14 22:31:51 +0100
commit     a4996bd89c42590f8886454a06a994f71acf89dd (patch)
tree       473add5786efaaa017690953bf7f3994a2a5e9e0 /drivers/net/sfc/sfc_rx.c
parent     df428ceef4fdbceacda8d50341c25ddd46a76a39 (diff)
ethdev: new Rx/Tx offloads API
This patch checks whether a requested offload is valid: any requested offload must be present in the device capabilities. An offload is disabled by default unless it is set in dev_conf->[rt]xmode.offloads passed to rte_eth_dev_configure() or in [rt]x_conf->offloads passed to rte_eth_[rt]x_queue_setup().

If the application enables an offload in rte_eth_dev_configure(), it is enabled on all queues, regardless of whether it is of per-queue or per-port type and regardless of whether it is set or cleared in [rt]x_conf->offloads passed to rte_eth_[rt]x_queue_setup(). If a per-queue offload has not been enabled in rte_eth_dev_configure(), it can be enabled or disabled for an individual queue in rte_eth_[rt]x_queue_setup(). A newly added offload is one which was not enabled in rte_eth_dev_configure() and is requested in rte_eth_[rt]x_queue_setup(); it must be of per-queue type, otherwise an error is logged. The underlying PMD must be aware that the offloads passed to its queue_setup() function carry only such newly added per-queue offloads.

This patch performs the above checks in a common way in the rte_ethdev layer, so that the same checks do not have to be duplicated in each underlying PMD. It assumes that all PMDs in 18.05-rc2 have already been converted to the offload API defined in 17.11 and that all PMDs return correct offload capabilities in rte_eth_dev_infos_get().

At the beginning of the [rt]x_queue_setup() of the underlying PMD, add

    offloads = [rt]xconf->offloads | dev->data->dev_conf.[rt]xmode.offloads;

to keep the same behaviour as the offload API defined in 17.11 and avoid breaking upper applications due to the offload API change. A PMD can rely on the fact that the input [rt]xconf->offloads only carries the newly added per-queue offloads to do optimizations or code changes on top of this patch.

Signed-off-by: Wei Dai <wei.dai@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
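The rules above can be illustrated from the application side. The sketch below is not part of this patch; the port id, descriptor count and the mempool "mb_pool" are placeholders, only Rx is shown, and error handling is trimmed. It requests a per-port offload (checksum) in rte_eth_dev_configure() and a per-queue offload (Rx scatter) for a single queue in rte_eth_rx_queue_setup(), matching the semantics described in the commit message.

/*
 * Illustrative sketch only (not part of this patch): application-side use
 * of the new offload API. port_id and the mempool "mb_pool" are assumed
 * to exist; error handling is trimmed.
 */
#include <rte_ethdev.h>

static int
configure_port_rx(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };
	struct rte_eth_rxconf rxq_conf;
	int rc;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Per-port offloads are requested in rte_eth_dev_configure();
	 * only ask for what the device reports as supported. */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;

	rc = rte_eth_dev_configure(port_id, 1 /* nb_rxq */, 1 /* nb_txq */,
				   &port_conf);
	if (rc != 0)
		return rc;

	/* A per-queue offload that was not enabled at configure time may
	 * still be requested for an individual queue: enable Rx scatter
	 * on queue 0 only, if the PMD reports it as per-queue capable. */
	rxq_conf = dev_info.default_rxconf;
	if (dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_SCATTER)
		rxq_conf.offloads |= DEV_RX_OFFLOAD_SCATTER;

	return rte_eth_rx_queue_setup(port_id, 0 /* queue id */, 512,
				      rte_eth_dev_socket_id(port_id),
				      &rxq_conf, mb_pool);
}

On the PMD side, the corresponding pattern is the per-queue/per-port merge visible in the sfc diff below: offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads.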
Diffstat (limited to 'drivers/net/sfc/sfc_rx.c')
 drivers/net/sfc/sfc_rx.c | 67 +++-----
 1 file changed, 10 insertions(+), 57 deletions(-)
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 57ed34f..cc76a5b 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -814,48 +814,10 @@ sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
return caps;
}
-static void
-sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
- const char *verdict, uint64_t offloads)
-{
- unsigned long long bit;
-
- while ((bit = __builtin_ffsll(offloads)) != 0) {
- uint64_t flag = (1ULL << --bit);
-
- sfc_err(sa, "Rx %s offload %s %s", offload_group,
- rte_eth_dev_rx_offload_name(flag), verdict);
-
- offloads &= ~flag;
- }
-}
-
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
- uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
- uint64_t rejected = requested & ~supported;
- uint64_t missing = (requested & mandatory) ^ mandatory;
- boolean_t mismatch = B_FALSE;
-
- if (rejected) {
- sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
- mismatch = B_TRUE;
- }
-
- if (missing) {
- sfc_rx_log_offloads(sa, "queue", "must be set", missing);
- mismatch = B_TRUE;
- }
-
- return mismatch;
-}
-
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
- const struct rte_eth_rxconf *rx_conf)
+ const struct rte_eth_rxconf *rx_conf,
+ uint64_t offloads)
{
uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
sfc_rx_get_queue_offload_caps(sa);
@@ -880,17 +842,14 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
rc = EINVAL;
}
- if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+ if ((offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
DEV_RX_OFFLOAD_CHECKSUM)
sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ (~offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
- if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
- rc = EINVAL;
-
return rc;
}
@@ -1006,6 +965,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
unsigned int rxq_entries;
unsigned int evq_entries;
unsigned int rxq_max_fill_level;
+ uint64_t offloads;
uint16_t buf_size;
struct sfc_rxq_info *rxq_info;
struct sfc_evq *evq;
@@ -1020,7 +980,9 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
- rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+ offloads = rx_conf->offloads |
+ sa->eth_dev->data->dev_conf.rxmode.offloads;
+ rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
@@ -1033,7 +995,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
}
if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
"object size is too small", sw_index);
sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1056,7 +1018,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
rxq_info->type_flags =
- (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+ (offloads & DEV_RX_OFFLOAD_SCATTER) ?
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
@@ -1463,9 +1425,6 @@ static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
struct sfc_rss *rss = &sa->rss;
- uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
- uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported;
int rc = 0;
switch (rxmode->mq_mode) {
@@ -1484,12 +1443,6 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
rc = EINVAL;
}
- if (offloads_rejected) {
- sfc_rx_log_offloads(sa, "device", "is unsupported",
- offloads_rejected);
- rc = EINVAL;
- }
-
if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
sfc_warn(sa, "FCS stripping cannot be disabled - always on");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;