author    Hyong Youb Kim <hyonkim@cisco.com>      2018-09-25 19:54:21 -0700
committer Ferruh Yigit <ferruh.yigit@intel.com>   2018-10-11 18:53:48 +0200
commit    70401fd7784db6f55cce87b91e19d364edce506a (patch)
tree      db9dd76cea4cad24e93e8849b70b3f030eafbb0e
parent    828cf603a159edcd2dc0aab936dc9ff402b8ab11 (diff)
net/enic: add VLAN and csum offloads to simple Tx handler
Currently the simple Tx handler supports no offloads, which makes it usable
only for a small number of benchmarks. Add vlan and checksum offloads to the
handler, as cycles/packet increases only by about 3 cycles, and applications
commonly use those offloads.

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: John Daley <johndale@cisco.com>
-rw-r--r--  drivers/net/enic/enic_main.c | 14
-rw-r--r--  drivers/net/enic/enic_rxtx.c | 21
2 files changed, 32 insertions(+), 3 deletions(-)
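As a usage note (not part of this commit): a minimal sketch of how an application could request only the offloads the simple Tx handler now supports, so that enic selects it at start time. The port id, queue counts, and helper name below are assumptions for illustration.

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: request only offloads the simple Tx path handles. */
static int
configure_simple_tx_port(void)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.txmode.offloads = DEV_TX_OFFLOAD_VLAN_INSERT |
			       DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_UDP_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM;
	/* Requesting any Tx offload outside this set makes enic fall back
	 * to the full-featured Tx handler. */
	return rte_eth_dev_configure(0 /* port */, 1 /* rx queues */,
				     1 /* tx queues */, &conf);
}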
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index fd940c5..03c5ef7 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -534,6 +534,7 @@ int enic_enable(struct enic *enic)
unsigned int index;
int err;
struct rte_eth_dev *eth_dev = enic->rte_dev;
+ uint64_t simple_tx_offloads;
eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -572,10 +573,17 @@ int enic_enable(struct enic *enic)
}
/*
- * Use the simple TX handler if possible. All offloads must be
- * disabled.
+ * Use the simple TX handler if possible. Only checksum offloads
+ * and vlan insertion are supported.
*/
- if (eth_dev->data->dev_conf.txmode.offloads == 0) {
+ simple_tx_offloads = enic->tx_offload_capa &
+ (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
+ if ((eth_dev->data->dev_conf.txmode.offloads &
+ ~simple_tx_offloads) == 0) {
PMD_INIT_LOG(DEBUG, " use the simple tx handler");
eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
for (index = 0; index < enic->wq_count; index++)
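For illustration (not from this patch): a sketch of the per-mbuf fields the simple handler now consumes before the packet is handed to rte_eth_tx_burst(). The helper name, the VLAN tag value, and the assumption of a plain IPv4/TCP frame are hypothetical; l2_len/l3_len are set because the generic mbuf offload API expects them when checksum flags are requested.

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* Hypothetical helper: mark an IPv4/TCP mbuf for VLAN insertion and
 * IP/TCP checksum offload, the offloads the simple handler now honors. */
static void
prep_simple_tx_mbuf(struct rte_mbuf *m)
{
	m->vlan_tci = 100;	/* tag to insert (example value) */
	m->ol_flags |= PKT_TX_VLAN | PKT_TX_IPV4 |
		       PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
	m->l2_len = sizeof(struct ether_hdr);	/* header offsets for the offload API */
	m->l3_len = sizeof(struct ipv4_hdr);
}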
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 8d57c41..276a2e5 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -842,12 +842,33 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,
struct enic *enic)
{
struct rte_mbuf *p;
+ uint16_t mss;
while (n) {
n--;
p = *pkts++;
desc->address = p->buf_iova + p->data_off;
desc->length = p->pkt_len;
+ /* VLAN insert */
+ desc->vlan_tag = p->vlan_tci;
+ desc->header_length_flags &=
+ ((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
+ (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
+ if (p->ol_flags & PKT_TX_VLAN) {
+ desc->header_length_flags |=
+ 1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
+ }
+ /*
+ * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which
+ * is 0, so no need to set offload_mode.
+ */
+ mss = 0;
+ if (p->ol_flags & PKT_TX_IP_CKSUM)
+ mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
+ if (p->ol_flags & PKT_TX_L4_MASK)
+ mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
+ desc->mss_loopback = mss;
+
/*
* The app should not send oversized
* packets. tx_pkt_prepare includes a check as