summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorViacheslav Ovsiienko <viacheslavo@mellanox.com>2019-11-07 17:10:01 +0000
committerFerruh Yigit <ferruh.yigit@intel.com>2019-11-11 14:23:02 +0100
commita18ac611333153e0f2f0e257df411e08254be9f3 (patch)
tree84bfe56db4e825f61c4bab4492565e289c0d202e
parentfcc8d2f716fd2979dc180b9910ba3fc4b0822bc6 (diff)
downloaddpdk-a18ac611333153e0f2f0e257df411e08254be9f3.zip
dpdk-a18ac611333153e0f2f0e257df411e08254be9f3.tar.gz
dpdk-a18ac611333153e0f2f0e257df411e08254be9f3.tar.xz
net/mlx5: add metadata support to Rx datapath
This patch moves metadata from completion descriptor to appropriate dynamic mbuf field. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Matan Azrad <matan@mellanox.com>
-rw-r--r--drivers/net/mlx5/mlx5_prm.h6
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.c5
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_altivec.h25
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_neon.h23
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec_sse.h27
5 files changed, 78 insertions, 8 deletions
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index b405cb6..a0c37c8 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -357,12 +357,14 @@ struct mlx5_cqe {
uint16_t hdr_type_etc;
uint16_t vlan_info;
uint8_t lro_num_seg;
- uint8_t rsvd3[11];
+ uint8_t rsvd3[3];
+ uint32_t flow_table_metadata;
+ uint8_t rsvd4[4];
uint32_t byte_cnt;
uint64_t timestamp;
uint32_t sop_drop_qpn;
uint16_t wqe_counter;
- uint8_t rsvd4;
+ uint8_t rsvd5;
uint8_t op_own;
};
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 887e283..f28a909 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -26,6 +26,7 @@
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
+#include <rte_flow.h>
#include "mlx5.h"
#include "mlx5_utils.h"
@@ -1251,6 +1252,10 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
}
}
+ if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
+ pkt->ol_flags |= PKT_RX_DYNF_METADATA;
+ *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
+ }
if (rxq->csum)
pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
if (rxq->vlan_strip &&
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
index 3be3a6d..8e79883 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
@@ -416,7 +416,6 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
vec_cmpeq((vector unsigned int)flow_tag,
(vector unsigned int)pinfo_ft_mask)));
}
-
/*
* Merge the two fields to generate the following:
* bit[1] = l3_ok
@@ -1011,7 +1010,29 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
pkts[pos + 3]->timestamp =
rte_be_to_cpu_64(cq[pos + p3].timestamp);
}
-
+ if (rte_flow_dynf_metadata_avail()) {
+ uint64_t flag = rte_flow_dynf_metadata_mask;
+ int offs = rte_flow_dynf_metadata_offs;
+ uint32_t metadata;
+
+ /* This code is subject to further optimization. */
+ metadata = cq[pos].flow_table_metadata;
+ *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
+ metadata;
+ pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
+ metadata = cq[pos + 1].flow_table_metadata;
+ *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
+ metadata;
+ pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
+ metadata = cq[pos + 2].flow_table_metadata;
+ *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
+ metadata;
+ pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
+ metadata = cq[pos + 3].flow_table_metadata;
+ *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
+ metadata;
+ pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Add up received bytes count. */
byte_cnt = vec_perm(op_own, zero, len_shuf_mask);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index e914d01..86785c7 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -687,6 +687,29 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
container_of(p3, struct mlx5_cqe,
pkt_info)->timestamp);
}
+ if (rte_flow_dynf_metadata_avail()) {
+ /* This code is subject to further optimization. */
+ *RTE_FLOW_DYNF_METADATA(elts[pos]) =
+ container_of(p0, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(elts[pos + 1]) =
+ container_of(p1, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(elts[pos + 2]) =
+ container_of(p2, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(elts[pos + 3]) =
+ container_of(p3, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos]))
+ elts[pos]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos + 1]))
+ elts[pos + 1]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos + 2]))
+ elts[pos + 2]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos + 3]))
+ elts[pos + 3]->ol_flags |= PKT_RX_DYNF_METADATA;
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Add up received bytes count. */
byte_cnt = vbic_u16(byte_cnt, invalid_mask);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index ca8ed41..35b7761 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -537,8 +537,8 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].csum);
cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
- cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd3[9]);
- cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd3[9]);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd4[2]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd4[2]);
cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
/* C.2 generate final structure for mbuf with swapping bytes. */
@@ -564,8 +564,8 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].csum);
cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
- cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd3[9]);
- cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd3[9]);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd4[2]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd4[2]);
cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
/* C.2 generate final structure for mbuf with swapping bytes. */
@@ -640,6 +640,25 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
pkts[pos + 3]->timestamp =
rte_be_to_cpu_64(cq[pos + p3].timestamp);
}
+ if (rte_flow_dynf_metadata_avail()) {
+ /* This code is subject to further optimization. */
+ *RTE_FLOW_DYNF_METADATA(pkts[pos]) =
+ cq[pos].flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(pkts[pos + 1]) =
+ cq[pos + p1].flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(pkts[pos + 2]) =
+ cq[pos + p2].flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(pkts[pos + 3]) =
+ cq[pos + p3].flow_table_metadata;
+ if (*RTE_FLOW_DYNF_METADATA(pkts[pos]))
+ pkts[pos]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(pkts[pos + 1]))
+ pkts[pos + 1]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(pkts[pos + 2]))
+ pkts[pos + 2]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(pkts[pos + 3]))
+ pkts[pos + 3]->ol_flags |= PKT_RX_DYNF_METADATA;
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Add up received bytes count. */
byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);