author     Hyong Youb Kim <hyonkim@cisco.com>       2019-03-02 02:42:47 -0800
committer  Ferruh Yigit <ferruh.yigit@intel.com>    2019-03-08 17:52:22 +0100
commit     477959e6eeb0a2687d45474e67864714e8340a5c (patch)
tree       401d5ad8b0f8acb268257a91124a0fb3e11d64bd /drivers/net/enic
parent     60c6acb43d3b3970ec077134662e210bf80c037e (diff)
net/enic: enable limited support for raw flow item
Some apps like VPP use a raw item to match UDP tunnel headers like VXLAN or GENEVE. The NIC hardware supports such usage via L5 match, which does pattern match on packet data immediately following the outer L4 header. Accept raw items for these limited use cases.

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
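For context, a minimal sketch (not part of the patch) of how an application might express such a pattern through the generic rte_flow API, within the limits the driver accepts below: the raw item must follow UDP, use a relative match at offset 0 with no search/limit, and supply both spec and mask. The function name, port id, queue index, and VNI value are assumptions for illustration only.

    #include <stdint.h>
    #include <rte_flow.h>

    /* Hypothetical helper: steer VXLAN packets with a given VNI to a queue
     * by matching the 8-byte VXLAN header that follows the outer UDP header.
     */
    static struct rte_flow *
    create_vxlan_vni_flow(uint16_t port_id, uint16_t queue_id, uint32_t vni)
    {
        /* VXLAN header: flags(1) + reserved(3) + VNI(3) + reserved(1) */
        uint8_t vxlan_spec[8] = { 0x08, 0, 0, 0, 0, 0, 0, 0 };
        uint8_t vxlan_mask[8] = { 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0 };
        struct rte_flow_item_raw raw_spec = {
            .relative = 1,              /* pattern starts right after outer L4 */
            .offset = 0,
            .length = sizeof(vxlan_spec),
            .pattern = vxlan_spec,
        };
        struct rte_flow_item_raw raw_mask = {
            .relative = 1,
            .length = sizeof(vxlan_mask), /* 0 would be treated as "same as spec" */
            .pattern = vxlan_mask,
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },   /* outer headers as wildcards */
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },   /* raw item must follow UDP */
            { .type = RTE_FLOW_ITEM_TYPE_RAW,
              .spec = &raw_spec, .mask = &raw_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = queue_id };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error err;

        /* VNI occupies bytes 4..6 of the VXLAN header, in network order */
        vxlan_spec[4] = (vni >> 16) & 0xff;
        vxlan_spec[5] = (vni >> 8) & 0xff;
        vxlan_spec[6] = vni & 0xff;
        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }

Note that the outer ETH/IPV4/UDP items carry no spec here and act as wildcards; only the tunnel header bytes after the outer UDP header are constrained, which is exactly the L5 match the patch enables.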
Diffstat (limited to 'drivers/net/enic')
-rw-r--r--  drivers/net/enic/enic_flow.c | 65
1 file changed, 65 insertions, 0 deletions
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index fda641b..ffc6ce1 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -77,6 +77,7 @@ struct enic_action_cap {
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
+static enic_copy_item_fn enic_copy_item_raw_v2;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
@@ -123,6 +124,14 @@ static const struct enic_items enic_items_v1[] = {
* that layer 3 must be specified.
*/
static const struct enic_items enic_items_v2[] = {
+ [RTE_FLOW_ITEM_TYPE_RAW] = {
+ .copy_item = enic_copy_item_raw_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
[RTE_FLOW_ITEM_TYPE_ETH] = {
.copy_item = enic_copy_item_eth_v2,
.valid_start_item = 1,
@@ -196,6 +205,14 @@ static const struct enic_items enic_items_v2[] = {
/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
+ [RTE_FLOW_ITEM_TYPE_RAW] = {
+ .copy_item = enic_copy_item_raw_v2,
+ .valid_start_item = 0,
+ .prev_items = (const enum rte_flow_item_type[]) {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
[RTE_FLOW_ITEM_TYPE_ETH] = {
.copy_item = enic_copy_item_eth_v2,
.valid_start_item = 1,
@@ -835,6 +852,54 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg)
return 0;
}
+/*
+ * Copy raw item into version 2 NIC filter. Currently, raw pattern match is
+ * very limited. It is intended for matching UDP tunnel header (e.g. vxlan
+ * or geneve).
+ */
+static int
+enic_copy_item_raw_v2(struct copy_item_args *arg)
+{
+ const struct rte_flow_item *item = arg->item;
+ struct filter_v2 *enic_filter = arg->filter;
+ uint8_t *inner_ofst = arg->inner_ofst;
+ const struct rte_flow_item_raw *spec = item->spec;
+ const struct rte_flow_item_raw *mask = item->mask;
+ struct filter_generic_1 *gp = &enic_filter->u.generic_1;
+
+ FLOW_TRACE();
+
+ /* Cannot be used for inner packet */
+ if (*inner_ofst)
+ return EINVAL;
+ /* Need both spec and mask */
+ if (!spec || !mask)
+ return EINVAL;
+ /* Only supports relative with offset 0 */
+ if (!spec->relative || spec->offset != 0 || spec->search || spec->limit)
+ return EINVAL;
+ /* Need non-null pattern that fits within the NIC's filter pattern */
+ if (spec->length == 0 || spec->length > FILTER_GENERIC_1_KEY_LEN ||
+ !spec->pattern || !mask->pattern)
+ return EINVAL;
+ /*
+ * Mask fields, including length, are often set to zero. Assume that
+ * means "same as spec" to avoid breaking existing apps. If length
+ * is not zero, then it should be >= spec length.
+ *
+ * No more pattern follows this, so append to the L4 layer instead of
+ * L5 to work with both recent and older VICs.
+ */
+ if (mask->length != 0 && mask->length < spec->length)
+ return EINVAL;
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
+ mask->pattern, spec->length);
+ memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
+ spec->pattern, spec->length);
+
+ return 0;
+}
+
/**
* Return 1 if current item is valid on top of the previous one.
*