author    Ori Kam <orika@mellanox.com>    2018-09-24 23:17:43 +0000
committer Ferruh Yigit <ferruh.yigit@intel.com>    2018-10-11 18:53:49 +0200
commit    3d69434113d169a914db7a9a3cbaedd3563a701d (patch)
tree      c46ac2ab14bca9e8650db8e47fd5584ce63f00db /drivers/net/mlx5/mlx5_flow_dv.c
parent    84c406e74524e0a81e4d77034e7e4e72a32e5dd1 (diff)
net/mlx5: add Direct Verbs validation function
This commit introduces the Direct Verbs driver API. Direct Verbs is an API that adds new features such as encapsulation and matching on metadata. This commit adds the validation function; most of the validation is done with functions that are also used by the Verbs API.

Signed-off-by: Ori Kam <orika@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
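For illustration only, and not part of the commit: a minimal sketch of the kind of rte_flow rule this validation path is meant to accept, expressed through the public rte_flow API. The port id, the queue index and the bare error handling are placeholder assumptions; only the attribute, item and action types correspond to what the new flow_dv_validate() checks.

/* Illustrative sketch: an ingress ETH / IPV4 / UDP rule with a QUEUE fate
 * action; the DV validation path rejects egress, transfer and group != 0. */
#include <rte_flow.h>

static int
example_validate_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* On an mlx5 port with DV flow support, validation is expected to
	 * reach flow_dv_validate() through the driver ops table. */
	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}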
Diffstat (limited to 'drivers/net/mlx5/mlx5_flow_dv.c')
-rw-r--r--  drivers/net/mlx5/mlx5_flow_dv.c  316
1 file changed, 316 insertions, 0 deletions
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
new file mode 100644
index 0000000..41cf08f
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <sys/queue.h>
+#include <stdalign.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ethdev_driver.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+
+#include "mlx5.h"
+#include "mlx5_defs.h"
+#include "mlx5_prm.h"
+#include "mlx5_glue.h"
+#include "mlx5_flow.h"
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+
+/**
+ * Verify that the @p attributes will be correctly understood by the NIC and
+ * are supported by this driver.
+ *
+ * @param[in] dev
+ * Pointer to dev struct.
+ * @param[in] attributes
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint32_t priority_max = priv->config.flow_prio - 1;
+
+ if (attributes->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priority out of range");
+ if (attributes->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ if (attributes->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ if (!attributes->ingress)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "ingress attribute is mandatory");
+ return 0;
+}
+
+/**
+ * Internal validation function. It validates both actions and items.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint32_t action_flags = 0;
+ uint32_t item_flags = 0;
+ int tunnel = 0;
+ uint8_t next_protocol = 0xff;
+ int actions_n = 0;
+
+ if (items == NULL)
+ return -1;
+ ret = flow_dv_validate_attributes(dev, attr, error);
+ if (ret < 0)
+ return ret;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_validate_item_eth(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = mlx5_flow_validate_item_vlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id)
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto)
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_validate_item_udp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_validate_item_tcp(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_validate_item_vxlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_validate_item_vxlan_gpe(items,
+ item_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_validate_item_gre(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_validate_item_mpls(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_MPLS;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "too many actions");
+ tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ ret = mlx5_flow_validate_action_flag(action_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = mlx5_flow_validate_action_mark(actions,
+ action_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_validate_action_drop(action_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_validate_action_queue(actions,
+ action_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mlx5_flow_validate_action_rss(actions,
+ action_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_validate_action_count(dev, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ ++actions_n;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "no fate action is found");
+ return 0;
+}
+
+/**
+ * Fills the flow_ops with the function pointers.
+ *
+ * @param[out] flow_ops
+ * Pointer to driver_ops structure.
+ */
+void
+mlx5_flow_dv_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops)
+{
+ *flow_ops = (struct mlx5_flow_driver_ops) {
+ .validate = flow_dv_validate,
+ .prepare = NULL,
+ .translate = NULL,
+ .apply = NULL,
+ .remove = NULL,
+ .destroy = NULL,
+ };
+}
+
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
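For illustration only, and not part of the patch: a minimal sketch of how a caller in the generic mlx5 flow layer might dispatch validation through the ops table filled in by mlx5_flow_dv_get_driver_ops(). The wrapper name and the way the table is obtained are assumptions; the structure fields and flow_dv_validate() itself come from this file.

/* Hypothetical dispatch wrapper; only mlx5_flow_driver_ops and
 * flow_dv_validate() are defined by this patch. */
static int
example_dv_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct mlx5_flow_driver_ops ops;

	mlx5_flow_dv_get_driver_ops(&ops);
	if (ops.validate == NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "no validate callback");
	return ops.validate(dev, attr, items, actions, error);
}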