author     Dekel Peled <dekelp@mellanox.com>        2019-09-11 14:03:36 +0300
committer  Ferruh Yigit <ferruh.yigit@intel.com>    2019-09-20 10:19:41 +0200
commit     b67b4ecbde22015b48d27d1ece9d05128666ad87 (patch)
tree       668bd317cffa8de8af8ddf6532db30cf79ebe23b /drivers/net/mlx5/mlx5.h
parent     9763595925aa0488392035dbb15522dc2200c6d4 (diff)
net/mlx5: skip table zero to improve insertion rate
E-switch tables one and above provide a higher insertion rate than table zero, as well as enhanced functionality. This patch adds a mechanism to take advantage of this by creating a default rule on port start, which directs all packets from e-switch table zero to table one. Other flow rules, requested for group n, are created in e-switch table n+1, and a jump action targeting e-switch group n is translated to a jump to table n+1. The utility function mlx5_flow_group_to_table() is added to translate the rte_flow group value to the HW table value; it is called by the PMD flow engine on flow rule validation and creation.

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
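For illustration only, a minimal sketch of the group-to-table mapping described above, assuming a hypothetical helper (the name, signature, and overflow check below are illustrative and not the actual mlx5_flow_group_to_table() implementation): transfer (e-switch) rules for group n land in table n + 1, leaving HW table zero free for the default jump rule, while non-transfer rules keep a 1:1 mapping.

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

/*
 * Illustrative sketch only: map an rte_flow group number to a HW table
 * number following the scheme described in the commit message.
 */
static int
esw_group_to_table(bool transfer, uint32_t group, uint32_t *table)
{
	if (transfer) {
		/* Reject a group that would overflow the table range. */
		if (group == UINT32_MAX)
			return -EINVAL;
		/* E-switch rules are shifted by one to skip table zero. */
		*table = group + 1;
	} else {
		/* NIC (non-e-switch) rules keep a 1:1 group/table mapping. */
		*table = group;
	}
	return 0;
}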
Diffstat (limited to 'drivers/net/mlx5/mlx5.h')
-rw-r--r--  drivers/net/mlx5/mlx5.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 239b56c..7982490 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -513,8 +513,8 @@ struct mlx5_flow_tbl_resource {
rte_atomic32_t refcnt; /**< Reference counter. */
};
-#define MLX5_MAX_TABLES 0xffff
-#define MLX5_MAX_TABLES_FDB 0xffff
+#define MLX5_MAX_TABLES UINT16_MAX
+#define MLX5_MAX_TABLES_FDB UINT16_MAX
#define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */
#define MLX5_DBR_SIZE 8
@@ -829,6 +829,7 @@ int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
int mlx5_ctrl_flow(struct rte_eth_dev *dev,
struct rte_flow_item_eth *eth_spec,
struct rte_flow_item_eth *eth_mask);
+struct rte_flow *mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev);
int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
void mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
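The mlx5_flow_create_esw_table_zero_flow() declaration added above is the hook for installing the default rule at port start. The PMD builds that rule internally; as a rough, hedged equivalent, the same intent expressed through the public rte_flow API (the function name and port_id parameter below are assumptions for illustration, not driver code) would be a transfer rule in group 0 whose only action is a jump to group 1:

#include <rte_flow.h>

/*
 * Illustrative sketch: a transfer (e-switch) rule in group 0 that matches
 * any Ethernet packet and jumps to group 1. The driver's internal default
 * rule serves the same purpose at the HW table level.
 */
static struct rte_flow *
create_default_jump_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = {
		.group = 0,
		.transfer = 1, /* e-switch (FDB) domain */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_jump jump = { .group = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}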