summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDekel Peled <dekelp@mellanox.com>2019-07-22 14:52:12 +0000
committerFerruh Yigit <ferruh.yigit@intel.com>2019-07-23 14:31:36 +0200
commit15c80a126dcf0684c7a39272ddb72710cd75c2f9 (patch)
treee2d8f5f297ba649f951c3e325beb133c2a8bb4cf
parent93403560ba1f2a6de2630ebf61f63c9a17176b7a (diff)
downloaddpdk-15c80a126dcf0684c7a39272ddb72710cd75c2f9.zip
dpdk-15c80a126dcf0684c7a39272ddb72710cd75c2f9.tar.gz
dpdk-15c80a126dcf0684c7a39272ddb72710cd75c2f9.tar.xz
net/mlx5: rename verbs indirection table to obj
Prepare for the introduction of the DevX RQT object. The Rx indirection table object is currently created using Verbs only. The next patches will add the option to create an RQT object using DevX. This patch renames ind_table_ibv to ind_table_obj wherever relevant, and adds the DevX items to the relevant structs. Signed-off-by: Dekel Peled <dekelp@mellanox.com> Acked-by: Matan Azrad <matan@mellanox.com> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
-rw-r--r--drivers/net/mlx5/mlx5.c2
-rw-r--r--drivers/net/mlx5/mlx5.h4
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c64
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.h20
4 files changed, 50 insertions, 40 deletions
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fbcc93b..a3a9013 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -819,7 +819,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (ret)
DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
dev->data->port_id);
- ret = mlx5_ind_table_ibv_verify(dev);
+ ret = mlx5_ind_table_obj_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some indirection table still remain",
dev->data->port_id);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 697e6c4..b080064 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -617,8 +617,8 @@ struct mlx5_priv {
LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
- /* Verbs Indirection tables. */
- LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
+ /* Indirection tables. */
+ LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
/* Pointer to next element. */
rte_atomic32_t refcnt; /**< Reference counter. */
struct ibv_flow_action *verbs_action;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 20a4695..507a1ab 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1576,14 +1576,14 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
* Number of queues in the array.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
log2above(priv->config.ind_table_max_size);
@@ -1642,12 +1642,12 @@ error:
* @return
* An indirection table if found.
*/
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
if ((ind_tbl->queues_n == queues_n) &&
@@ -1678,8 +1678,8 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
* 1 while a reference on it exists, 0 when freed.
*/
static int
-mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
- struct mlx5_ind_table_ibv *ind_tbl)
+mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
{
unsigned int i;
@@ -1706,15 +1706,15 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
* The number of object not released.
*/
int
-mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
int ret = 0;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
DRV_LOG(DEBUG,
- "port %u Verbs indirection table %p still referenced",
+ "port %u indirection table obj %p still referenced",
dev->data->port_id, (void *)ind_tbl);
++ret;
}
@@ -1752,7 +1752,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
struct ibv_qp *qp;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
struct mlx5dv_qp_init_attr qp_init_attr;
@@ -1760,9 +1760,9 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
int err;
queues_n = hash_fields ? queues_n : 1;
- ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
- ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
@@ -1844,7 +1844,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
return hrxq;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_ind_table_ibv_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl);
if (qp)
claim_zero(mlx5_glue->destroy_qp(qp));
rte_errno = err; /* Restore rte_errno. */
@@ -1878,7 +1878,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
queues_n = hash_fields ? queues_n : 1;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
if (hrxq->rss_key_len != rss_key_len)
continue;
@@ -1886,11 +1886,11 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
continue;
if (hrxq->hash_fields != hash_fields)
continue;
- ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
if (ind_tbl != hrxq->ind_table) {
- mlx5_ind_table_ibv_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl);
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
@@ -1918,12 +1918,12 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
return 0;
}
- claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+ claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
return 1;
}
@@ -2042,15 +2042,15 @@ mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
* Pointer to Ethernet device.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
struct mlx5_rxq_obj *rxq;
- struct mlx5_ind_table_ibv tmpl;
+ struct mlx5_ind_table_obj tmpl;
rxq = mlx5_rxq_obj_drop_new(dev);
if (!rxq)
@@ -2088,10 +2088,10 @@ error:
* Pointer to Ethernet device.
*/
static void
-mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+ struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
mlx5_rxq_obj_drop_release(dev);
@@ -2112,7 +2112,7 @@ struct mlx5_hrxq *
mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
struct ibv_qp *qp;
struct mlx5_hrxq *hrxq;
@@ -2120,7 +2120,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
return priv->drop_queue.hrxq;
}
- ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+ ind_tbl = mlx5_ind_table_obj_drop_new(dev);
if (!ind_tbl)
return NULL;
qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
@@ -2168,7 +2168,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
return hrxq;
error:
if (ind_tbl)
- mlx5_ind_table_ibv_drop_release(dev);
+ mlx5_ind_table_obj_drop_release(dev);
return NULL;
}
@@ -2189,7 +2189,7 @@ mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_ind_table_ibv_drop_release(dev);
+ mlx5_ind_table_obj_drop_release(dev);
rte_free(hrxq);
priv->drop_queue.hrxq = NULL;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index eb20a07..0e7b428 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -176,11 +176,21 @@ struct mlx5_rxq_ctrl {
uint16_t dump_file_n; /* Number of dump files. */
};
+enum mlx5_ind_tbl_type {
+ MLX5_IND_TBL_TYPE_IBV,
+ MLX5_IND_TBL_TYPE_DEVX,
+};
+
/* Indirection table. */
-struct mlx5_ind_table_ibv {
- LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
+struct mlx5_ind_table_obj {
+ LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
- struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+ enum mlx5_ind_tbl_type type;
+ RTE_STD_C11
+ union {
+ struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+ struct mlx5_devx_obj *rqt; /* DevX RQT object. */
+ };
uint32_t queues_n; /**< Number of queues in the list. */
uint16_t queues[]; /**< Queue list. */
};
@@ -189,7 +199,7 @@ struct mlx5_ind_table_ibv {
struct mlx5_hrxq {
LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
- struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
+ struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
void *action; /* DV QP action pointer. */
@@ -320,7 +330,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
-int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,