summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorXiaoyu Min <jackmin@mellanox.com>2019-10-17 13:49:46 +0300
committerFerruh Yigit <ferruh.yigit@intel.com>2019-10-23 16:43:10 +0200
commit8e2f25cf3c14a9d4c4ee7cf5c6971e9fd5ad64c4 (patch)
tree81e988141118ac7089602b7ff8962b0e57f6aa35
parent5ad3db8d4bdd7dfca619f7acd969f09cf6adb1df (diff)
downloaddpdk-next-eventdev-8e2f25cf3c14a9d4c4ee7cf5c6971e9fd5ad64c4.zip
dpdk-next-eventdev-8e2f25cf3c14a9d4c4ee7cf5c6971e9fd5ad64c4.tar.gz
dpdk-next-eventdev-8e2f25cf3c14a9d4c4ee7cf5c6971e9fd5ad64c4.tar.xz
net/mlx5: fix crash on hash Rx queue handling for drop
When creating an hrxq for the drop queue, QP creation could fail and take the error path, which releases the already-created ind_table by calling the drop release function. That function takes the rte_ethdev as its only parameter and uses priv->drop_queue.hrxq as the object to release. Unfortunately, at that point the hrxq has not been allocated yet and priv->drop_queue.hrxq is still NULL, which leads to a segfault. This patch fixes the above by allocating the hrxq first, so that when an error happens the hrxq is released last. This patch also releases the other allocated resources in the correct order, which was previously missing. Fixes: 78be885295b8 ("net/mlx5: handle drop queues as regular queues") Cc: stable@dpdk.org Reported-by: Zengmo Gao <gaozengmo@jd.com> Signed-off-by: Xiaoyu Min <jackmin@mellanox.com> Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c38
1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 0db065a..f0ab843 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2536,17 +2536,27 @@ struct mlx5_hrxq *
mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_obj *ind_tbl;
- struct ibv_qp *qp;
- struct mlx5_hrxq *hrxq;
+ struct mlx5_ind_table_obj *ind_tbl = NULL;
+ struct ibv_qp *qp = NULL;
+ struct mlx5_hrxq *hrxq = NULL;
if (priv->drop_queue.hrxq) {
rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
return priv->drop_queue.hrxq;
}
+ hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+ if (!hrxq) {
+ DRV_LOG(WARNING,
+ "port %u cannot allocate memory for drop queue",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ priv->drop_queue.hrxq = hrxq;
ind_tbl = mlx5_ind_table_obj_drop_new(dev);
if (!ind_tbl)
- return NULL;
+ goto error;
+ hrxq->ind_table = ind_tbl;
qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
@@ -2570,15 +2580,6 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
rte_errno = errno;
goto error;
}
- hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
- if (!hrxq) {
- DRV_LOG(WARNING,
- "port %u cannot allocate memory for drop queue",
- dev->data->port_id);
- rte_errno = ENOMEM;
- goto error;
- }
- hrxq->ind_table = ind_tbl;
hrxq->qp = qp;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
@@ -2587,12 +2588,21 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
goto error;
}
#endif
- priv->drop_queue.hrxq = hrxq;
rte_atomic32_set(&hrxq->refcnt, 1);
return hrxq;
error:
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (hrxq && hrxq->action)
+ mlx5_glue->destroy_flow_action(hrxq->action);
+#endif
+ if (qp)
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
if (ind_tbl)
mlx5_ind_table_obj_drop_release(dev);
+ if (hrxq) {
+ priv->drop_queue.hrxq = NULL;
+ rte_free(hrxq);
+ }
return NULL;
}