path: root/drivers/net/mlx5/mlx5_txq.c
diff options
authorMatan Azrad <>2019-05-30 10:20:37 +0000
committerFerruh Yigit <>2019-06-14 00:01:06 +0900
commit957e45fb7bcbcfa1dec7ebca2ffe3a811c5bafca (patch)
tree7443cea2f3d4de51b2933a7a3e7be56db625a834 /drivers/net/mlx5/mlx5_txq.c
parent88c0733535d6a7ce79045d4d57a1d78d904067c8 (diff)
net/mlx5: handle Tx completion with error
When WQEs are posted to the HW to send packets, the PMD may get a completion report with error from the HW, aka error CQE which is associated to a bad WQE. The error reason may be bad address, wrong lkey, bad sizes, etc. that can wrongly be configured by the PMD or by the user. Checking all the optional mistakes to prevent error CQEs doesn't make sense due to performance impacts and huge complexity. The error CQEs change the SQ state to error state what causes all the next posted WQEs to be completed with CQE flush error forever. Currently, the PMD doesn't handle Tx error CQEs and even may crashed when one of them appears. Extend the Tx data-path to detect these error CQEs, to report them by the statistics error counters, to recover the SQ by moving the state to ready again and adjusting the management variables appropriately. Sometimes the error CQE root cause is very hard to debug and even may be related to some corner cases which are not reproducible easily, hence a dump file with debug information will be created for the first number of error CQEs, this number can be configured by the PMD probe parameters. Cc: Signed-off-by: Matan Azrad <> Acked-by: Shahaf Shuler <>
Diffstat (limited to 'drivers/net/mlx5/mlx5_txq.c')
1 file changed, 2 insertions, 2 deletions
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 289024c..ebb42cb 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -430,8 +430,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
attr.cq = (struct ibv_cq_init_attr_ex){
.comp_mask = 0,
- cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
- ((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
+ cqe_n = desc / MLX5_TX_COMP_THRESH + 1;
if (is_empw_burst_func(tx_pkt_burst))
tmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
@@ -563,6 +562,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
txq_ibv->cq = tmpl.cq;
txq_ctrl->bf_reg =;
+ txq_ctrl->cqn = cq_info.cqn;
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;