author    Yongseok Koh <yskoh@mellanox.com>    2018-06-26 04:33:35 -0700
committer Ferruh Yigit <ferruh.yigit@intel.com>    2018-07-03 01:35:58 +0200
commit    e10245a13b2e340f48ce80484f19bcbc13e9ebe6 (patch)
tree      8f0de8d640641bcdaab729683b5733842c0b97cd /drivers/net/mlx5/mlx5_rxtx_vec.h
parent    342a7bdd6e413cbb360289811601c516ec8b56a0 (diff)
net/mlx5: fix Rx buffer replenishment threshold
The threshold of buffer replenishment for vectorized Rx burst is a
constant value (64). If the Rx queue is comparatively small, the device
can run out of buffers. For example, if the Rx queue size is 128,
buffers are replenished only twice per wraparound. This can cause
jitter in packet reception, and that jitter can trigger unnecessary
retransmissions on TCP connections.

Fixes: 6cb559d67b83 ("net/mlx5: add vectorized Rx/Tx burst for x86")
Fixes: 570acdb1da8a ("net/mlx5: add vectorized Rx/Tx burst for ARM")
Cc: stable@dpdk.org

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
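The fix replaces the constant with a macro parameterized by the queue
size. The new definition lives outside this file and is not shown in
this diffstat-limited view; the following is a plausible sketch of it,
inferred from the call sites in the diff below rather than copied from
this page:

/* Maximum size of burst for vectorized Rx (the old fixed threshold). */
#define MLX5_VPMD_RX_MAX_BURST 64U

/*
 * Replenishment threshold scaled by the queue size n: replenish once
 * a quarter of the ring has been consumed, capped at the previous
 * constant of 64.
 */
#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
	(RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))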
Diffstat (limited to 'drivers/net/mlx5/mlx5_rxtx_vec.h')
-rw-r--r-- drivers/net/mlx5/mlx5_rxtx_vec.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 598dc75..fb884f9 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -91,9 +91,9 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
 		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
 	unsigned int i;
 
-	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
 	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
-	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
 	/* Not to cross queue end. */
 	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
 	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
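To see the effect of the size-relative threshold concretely, here is a
minimal standalone sketch; the RPLNSH_THRESH macro mirrors the assumed
definition above and is not taken from this page:

#include <stdio.h>

/* Assumed definitions, mirroring the sketch above. */
#define MLX5_VPMD_RX_MAX_BURST 64U
#define RPLNSH_THRESH(n) \
	((n) >> 2 < MLX5_VPMD_RX_MAX_BURST ? (n) >> 2 : MLX5_VPMD_RX_MAX_BURST)

int main(void)
{
	/*
	 * With the old constant threshold of 64, a 128-entry ring was
	 * replenished only twice per wraparound; a size-relative
	 * threshold replenishes every quarter of the ring instead.
	 */
	unsigned int sizes[] = {128, 256, 1024, 4096};
	for (unsigned int i = 0; i < 4; i++)
		printf("q_n=%4u -> replenish threshold=%u\n",
		       sizes[i], RPLNSH_THRESH(sizes[i]));
	return 0;
}

For q_n = 128 this yields a threshold of 32, i.e. four replenishments
per wraparound instead of two, smoothing buffer availability for small
queues while leaving large queues (threshold capped at 64) unchanged.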