summaryrefslogtreecommitdiff
path: root/drivers/net/mlx5/mlx5_rxtx_vec.h
diff options
context:
space:
mode:
authorYongseok Koh <yskoh@mellanox.com>2017-10-10 07:04:02 -0700
committerFerruh Yigit <ferruh.yigit@intel.com>2017-10-12 01:52:49 +0100
commit03e0868b4cd779e44ef5550fcfee603d3acd017a (patch)
treec109be019a682f2debeb4a8d71f701da4e2be382 /drivers/net/mlx5/mlx5_rxtx_vec.h
parent47cf8373a1792b4a2c49cc03994f479fd4a693a2 (diff)
downloaddpdk-next-eventdev-03e0868b4cd779e44ef5550fcfee603d3acd017a.zip
dpdk-next-eventdev-03e0868b4cd779e44ef5550fcfee603d3acd017a.tar.gz
dpdk-next-eventdev-03e0868b4cd779e44ef5550fcfee603d3acd017a.tar.xz
net/mlx5: fix deadlock due to buffered slots in Rx SW ring
When replenishing the Rx ring, there are always buffered slots reserved between consumed entries and HW-owned entries. These have to be filled with fake mbufs to protect from possible overflow, rather than optimistically expecting successful replenishment, which can cause deadlock with a small-sized queue. Fixes: fc048bd52cb7 ("net/mlx5: fix overflow of Rx SW ring") Cc: stable@dpdk.org Reported-by: Martin Weiser <martin.weiser@allegro-packets.com> Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Tested-by: Martin Weiser <martin.weiser@allegro-packets.com>
Diffstat (limited to 'drivers/net/mlx5/mlx5_rxtx_vec.h')
-rw-r--r--drivers/net/mlx5/mlx5_rxtx_vec.h6
1 file changed, 5 insertions, 1 deletion
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 4261690..1f08ed0 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -101,7 +101,7 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
const uint16_t q_n = 1 << rxq->elts_n;
const uint16_t q_mask = q_n - 1;
- const uint16_t elts_idx = rxq->rq_ci & q_mask;
+ uint16_t elts_idx = rxq->rq_ci & q_mask;
struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
unsigned int i;
@@ -119,6 +119,10 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
RTE_PKTMBUF_HEADROOM);
rxq->rq_ci += n;
+ /* Prevent overflowing into consumed mbufs. */
+ elts_idx = rxq->rq_ci & q_mask;
+ for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+ (*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
rte_io_wmb();
*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}