author    Shahaf Shuler <shahafs@mellanox.com>  2017-09-14 13:50:39 +0300
committer Ferruh Yigit <ferruh.yigit@intel.com> 2017-10-06 02:49:48 +0200
commit    883ce1724b652d8da1cacdaf8409580b24d6a33d (patch)
tree      931e290a7b2f9815bef0de65e8911d75f0727a2d /drivers/net/mlx5/mlx5_txq.c
parent    24c14430cdc4556a30a1e608f67230e881718f7f (diff)
net/mlx5: enforce Tx num of segments limitation
Mellanox NICs have a limitation on the number of mbuf segments a multi-segment mbuf can have. The maximum number depends on the Tx offloads requested.

The current code does not enforce this limitation, which may cause malformed work requests to be written to the device.

This commit adds verification of the number of mbuf segments posted to the device. In case of overflow the packet is not sent. In addition, the NIC documentation is updated with the limitation.

Considering the device limitation of 63 data segments in a work request, the maximum number of segments in an mbuf was calculated taking TSO as the worst case:

    max_nb_segs = 63 - (control segment + ethernet segment +
                        TSO headers inline + inline segment +
                        extra inline to align to cacheline)

Cc: stable@dpdk.org

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
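For illustration only, the inline-size clamping added in the hunk below can be reproduced as a small standalone C program. The constants (63 data segments per WQE, 16-byte WQE dword, 64-byte cache line) mirror the usual MLX5_DSEG_MAX, MLX5_WQE_DWORD_SIZE and RTE_CACHE_LINE_SIZE definitions but are assumptions of this sketch, not values taken from the patch itself.

    /*
     * Standalone sketch of the WQE data-segment accounting in the patch.
     * The constants below are assumptions mirroring the mlx5 definitions;
     * they are not part of the patch.
     */
    #include <stdio.h>

    #define DSEG_MAX        63  /* max data segments per work request */
    #define WQE_DWORD_SIZE  16  /* bytes per WQE data segment */
    #define CACHE_LINE_SIZE 64  /* typical RTE_CACHE_LINE_SIZE */

    int
    main(void)
    {
            unsigned int txq_inline = 1024; /* requested inline size, bytes */
            /* Round the request up to whole cache lines, as txq_ctrl_setup() does. */
            unsigned int inline_data =
                    ((txq_inline + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE) *
                    CACHE_LINE_SIZE;
            /* WQE CTRL (1 DS) + WQE ETH (1 DS) + inline part (N DS). */
            unsigned int ds_cnt = 2 + inline_data / WQE_DWORD_SIZE;

            if (ds_cnt > DSEG_MAX) {
                    unsigned int max_inline = (DSEG_MAX - 2) * WQE_DWORD_SIZE;

                    /* Round down to a cache-line multiple before applying. */
                    max_inline -= max_inline % CACHE_LINE_SIZE;
                    inline_data = max_inline;
            }
            printf("inline data: %u bytes (%u cache lines)\n",
                   inline_data, inline_data / CACHE_LINE_SIZE);
            return 0;
    }

With the example request of 1024 inline bytes, the check fires and the inline area is reduced to 960 bytes (15 cache lines), keeping the WQE at or below 63 data segments.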
Diffstat (limited to 'drivers/net/mlx5/mlx5_txq.c')
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c | 24
1 file changed, 24 insertions, 0 deletions
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4b0b532..b4c5b10 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -288,6 +288,8 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 		.comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
 	};
 	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
+		unsigned int ds_cnt;
+
 		tmpl.txq.max_inline =
 			((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
 			 RTE_CACHE_LINE_SIZE);
@@ -320,6 +322,28 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 			attr.init.cap.max_inline_data =
 				tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
 		}
+		/*
+		 * Check if the inline size is too large in a way which
+		 * can make the WQE DS to overflow.
+		 * Considering in calculation:
+		 *	WQE CTRL (1 DS)
+		 *	WQE ETH (1 DS)
+		 *	Inline part (N DS)
+		 */
+		ds_cnt = 2 +
+			 (attr.init.cap.max_inline_data / MLX5_WQE_DWORD_SIZE);
+		if (ds_cnt > MLX5_DSEG_MAX) {
+			unsigned int max_inline = (MLX5_DSEG_MAX - 2) *
+						  MLX5_WQE_DWORD_SIZE;
+
+			max_inline = max_inline - (max_inline %
+						   RTE_CACHE_LINE_SIZE);
+			WARN("txq inline is too large (%d) setting it to "
+			     "the maximum possible: %d\n",
+			     priv->txq_inline, max_inline);
+			tmpl.txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
+			attr.init.cap.max_inline_data = max_inline;
+		}
 	}
 	if (priv->tso) {
 		attr.init.max_tso_header =