summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorViacheslav Ovsiienko <viacheslavo@mellanox.com>2019-08-05 13:03:52 +0000
committerRaslan Darawsheh <rasland@mellanox.com>2019-08-06 17:42:12 +0200
commitc6f0485646fd0ab49570df2cebd69379f62b349b (patch)
treee70de356359fbd2e26b7bc6d5334e8e53f4ecb1f
parentda1df1ccabade0df6d46e481268657a9993815af (diff)
downloaddpdk-next-eventdev-c6f0485646fd0ab49570df2cebd69379f62b349b.zip
dpdk-next-eventdev-c6f0485646fd0ab49570df2cebd69379f62b349b.tar.gz
dpdk-next-eventdev-c6f0485646fd0ab49570df2cebd69379f62b349b.tar.xz
net/mlx5: fix inline data settings
If minimal inline data is required, the data inline feature must be engaged. There were incorrect settings that enabled inlining of entire small packets (up to 82B in size), which may result in a declining send rate if there are not enough cores. The same problem arose if inlining was enabled to support VLAN tag insertion in software. Fixes: 38b4b397a57d ("net/mlx5: add Tx configuration and setup") Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Matan Azrad <matan@mellanox.com>
-rw-r--r--drivers/net/mlx5/mlx5_txq.c39
1 file changed, 18 insertions, 21 deletions
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index fe3b4ec..81f3b40 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -784,13 +784,11 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
!config->hw_vlan_insert;
- if (vlan_inline)
- inlen_send = RTE_MAX(inlen_send, MLX5_ESEG_MIN_INLINE_SIZE);
/*
* If there are few Tx queues it is prioritized
* to save CPU cycles and disable data inlining at all.
*/
- if ((inlen_send && priv->txqs_n >= txqs_inline) || vlan_inline) {
+ if (inlen_send && priv->txqs_n >= txqs_inline) {
/*
* The data sent with ordinal MLX5_OPCODE_SEND
* may be inlined in Ethernet Segment, align the
@@ -825,32 +823,31 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
MLX5_WQE_CSEG_SIZE -
MLX5_WQE_ESEG_SIZE -
MLX5_WQE_DSEG_SIZE * 2);
- txq_ctrl->txq.inlen_send = inlen_send;
- txq_ctrl->txq.inlen_mode = inlen_mode;
- txq_ctrl->txq.inlen_empw = 0;
- } else {
+ } else if (inlen_mode) {
/*
* If minimal inlining is requested we must
* enable inlining in general, despite the
- * number of configured queues.
+ * number of configured queues. Ignore the
+ * txq_inline_max devarg, this is not
+ * full-featured inline.
*/
inlen_send = inlen_mode;
- if (inlen_mode) {
- /*
- * Extend space for inline data to allow
- * optional alignment of data buffer
- * start address, it may improve PCIe
- * performance.
- */
- inlen_send = RTE_MIN(inlen_send + MLX5_WQE_SIZE,
- MLX5_SEND_MAX_INLINE_LEN);
- }
- txq_ctrl->txq.inlen_send = inlen_send;
- txq_ctrl->txq.inlen_mode = inlen_mode;
- txq_ctrl->txq.inlen_empw = 0;
+ inlen_empw = 0;
+ } else if (vlan_inline) {
+ /*
+ * Hardware does not report offload for
+ * VLAN insertion, we must enable data inline
+ * to implement feature by software.
+ */
+ inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
+ inlen_empw = 0;
+ } else {
inlen_send = 0;
inlen_empw = 0;
}
+ txq_ctrl->txq.inlen_send = inlen_send;
+ txq_ctrl->txq.inlen_mode = inlen_mode;
+ txq_ctrl->txq.inlen_empw = 0;
if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
/*
* The data sent with MLX5_OPCODE_ENHANCED_MPSW