author    Yongseok Koh <yskoh@mellanox.com>    2017-12-26 19:55:46 -0800
committer Ferruh Yigit <ferruh.yigit@intel.com>    2018-01-16 18:47:49 +0100
commit    4b0d7b7fff7faef30bd19a9f2c33d5a85193bfac
tree      5fecc40a90d2b4d3633f8c260fb0c39714830236 /drivers/net/mlx5/mlx5_rxtx.c
parent    2eefbec531c7abf4c9c668f7941b285858c06733
net/mlx5: add fallback in Tx for multi-segment packet
mlx5_tx_burst_empw() falls back to a legacy Tx descriptor for multi-segment packets without taking advantage of inlining. Route those packets through mlx5_tx_burst() instead: in many cases the first segment can be inlined, so the device fetches only one segment instead of two. This saves PCIe bandwidth when transmitting multi-segment packets while Enhanced Multi-Packet Send is still used for the remaining packets.

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
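From the application side the change is transparent: a burst mixing single- and multi-segment mbufs is still handed to the PMD as one array through rte_eth_tx_burst(), and the new wrapper splits it into contiguous runs internally. A hedged usage sketch; send_mixed_burst() and its parameters are placeholders, only rte_eth_tx_burst() is the real DPDK API:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hand a mixed burst to the PMD. On an mlx5 port running the Enhanced
 * MPW path, mlx5_tx_burst_empw() dispatches multi-segment runs to the
 * legacy (inlining) routine and single-segment runs to eMPW. */
static void
send_mixed_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0;

	while (sent < n)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 &pkts[sent], n - sent);
}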
Diffstat (limited to 'drivers/net/mlx5/mlx5_rxtx.c')
-rw-r--r-- drivers/net/mlx5/mlx5_rxtx.c | 53
1 file changed, 47 insertions(+), 6 deletions(-)
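The wrapper added at the bottom of this diff leans on two counting helpers, txq_count_contig_multi_seg() and txq_count_contig_single_seg(), which live outside this file and therefore do not appear in the hunks below. A minimal sketch of the behavior the wrapper expects from them — the length of the leading run of the given packet kind; the bodies here are illustrative, not the committed code:

#include <rte_mbuf.h>

/* Illustrative sketch: length of the leading run of multi-segment mbufs. */
static inline unsigned int
txq_count_contig_multi_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	uint16_t pos;

	for (pos = 0; pos < pkts_n; ++pos)
		if (pkts[pos]->nb_segs == 1)
			break;
	return pos;
}

/* Illustrative sketch: length of the leading run of single-segment mbufs. */
static inline unsigned int
txq_count_contig_single_seg(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	uint16_t pos;

	for (pos = 0; pos < pkts_n; ++pos)
		if (pkts[pos]->nb_segs > 1)
			break;
	return pos;
}

With these in place, the dispatch loop in mlx5_tx_burst_empw() below simply alternates between the two run kinds until the burst is exhausted or the queue fills.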
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 7c9d182..d5e32b8 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1304,10 +1304,10 @@ mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
}
/**
- * DPDK callback for TX with Enhanced MPW support.
+ * TX with Enhanced MPW support.
*
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
+ * @param txq
+ * Pointer to TX queue structure.
* @param[in] pkts
* Packets to transmit.
* @param pkts_n
@@ -1316,10 +1316,10 @@ mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
* @return
* Number of packets successfully transmitted (<= pkts_n).
*/
-uint16_t
-mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+static inline uint16_t
+txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
const uint16_t elts_m = elts_n - 1;
@@ -1585,6 +1585,47 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
/**
+ * DPDK callback for TX with Enhanced MPW support.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
+ uint16_t nb_tx = 0;
+
+ while (pkts_n > nb_tx) {
+ uint16_t n;
+ uint16_t ret;
+
+ n = txq_count_contig_multi_seg(&pkts[nb_tx], pkts_n - nb_tx);
+ if (n) {
+ ret = mlx5_tx_burst(dpdk_txq, &pkts[nb_tx], n);
+ if (!ret)
+ break;
+ nb_tx += ret;
+ }
+ n = txq_count_contig_single_seg(&pkts[nb_tx], pkts_n - nb_tx);
+ if (n) {
+ ret = txq_burst_empw(txq, &pkts[nb_tx], n);
+ if (!ret)
+ break;
+ nb_tx += ret;
+ }
+ }
+ return nb_tx;
+}
+
+/**
* Translate RX completion flags to packet type.
*
* @param[in] cqe