author	Yongseok Koh <yskoh@mellanox.com>	2018-09-24 18:36:47 +0000
committer	Ferruh Yigit <ferruh.yigit@intel.com>	2018-10-11 18:53:49 +0200
commit	7e43a32ee06054e7af9de6b9830b61b783df063b
tree	f2d0ad730f97d7ffa9f311151323db9c2102a945 /drivers/net/mlx5/mlx5_rxtx.h
parent	31912d9924039c3a4f58e1bb00f380e5b4c7bd81
net/mlx5: support externally allocated static memory
When the MLX PMD registers memory for DMA, it accesses the global memseg list of DPDK to maximize the range of registration so that LKey search can be more efficient. The granularity of MR registration is per page.

Externally allocated memory shouldn't be used for DMA because it can't be searched in the memseg list and free events can't be tracked by DPDK. If it is used, the following error occurs:

    net_mlx5: port 0 unable to find virtually contiguous chunk for address (0x5600017587c0). rte_memseg_contig_walk() failed.

There's a pending patchset [1] which enables externally allocated memory. Once it is merged, users will be able to register their own memory outside of the EAL, which will resolve this issue. Meanwhile, if the external memory is static (allocated on startup and never freed), such memory can also be registered with a small tweak in the code.

[1] http://patches.dpdk.org/project/dpdk/list/?series=1415

This patch is not a bug fix but needs to be included in stable versions.

Fixes: 974f1e7ef146 ("net/mlx5: add new memory region support")
Cc: stable@dpdk.org

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
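For illustration, the kind of setup this patch targets is a pktmbuf pool sitting on static, externally allocated memory. Below is a minimal sketch of such a setup, not part of this patch: it assumes IOVA-as-VA addressing and Linux hugepage-backed mmap(), and the names ext_pktmbuf_pool_create, EXT_NB_MBUFS, EXT_ELT_SZ and EXT_MEM_SZ are hypothetical.

/*
 * Sketch only, under the assumptions stated above: a pktmbuf pool backed
 * by static external memory (mapped once at startup, never freed), which
 * the PMD can now register for DMA on demand.
 */
#include <sys/mman.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>

#define EXT_NB_MBUFS 8192
#define EXT_ELT_SZ   (sizeof(struct rte_mbuf) + 2048)
/* One extra cache line per object for the mempool object header. */
#define EXT_MEM_SZ   (EXT_NB_MBUFS * (EXT_ELT_SZ + RTE_CACHE_LINE_SIZE))

static struct rte_mempool *
ext_pktmbuf_pool_create(void)
{
	struct rte_mempool *mp;
	void *base;

	/* Static external memory: never freed while the port is running. */
	base = mmap(NULL, EXT_MEM_SZ, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (base == MAP_FAILED)
		return NULL;
	mp = rte_mempool_create_empty("ext_mp", EXT_NB_MBUFS, EXT_ELT_SZ, 0,
				      sizeof(struct rte_pktmbuf_pool_private),
				      SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;
	if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) < 0)
		goto error;
	rte_pktmbuf_pool_init(mp, NULL);
	/* Populate from the external chunk; IOVA taken as equal to VA. */
	if (rte_mempool_populate_iova(mp, base, (rte_iova_t)(uintptr_t)base,
				      EXT_MEM_SZ, NULL, NULL) < 0)
		goto error;
	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
	return mp;
error:
	rte_mempool_free(mp);
	return NULL;
}

Memory populated this way does not appear in DPDK's memseg list, which is exactly the case the new mlx5_tx_update_ext_mp() fallback handles on the first Tx LKey miss.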
Diffstat (limited to 'drivers/net/mlx5/mlx5_rxtx.h')
-rw-r--r--	drivers/net/mlx5/mlx5_rxtx.h	35
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d225b9c..1db468c 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -363,6 +363,8 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
+ struct rte_mempool *mp);
/**
* Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
@@ -607,6 +609,24 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
}
/**
+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
+ * cloned mbuf is allocated is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx5_mb2mp(struct rte_mbuf *buf)
+{
+	if (unlikely(RTE_MBUF_INDIRECT(buf)))
+		return rte_mbuf_from_indirect(buf)->pool;
+	return buf->pool;
+}
+
+/**
* Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
* as mempool is pre-configured and static.
*
@@ -664,7 +684,20 @@ mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
return mlx5_tx_addr2mr_bh(txq, addr);
}
-#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+static __rte_always_inline uint32_t
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+{
+	uintptr_t addr = (uintptr_t)mb->buf_addr;
+	uint32_t lkey = mlx5_tx_addr2mr(txq, addr);
+
+	if (likely(lkey != UINT32_MAX))
+		return lkey;
+	if (rte_errno == ENXIO) {
+		/* Mempool may have externally allocated memory. */
+		lkey = mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+	}
+	return lkey;
+}
/**
* Ring TX queue doorbell and flush the update if requested.
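For context, the Tx burst routines consume this helper when building WQE data segments. The sketch below is illustrative only, not code from this patch; the helper name tx_fill_dseg() is hypothetical.

/*
 * Illustrative sketch: filling a WQE data segment for one mbuf using
 * mlx5_tx_mb2mr(). Error handling for an unresolvable LKey is omitted.
 */
#include <rte_byteorder.h>
#include <rte_mbuf.h>

#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

static __rte_always_inline void
tx_fill_dseg(struct mlx5_txq_data *txq,
	     volatile struct mlx5_wqe_data_seg *dseg, struct rte_mbuf *buf)
{
	uintptr_t addr = rte_pktmbuf_mtod(buf, uintptr_t);

	dseg->byte_count = rte_cpu_to_be_32(rte_pktmbuf_data_len(buf));
	/* LKey lookup with fallback to external mempool registration. */
	dseg->lkey = mlx5_tx_mb2mr(txq, buf);
	dseg->addr = rte_cpu_to_be_64(addr);
}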