summaryrefslogtreecommitdiff
path: root/drivers/net/mlx5/mlx5_txq.c
diff options
context:
space:
mode:
authorYongseok Koh <yskoh@mellanox.com>2019-04-01 14:12:55 -0700
committerFerruh Yigit <ferruh.yigit@intel.com>2019-04-05 17:45:22 +0200
commit7be600c8d8ef48cbd4b73077923821798190f2f1 (patch)
tree7ab465a93a432a2607afe11f8c431f4850616810 /drivers/net/mlx5/mlx5_txq.c
parent9a8ab29b84d3479512855fdd849679921ef4567a (diff)
downloaddpdk-next-eventdev-7be600c8d8ef48cbd4b73077923821798190f2f1.zip
dpdk-next-eventdev-7be600c8d8ef48cbd4b73077923821798190f2f1.tar.gz
dpdk-next-eventdev-7be600c8d8ef48cbd4b73077923821798190f2f1.tar.xz
net/mlx5: rework PMD global data init
There is a growing need for a PMD global data structure. It should be initialized once per process, regardless of how many PMD instances are probed. mlx5_init_once() is called during probing and makes sure all the init functions are called once per process. Currently, such global data and its initialization functions are scattered. Rather than 'extern'-ing such variables and calling such functions one by one while making sure each is called only once by checking the validity of the corresponding variable, it is better to have a global storage to hold such data and a consolidated function performing all the initializations. The existing shared memory gets more extensively used for this purpose. As there could be multiple secondary processes, a static storage (local to the process) is also added. As the reserved virtual address for UAR remap is a PMD global resource, it does not need to be stored in the device priv structure, but in the PMD global data. Signed-off-by: Yongseok Koh <yskoh@mellanox.com> Acked-by: Shahaf Shuler <shahafs@mellanox.com>
Diffstat (limited to 'drivers/net/mlx5/mlx5_txq.c')
-rw-r--r--drivers/net/mlx5/mlx5_txq.c7
1 file changed, 3 insertions, 4 deletions
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 5062f5c..1b3d89f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -286,7 +286,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
}
}
/* new address in reserved UAR address space. */
- addr = RTE_PTR_ADD(priv->uar_base,
+ addr = RTE_PTR_ADD(mlx5_shared_data->uar_base,
uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1));
if (!already_mapped) {
pages[pages_n++] = uar_va;
@@ -844,9 +844,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
txq->ibv = NULL;
- if (priv->uar_base)
- munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
- page_size), page_size);
+ munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg, page_size),
+ page_size);
if (rte_atomic32_dec_and_test(&txq->refcnt)) {
txq_free_elts(txq);
mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);