author    Viacheslav Ovsiienko <viacheslavo@mellanox.com>  2019-03-27 13:15:41 +0000
committer Ferruh Yigit <ferruh.yigit@intel.com>            2019-03-29 17:25:32 +0100
commit    1b782252cb21741fd5adf53068904979cf8189d8 (patch)
tree      dcc4ec0723c8dd09a6c43758ab5303743f5d5718
parent    9c0a9eed37f17c84b92a75071e1df2de3d5d16d6 (diff)
net/mlx5: switch to the shared protection domain
The PMD code is updated to use the Protection Domain from the shared IB device context. The domain is shared between all devices belonging to the same multiport InfiniBand device. If the IB device has only one port, the PD is effectively not shared, because only one Ethernet device is created over that IB device.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
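For context, a minimal sketch of where the PD lives after this change, assuming the shared object referenced as priv->sh in the hunks below is the mlx5_ibv_shared structure (field layout abbreviated and illustrative only; the authoritative definition is in drivers/net/mlx5/mlx5.h):

/* Sketch only: abbreviated view of the shared IB device context. */
struct mlx5_ibv_shared {
	struct ibv_context *ctx;               /* Verbs context, one per IB device. */
	struct ibv_device_attr_ex device_attr; /* Device properties. */
	struct ibv_pd *pd;                     /* Protection Domain shared by all ports. */
	/* ... reference counting and per-port resources omitted ... */
};

struct mlx5_priv {
	struct mlx5_ibv_shared *sh; /* Shared context; replaces the per-port pd copy. */
	/* ... */
};

Memory registration and queue creation then reference priv->sh->pd, so all Ethernet ports spawned over the same multiport IB device register against a single PD.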
 drivers/net/mlx5/mlx5.c     |  1 -
 drivers/net/mlx5/mlx5.h     |  1 -
 drivers/net/mlx5/mlx5_mr.c  |  4 ++--
 drivers/net/mlx5/mlx5_rxq.c | 10 +++++-----
 drivers/net/mlx5/mlx5_txq.c |  2 +-
 5 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ac6e841..6313824 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1098,7 +1098,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
priv->ctx = sh->ctx;
priv->ibv_port = spawn->ibv_port;
priv->device_attr = sh->device_attr;
- priv->pd = sh->pd;
priv->mtu = ETHER_MTU;
#ifndef RTE_ARCH_64
/* Initialize UAR access locks for 32bit implementations. */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 56270a6..4213866 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -225,7 +225,6 @@ struct mlx5_priv {
uint32_t ibv_port; /* IB device port number. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr_ex device_attr; /* Device properties. */
- struct ibv_pd *pd; /* Protection Domain. */
struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
/* Bit-field of MAC addresses owned by the PMD. */
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 21f8b5e..0f0a64f 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -720,7 +720,7 @@ alloc_resources:
* mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
* through mlx5_alloc_verbs_buf().
*/
- mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
+ mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)data.start, len,
IBV_ACCESS_LOCAL_WRITE);
if (mr->ibv_mr == NULL) {
DEBUG("port %u fail to create a verbs MR for address (%p)",
@@ -1138,7 +1138,7 @@ mlx5_create_mr_ext(struct rte_eth_dev *dev, uintptr_t addr, size_t len,
RTE_CACHE_LINE_SIZE, socket_id);
if (mr == NULL)
return NULL;
- mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
+ mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)addr, len,
IBV_ACCESS_LOCAL_WRITE);
if (mr->ibv_mr == NULL) {
DRV_LOG(WARNING,
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 2f60999..0496c4e 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -867,7 +867,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
.max_wr = wqe_n >> rxq_data->sges_n,
/* Max number of scatter/gather elements in a WR. */
.max_sge = 1 << rxq_data->sges_n,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
.cq = tmpl->cq,
.comp_mask =
IBV_WQ_FLAGS_CVLAN_STRIPPING |
@@ -1831,7 +1831,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
},
&qp_init_attr);
#else
@@ -1850,7 +1850,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
});
#endif
if (!qp) {
@@ -2006,7 +2006,7 @@ mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
.cq = cq,
});
if (!wq) {
@@ -2160,7 +2160,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
.rx_hash_fields_mask = 0,
},
.rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->pd
+ .pd = priv->sh->pd
});
if (!qp) {
DEBUG("port %u cannot allocate QP for drop queue",
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index d185617..d3a5498 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -426,7 +426,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
* Tx burst.
*/
.sq_sig_all = 0,
- .pd = priv->pd,
+ .pd = priv->sh->pd,
.comp_mask = IBV_QP_INIT_ATTR_PD,
};
if (txq_data->max_inline)