summaryrefslogtreecommitdiff
path: root/drivers/net/mlx5/mlx5.h
diff options
context:
space:
mode:
authorViacheslav Ovsiienko <viacheslavo@mellanox.com>2019-04-27 04:32:56 +0000
committerFerruh Yigit <ferruh.yigit@intel.com>2019-05-03 18:45:23 +0200
commitab3cffcfc2b6d7612daa2e7048459e25650f56fc (patch)
tree3078bd159fcd5d6fcd10b3fdfecbc81e83d8185b /drivers/net/mlx5/mlx5.h
parent0333b2f584d95577681ea88e4238be6cb4369569 (diff)
downloaddpdk-ab3cffcfc2b6d7612daa2e7048459e25650f56fc.zip
dpdk-ab3cffcfc2b6d7612daa2e7048459e25650f56fc.tar.gz
dpdk-ab3cffcfc2b6d7612daa2e7048459e25650f56fc.tar.xz
net/mlx5: share Memory Regions for multiport device
The multiport InfiniBand device support was introduced [1]. All active ports, belonging to the same InfiniBand device use the single shared InfiniBand context of that device and share the resources: - QPs are created within shared context - Verbs flows are also created with specifying port index - DV/DR resources - Protection Domain - Event Handlers This patchset adds support for Memory Regions sharing between ports, created on the base of multiport InfiniBand device. The datapath of mlx5 uses the layered cache subsystem for allocating/releasing Memory Regions, only the lowest layer L3 is subject to share due to performance issues. [1] http://patches.dpdk.org/cover/51800/ Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com> Acked-by: Yongseok Koh <yskoh@mellanox.com>
Diffstat (limited to 'drivers/net/mlx5/mlx5.h')
-rw-r--r--drivers/net/mlx5/mlx5.h15
1 file changed, 8 insertions, 7 deletions
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0a6d7f1..2575732 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -275,6 +275,14 @@ struct mlx5_ibv_shared {
char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
struct ibv_device_attr_ex device_attr; /* Device properties. */
+ struct rte_pci_device *pci_dev; /* Backend PCI device. */
+ struct {
+ uint32_t dev_gen; /* Generation number to flush local caches. */
+ rte_rwlock_t rwlock; /* MR Lock. */
+ struct mlx5_mr_btree cache; /* Global MR cache table. */
+ struct mlx5_mr_list mr_list; /* Registered MR list. */
+ struct mlx5_mr_list mr_free_list; /* Freed MR list. */
+ } mr;
/* Shared DV/DR flow data section. */
pthread_mutex_t dv_mutex; /* DV context mutex. */
uint32_t dv_refcnt; /* DV/DR data reference counter. */
@@ -347,13 +355,6 @@ struct mlx5_priv {
struct mlx5_flows ctrl_flows; /* Control flow rules. */
LIST_HEAD(counters, mlx5_flow_counter) flow_counters;
/* Flow counters. */
- struct {
- uint32_t dev_gen; /* Generation number to flush local caches. */
- rte_rwlock_t rwlock; /* MR Lock. */
- struct mlx5_mr_btree cache; /* Global MR cache table. */
- struct mlx5_mr_list mr_list; /* Registered MR list. */
- struct mlx5_mr_list mr_free_list; /* Freed MR list. */
- } mr;
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */