author     Jin Yu <jin.yu@intel.com>              2019-10-10 04:48:34 +0800
committer  Ferruh Yigit <ferruh.yigit@intel.com>  2019-10-20 15:43:27 +0100
commit     b5bf156fd806ed600acd896f82724963997b73fb (patch)
tree       4fb9ee04274e3b9d26839369ced1a846d4e6341d
parent     19ce684f2ba4774e7ca585a5161f1e1e751ea02b (diff)
vhost: add APIs to operate inflight ring
This patch introduces three APIs to operate the inflight ring: set, set last
and clear. Each API is provided in a split ring and a packed ring variant.

Signed-off-by: Lin Li <lilin24@baidu.com>
Signed-off-by: Xun Ni <nixun@baidu.com>
Signed-off-by: Yu Zhang <zhangyu31@baidu.com>
Signed-off-by: Jin Yu <jin.yu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
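A minimal usage sketch of the split ring variants (not part of the commit:
the helper process_split_request, the queue_id/desc_idx/last_used_idx
parameters and the call ordering are illustrative assumptions; only the
three rte_vhost_*_split signatures come from this patch):

#include <rte_vhost.h>

/*
 * Hypothetical backend helper: mark a split ring descriptor in flight
 * when it is fetched from the avail ring, record it as the last
 * in-flight I/O, and clear it once its used ring entry has been written.
 */
static int
process_split_request(int vid, uint16_t queue_id,
		uint16_t desc_idx, uint16_t last_used_idx)
{
	/* Descriptor fetched from the avail ring: mark it in flight. */
	if (rte_vhost_set_inflight_desc_split(vid, queue_id, desc_idx) < 0)
		return -1;

	/* Remember this descriptor as the last in-flight I/O. */
	if (rte_vhost_set_last_inflight_io_split(vid, queue_id, desc_idx) < 0)
		return -1;

	/* ... handle the request and fill the used ring entry here ... */

	/* Completion written back: clear the in-flight state. */
	return rte_vhost_clr_inflight_desc_split(vid, queue_id,
			last_used_idx, desc_idx);
}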
-rw-r--r--   lib/librte_vhost/rte_vhost.h            | 116
-rw-r--r--   lib/librte_vhost/rte_vhost_version.map  |   6
-rw-r--r--   lib/librte_vhost/vhost.c                | 273
3 files changed, 395 insertions, 0 deletions
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 8c5b58d..a3df612 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -710,6 +710,122 @@ int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
struct rte_vhost_vring *vring);
/**
+ * Set split inflight descriptor.
+ *
+ * This function saves descriptors that have been consumed from the
+ * available ring
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param idx
+ * inflight entry index
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
+ uint16_t idx);
+
+/**
+ * Set packed inflight descriptor and get corresponding inflight entry
+ *
+ * This function saves descriptors that have been consumed
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param head
+ * head of descriptors
+ * @param last
+ * last of descriptors
+ * @param inflight_entry
+ * corresponding inflight entry
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
+ uint16_t head, uint16_t last, uint16_t *inflight_entry);
+
+/**
+ * Save the head of the descriptor list of the last batch of used descriptors.
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param idx
+ * descriptor entry index
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_set_last_inflight_io_split(int vid,
+ uint16_t vring_idx, uint16_t idx);
+
+/**
+ * Update the inflight free_head, used_idx and used_wrap_counter.
+ *
+ * This function updates the inflight status before the descriptors are
+ * flushed to the used ring
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param head
+ * head of descriptors
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_set_last_inflight_io_packed(int vid,
+ uint16_t vring_idx, uint16_t head);
+
+/**
+ * Clear the split inflight status.
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param last_used_idx
+ * last used idx of used ring
+ * @param idx
+ * inflight entry index
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
+ uint16_t last_used_idx, uint16_t idx);
+
+/**
+ * Clear the packed inflight status.
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param head
+ * inflight entry index
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
+ uint16_t head);
+
+/**
* Notify the guest that used descriptors have been added to the vring. This
* function acts as a memory barrier.
*
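A corresponding sketch for the packed ring variants declared above (again
hypothetical: process_packed_request, queue_id and the head/last chain
indices are assumptions; the inflight entry returned by the set call is
carried with the request until completion, as the vhost.c implementation
further below suggests):

#include <rte_vhost.h>

/*
 * Hypothetical packed ring counterpart: the descriptor chain [head, last]
 * comes from the backend's own avail ring processing.
 */
static int
process_packed_request(int vid, uint16_t queue_id,
		uint16_t head, uint16_t last)
{
	uint16_t inflight_entry;

	/* Record the whole descriptor chain in the inflight region. */
	if (rte_vhost_set_inflight_desc_packed(vid, queue_id, head, last,
			&inflight_entry) < 0)
		return -1;

	/* ... handle the request and fill the used ring entries here ... */

	/* Advance the inflight free_head/used_idx bookkeeping. */
	if (rte_vhost_set_last_inflight_io_packed(vid, queue_id,
			inflight_entry) < 0)
		return -1;

	/* Clear the in-flight flag and snapshot the "old_*" state. */
	return rte_vhost_clr_inflight_desc_packed(vid, queue_id,
			inflight_entry);
}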
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index 5f1d4a7..bc70bfa 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -87,4 +87,10 @@ EXPERIMENTAL {
rte_vdpa_relay_vring_used;
rte_vhost_extern_callback_register;
rte_vhost_driver_set_protocol_features;
+ rte_vhost_set_inflight_desc_split;
+ rte_vhost_set_inflight_desc_packed;
+ rte_vhost_set_last_inflight_io_split;
+ rte_vhost_set_last_inflight_io_packed;
+ rte_vhost_clr_inflight_desc_split;
+ rte_vhost_clr_inflight_desc_packed;
};
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 1f3e1b1..7b07c8a 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -824,6 +824,279 @@ rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
}
int
+rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
+ uint16_t idx)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ if (unlikely(!vq->inflight_split))
+ return -1;
+
+ if (unlikely(idx >= vq->size))
+ return -1;
+
+ vq->inflight_split->desc[idx].counter = vq->global_counter++;
+ vq->inflight_split->desc[idx].inflight = 1;
+ return 0;
+}
+
+int
+rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
+ uint16_t head, uint16_t last,
+ uint16_t *inflight_entry)
+{
+ struct rte_vhost_inflight_info_packed *inflight_info;
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+ struct vring_packed_desc *desc;
+ uint16_t old_free_head, free_head;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(!vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ inflight_info = vq->inflight_packed;
+ if (unlikely(!inflight_info))
+ return -1;
+
+ if (unlikely(head >= vq->size))
+ return -1;
+
+ desc = vq->desc_packed;
+ old_free_head = inflight_info->old_free_head;
+ if (unlikely(old_free_head >= vq->size))
+ return -1;
+
+ free_head = old_free_head;
+
+ /* init header descriptor */
+ inflight_info->desc[old_free_head].num = 0;
+ inflight_info->desc[old_free_head].counter = vq->global_counter++;
+ inflight_info->desc[old_free_head].inflight = 1;
+
+ /* save desc entry in flight entry */
+ while (head != ((last + 1) % vq->size)) {
+ inflight_info->desc[old_free_head].num++;
+ inflight_info->desc[free_head].addr = desc[head].addr;
+ inflight_info->desc[free_head].len = desc[head].len;
+ inflight_info->desc[free_head].flags = desc[head].flags;
+ inflight_info->desc[free_head].id = desc[head].id;
+
+ inflight_info->desc[old_free_head].last = free_head;
+ free_head = inflight_info->desc[free_head].next;
+ inflight_info->free_head = free_head;
+ head = (head + 1) % vq->size;
+ }
+
+ inflight_info->old_free_head = free_head;
+ *inflight_entry = old_free_head;
+
+ return 0;
+}
+
+int
+rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
+ uint16_t last_used_idx, uint16_t idx)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ if (unlikely(!vq->inflight_split))
+ return -1;
+
+ if (unlikely(idx >= vq->size))
+ return -1;
+
+ rte_smp_mb();
+
+ vq->inflight_split->desc[idx].inflight = 0;
+
+ rte_smp_mb();
+
+ vq->inflight_split->used_idx = last_used_idx;
+ return 0;
+}
+
+int
+rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
+ uint16_t head)
+{
+ struct rte_vhost_inflight_info_packed *inflight_info;
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(!vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ inflight_info = vq->inflight_packed;
+ if (unlikely(!inflight_info))
+ return -1;
+
+ if (unlikely(head >= vq->size))
+ return -1;
+
+ rte_smp_mb();
+
+ inflight_info->desc[head].inflight = 0;
+
+ rte_smp_mb();
+
+ inflight_info->old_free_head = inflight_info->free_head;
+ inflight_info->old_used_idx = inflight_info->used_idx;
+ inflight_info->old_used_wrap_counter = inflight_info->used_wrap_counter;
+
+ return 0;
+}
+
+int
+rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
+ uint16_t idx)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ if (unlikely(!vq->inflight_split))
+ return -1;
+
+ vq->inflight_split->last_inflight_io = idx;
+ return 0;
+}
+
+int
+rte_vhost_set_last_inflight_io_packed(int vid, uint16_t vring_idx,
+ uint16_t head)
+{
+ struct rte_vhost_inflight_info_packed *inflight_info;
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+ uint16_t last;
+
+ dev = get_device(vid);
+ if (unlikely(!dev))
+ return -1;
+
+ if (unlikely(!(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
+ return 0;
+
+ if (unlikely(!vq_is_packed(dev)))
+ return -1;
+
+ if (unlikely(vring_idx >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (unlikely(!vq))
+ return -1;
+
+ inflight_info = vq->inflight_packed;
+ if (unlikely(!inflight_info))
+ return -1;
+
+ if (unlikely(head >= vq->size))
+ return -1;
+
+ last = inflight_info->desc[head].last;
+ if (unlikely(last >= vq->size))
+ return -1;
+
+ inflight_info->desc[last].next = inflight_info->free_head;
+ inflight_info->free_head = head;
+ inflight_info->used_idx += inflight_info->desc[head].num;
+ if (inflight_info->used_idx >= inflight_info->desc_num) {
+ inflight_info->used_idx -= inflight_info->desc_num;
+ inflight_info->used_wrap_counter =
+ !inflight_info->used_wrap_counter;
+ }
+
+ return 0;
+}
+
+int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
struct virtio_net *dev;