author	Gage Eads <gage.eads@intel.com>	2018-01-09 10:19:35 -0600
committer	Jerin Jacob <jerin.jacob@caviumnetworks.com>	2018-01-19 16:09:56 +0100
commit	45219005264946d1a24a7764013a51b0e5c2de9a (patch)
tree	eeccd6e6fd04c103f22e63608b96b1e3d6bce171
parent	c7aa67f5a9e4a59a816a6506aa87cfb133981315 (diff)
event/sw: remove stale IQ references when reconfigured
This commit fixes a bug in which, when the sw PMD is reconfigured, it would leave stale IQ chunk pointers in each queue's IQ structure. Now, the PMD initializes all IQs at eventdev start time and releases all IQ chunk pointers at eventdev stop time (which has the consequence that any events in a queue when the eventdev is stopped will be lost). This approach should be resilient to any reconfiguration done between the stop and start, such as adding or removing queues.

This commit also fixes two potential issues in iq_chunk.h: iq_init() now initializes the IQ's count field to 0, and iq_dequeue_burst() sets iq->head to the appropriate next pointer.

Fixes: dca926ca9faa ("event/sw: use dynamically-sized IQs")

Reported-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Signed-off-by: Gage Eads <gage.eads@intel.com>
Reviewed-by: Harry van Haaren <harry.van.haaren@intel.com>
Acked-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
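For readers less familiar with the sw PMD internals, the standalone sketch below illustrates the two patterns the fix relies on: walking and releasing an entire chunk list (as the new iq_free_chunk_list() does) rather than only the head chunk, and resetting every IQ field, including count, on (re)initialization. The struct and function names are simplified stand-ins, not the PMD's actual types, and malloc/free stand in for the PMD's chunk pool.

#include <stdlib.h>
#include <stdio.h>

/* Simplified stand-ins for sw_queue_chunk and sw_iq; malloc/free stand in
 * for the PMD's chunk pool (iq_alloc_chunk/iq_free_chunk).
 */
struct chunk {
	struct chunk *next;
};

struct iq {
	struct chunk *head;
	struct chunk *tail;
	unsigned int head_idx;
	unsigned int tail_idx;
	unsigned int count;
};

/* Mirror of iq_free_chunk_list(): walk the whole list. Releasing only the
 * head chunk, as the old cleanup paths did, would leak the remaining chunks
 * and leave stale pointers behind across a reconfiguration.
 */
static void
free_chunk_list(struct chunk *head)
{
	while (head) {
		struct chunk *next = head->next;
		free(head);
		head = next;
	}
}

/* Mirror of iq_init(): every field is reset, including count, so a queue
 * that held events before a stop/reconfigure starts out empty again.
 */
static void
init_iq(struct iq *iq)
{
	iq->head = calloc(1, sizeof(*iq->head));
	iq->tail = iq->head;
	iq->head_idx = 0;
	iq->tail_idx = 0;
	iq->count = 0;
}

int
main(void)
{
	struct iq iq;

	init_iq(&iq);             /* "start": allocate fresh IQ storage */
	free_chunk_list(iq.head); /* "stop": release every chunk, not just the head */
	iq.head = NULL;
	printf("IQ released cleanly\n");
	return 0;
}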
-rw-r--r--	drivers/event/sw/iq_chunk.h	14
-rw-r--r--	drivers/event/sw/sw_evdev.c	62
2 files changed, 57 insertions, 19 deletions
diff --git a/drivers/event/sw/iq_chunk.h b/drivers/event/sw/iq_chunk.h
index 219bc0e..31d013e 100644
--- a/drivers/event/sw/iq_chunk.h
+++ b/drivers/event/sw/iq_chunk.h
@@ -45,12 +45,24 @@ iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
}
static __rte_always_inline void
+iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
+{
+ while (head) {
+ struct sw_queue_chunk *next;
+ next = head->next;
+ iq_free_chunk(sw, head);
+ head = next;
+ }
+}
+
+static __rte_always_inline void
iq_init(struct sw_evdev *sw, struct sw_iq *iq)
{
iq->head = iq_alloc_chunk(sw);
iq->tail = iq->head;
iq->head_idx = 0;
iq->tail_idx = 0;
+ iq->count = 0;
}
static __rte_always_inline void
@@ -126,7 +138,7 @@ iq_dequeue_burst(struct sw_evdev *sw,
done:
if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
- struct sw_queue_chunk *next = iq->head->next;
+ struct sw_queue_chunk *next = current->next;
iq_free_chunk(sw, current);
iq->head = next;
iq->head_idx = 0;
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index df83f20..52eb7d0 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -217,9 +217,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
char buf[IQ_ROB_NAMESIZE];
struct sw_qid *qid = &sw->qids[idx];
- for (i = 0; i < SW_IQS_MAX; i++)
- iq_init(sw, &qid->iq[i]);
-
/* Initialize the FID structures to no pinning (-1), and zero packets */
const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
for (i = 0; i < RTE_DIM(qid->fids); i++)
@@ -297,11 +294,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
return 0;
cleanup:
- for (i = 0; i < SW_IQS_MAX; i++) {
- if (qid->iq[i].head)
- iq_free_chunk(sw, qid->iq[i].head);
- }
-
if (qid->reorder_buffer) {
rte_free(qid->reorder_buffer);
qid->reorder_buffer = NULL;
@@ -320,13 +312,6 @@ sw_queue_release(struct rte_eventdev *dev, uint8_t id)
{
struct sw_evdev *sw = sw_pmd_priv(dev);
struct sw_qid *qid = &sw->qids[id];
- uint32_t i;
-
- for (i = 0; i < SW_IQS_MAX; i++) {
- if (!qid->iq[i].head)
- continue;
- iq_free_chunk(sw, qid->iq[i].head);
- }
if (qid->type == RTE_SCHED_TYPE_ORDERED) {
rte_free(qid->reorder_buffer);
@@ -360,6 +345,41 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
}
static void
+sw_init_qid_iqs(struct sw_evdev *sw)
+{
+ int i, j;
+
+ /* Initialize the IQ memory of all configured qids */
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct sw_qid *qid = &sw->qids[i];
+
+ if (!qid->initialized)
+ continue;
+
+ for (j = 0; j < SW_IQS_MAX; j++)
+ iq_init(sw, &qid->iq[j]);
+ }
+}
+
+static void
+sw_clean_qid_iqs(struct sw_evdev *sw)
+{
+ int i, j;
+
+ /* Release the IQ memory of all configured qids */
+ for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+ struct sw_qid *qid = &sw->qids[i];
+
+ for (j = 0; j < SW_IQS_MAX; j++) {
+ if (!qid->iq[j].head)
+ continue;
+ iq_free_chunk_list(sw, qid->iq[j].head);
+ qid->iq[j].head = NULL;
+ }
+ }
+}
+
+static void
sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
struct rte_event_queue_conf *conf)
{
@@ -406,7 +426,10 @@ sw_dev_configure(const struct rte_eventdev *dev)
num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
sw->qid_count*SW_IQS_MAX*2;
- /* If this is a reconfiguration, free the previous IQ allocation */
+ /* If this is a reconfiguration, free the previous IQ allocation. All
+ * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
+ * will be reinitialized in sw_start().
+ */
if (sw->chunks)
rte_free(sw->chunks);
@@ -642,8 +665,8 @@ sw_start(struct rte_eventdev *dev)
/* check all queues are configured and mapped to ports*/
for (i = 0; i < sw->qid_count; i++)
- if (sw->qids[i].iq[0].head == NULL ||
- sw->qids[i].cq_num_mapped_cqs == 0) {
+ if (!sw->qids[i].initialized ||
+ sw->qids[i].cq_num_mapped_cqs == 0) {
SW_LOG_ERR("Queue %d not configured\n", i);
return -ENOLINK;
}
@@ -664,6 +687,8 @@ sw_start(struct rte_eventdev *dev)
}
}
+ sw_init_qid_iqs(sw);
+
if (sw_xstats_init(sw) < 0)
return -EINVAL;
@@ -677,6 +702,7 @@ static void
sw_stop(struct rte_eventdev *dev)
{
struct sw_evdev *sw = sw_pmd_priv(dev);
+ sw_clean_qid_iqs(sw);
sw_xstats_uninit(sw);
sw->started = 0;
rte_smp_wmb();
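As a short illustration of the iq_dequeue_burst() change above: once the dequeue loop's cursor ("current" in the PMD) has advanced past the original head chunk, the new head must come from the cursor's next pointer; reading iq->head->next instead may dereference a chunk that has already been returned to the pool. The list below is a simplified standalone model of that cursor-vs-head distinction, not the PMD's actual dequeue path.

#include <stdlib.h>
#include <stdio.h>

/* Simplified chunk list; each node stands in for one fully drained chunk. */
struct node {
	int id;
	struct node *next;
};

/* Drain n nodes from the front of the list. cur plays the role of "current"
 * in iq_dequeue_burst(): once it has advanced, the new list head must be
 * taken from cur->next. Reading (*head)->next after cur has moved on would
 * touch memory that has already been freed.
 */
static void
drain(struct node **head, int n)
{
	struct node *cur = *head;

	while (cur && n-- > 0) {
		struct node *next = cur->next; /* equivalent of current->next */
		free(cur);
		cur = next;
	}
	*head = cur;
}

int
main(void)
{
	struct node *head = NULL;

	/* Build a three-node list: 2 -> 1 -> 0. */
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	drain(&head, 2);
	printf("new head id: %d\n", head ? head->id : -1); /* prints 0 */
	free(head);
	return 0;
}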