summaryrefslogtreecommitdiff
path: root/examples/load_balancer
diff options
context:
space:
mode:
authorBruce Richardson <bruce.richardson@intel.com>2017-03-29 16:21:23 +0100
committerThomas Monjalon <thomas.monjalon@6wind.com>2017-03-29 22:25:37 +0200
commitcfa7c9e6fc1f7b248d8f250966851bdd19d7b9c2 (patch)
tree7a90acd3cde39e289628d6d4993216b7513f2981 /examples/load_balancer
parent77dd3064270c1fbb930aaecec70492c9e96ec404 (diff)
downloaddpdk-draft-windows-cfa7c9e6fc1f7b248d8f250966851bdd19d7b9c2.zip
dpdk-draft-windows-cfa7c9e6fc1f7b248d8f250966851bdd19d7b9c2.tar.gz
dpdk-draft-windows-cfa7c9e6fc1f7b248d8f250966851bdd19d7b9c2.tar.xz
ring: make bulk and burst return values consistent
The bulk functions for rings return 0 when all elements are enqueued and a negative value when there is no space. Change them to be consistent with the burst functions by returning the number of elements enqueued/dequeued, i.e. 0 or N. This change also allows the return value from enq/deq to be used directly without a branch for error checking. Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> Reviewed-by: Yuanhan Liu <yuanhan.liu@linux.intel.com> Acked-by: Olivier Matz <olivier.matz@6wind.com>
Diffstat (limited to 'examples/load_balancer')
-rw-r--r--examples/load_balancer/runtime.c16
1 file changed, 7 insertions, 9 deletions
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 6944325..82b10bc 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -146,7 +146,7 @@ app_lcore_io_rx_buffer_to_send (
(void **) lp->rx.mbuf_out[worker].array,
bsz);
- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz; k ++) {
struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -312,7 +312,7 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
(void **) lp->rx.mbuf_out[worker].array,
lp->rx.mbuf_out[worker].n_mbufs);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -349,9 +349,8 @@ app_lcore_io_tx(
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
bsz_rd);
- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }
n_mbufs += bsz_rd;
@@ -505,9 +504,8 @@ app_lcore_worker(
(void **) lp->mbuf_in.array,
bsz_rd);
- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }
#if APP_WORKER_DROP_ALL_PACKETS
for (j = 0; j < bsz_rd; j ++) {
@@ -559,7 +557,7 @@ app_lcore_worker(
#if APP_STATS
lp->rings_out_iters[port] ++;
- if (ret == 0) {
+ if (ret > 0) {
lp->rings_out_count[port] += 1;
}
if (lp->rings_out_iters[port] == APP_STATS){
@@ -572,7 +570,7 @@ app_lcore_worker(
}
#endif
- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz_wr; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -609,7 +607,7 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
(void **) lp->mbuf_out[port].array,
lp->mbuf_out[port].n_mbufs);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];