author     Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>	2017-05-11 17:21:26 +0530
committer  Thomas Monjalon <thomas@monjalon.net>	2017-07-01 12:54:51 +0200
commit     49da4e82cf94a5b7d8fc611c2ca16433ef52881c (patch)
tree       c24a7b5b188731d0d1e6097af3731fbce8fefab8 /lib/librte_kni/rte_kni.c
parent     814baffdb6910c1a949bd7c5ce3c6807f703c799 (diff)
kni: allocate no more mbuf than empty slots in queue
In kni_allocate_mbufs(), we always attempt to add max_burst (32) mbufs to alloc_q, which leads to excessive rte_pktmbuf_free() calls when alloc_q is contended at a high packet rate (e.g. 10G traffic). When the alloc_q fifo can accommodate only a few (or zero) mbufs, create only what is needed and add that to the fifo.

With this patch, we could stop the random network stalls seen in KNI at higher packet rates (e.g. 1G or 10G traffic between vEth0 and the PMD) that exhaust alloc_q under the above condition. I tested the i40e PMD for this purpose on ppc64le.

Signed-off-by: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
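The fix boils down to sizing each allocation burst by the number of empty slots in the alloc_q ring instead of always allocating a full burst. For a single-producer/single-consumer ring whose length is a power of two, the free-slot count follows directly from the read and write indexes. Below is a minimal standalone sketch of that calculation; the fifo struct, FIFO_LEN and BURST names are illustrative, not the actual rte_kni_fifo layout.

#include <stdint.h>

#define FIFO_LEN 1024	/* ring length, must be a power of two */
#define BURST      32	/* upper bound on mbufs allocated per round */

struct fifo {
	volatile uint32_t write;	/* producer index */
	volatile uint32_t read;		/* consumer index */
	void *buffer[FIFO_LEN];
};

/* Empty slots in the ring. One slot stays unused so that
 * read == write unambiguously means "empty". */
static inline uint32_t
fifo_free_count(const struct fifo *f)
{
	return (f->read - f->write - 1) & (FIFO_LEN - 1);
}

/* Allocate no more mbufs than the ring can absorb this round. */
static inline uint32_t
alloc_budget(const struct fifo *f)
{
	uint32_t free_slots = fifo_free_count(f);
	return free_slots < BURST ? free_slots : BURST;
}

Masking with a power-of-two value keeps the wrap-around arithmetic to a single AND on the hot path; the patch below applies the same idea inline, right before the allocation loop.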
Diffstat (limited to 'lib/librte_kni/rte_kni.c')
-rw-r--r--	lib/librte_kni/rte_kni.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 40288a1..8c483c1 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -624,6 +624,7 @@ kni_allocate_mbufs(struct rte_kni *kni)
 	int i, ret;
 	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
 	void *phys[MAX_MBUF_BURST_NUM];
+	int allocq_free;
 
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
 			offsetof(struct rte_kni_mbuf, pool));
@@ -646,7 +647,9 @@ kni_allocate_mbufs(struct rte_kni *kni)
 		return;
 	}
 
-	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
+	allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) \
+			& (MAX_MBUF_BURST_NUM - 1);
+	for (i = 0; i < allocq_free; i++) {
 		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
 		if (unlikely(pkts[i] == NULL)) {
 			/* Out of memory */