summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnatoly Burakov <anatoly.burakov@intel.com>2018-04-20 16:25:26 +0100
committerThomas Monjalon <thomas@monjalon.net>2018-04-27 23:52:51 +0200
commite82ca1a75edaf4bc5836f61f9c03af14b95a5194 (patch)
tree4ebd85e3ec9e41bb29c54fa7887f25bc1b839adb
parenta99e8df63f8ab4b782dc5249682ab1152ff2965f (diff)
downloaddpdk-e82ca1a75edaf4bc5836f61f9c03af14b95a5194.zip
dpdk-e82ca1a75edaf4bc5836f61f9c03af14b95a5194.tar.gz
dpdk-e82ca1a75edaf4bc5836f61f9c03af14b95a5194.tar.xz
mem: improve preallocation on 32-bit
Previously, if we couldn't preallocate VA space on 32-bit for one page size, we simply bailed out, even though we could've tried allocating VA space with other page sizes. For example, if the user had both 1G and 2M pages enabled, and had asked DPDK to allocate memory on both sockets, DPDK would've tried to allocate VA space for 1x1G page on both sockets, failed and never tried again, even though it could've allocated the same 1G of VA space for 512x2M pages. Fix this by retrying with different page sizes if VA space reservation failed. Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com> Tested-by: Jananee Parthasarathy <jananeex.m.parthasarathy@intel.com>
-rw-r--r--lib/librte_eal/common/eal_common_memory.c42
1 file changed, 35 insertions, 7 deletions
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 54329dc..dc46e56 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -142,6 +142,17 @@ get_mem_amount(uint64_t page_sz, uint64_t max_mem)
}
static int
+free_memseg_list(struct rte_memseg_list *msl)
+{
+ if (rte_fbarray_destroy(&msl->memseg_arr)) {
+ RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
+ return -1;
+ }
+ memset(msl, 0, sizeof(*msl));
+ return 0;
+}
+
+static int
alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
uint64_t max_mem, int socket_id, int type_msl_idx)
{
@@ -339,24 +350,41 @@ memseg_primary_init_32(void)
return -1;
}
- msl = &mcfg->memsegs[msl_idx++];
+ msl = &mcfg->memsegs[msl_idx];
if (alloc_memseg_list(msl, hugepage_sz,
max_pagesz_mem, socket_id,
- type_msl_idx))
+ type_msl_idx)) {
+ /* failing to allocate a memseg list is
+ * a serious error.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
return -1;
+ }
+
+ if (alloc_va_space(msl)) {
+ /* if we couldn't allocate VA space, we
+ * can try with smaller page sizes.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
+ /* deallocate memseg list */
+ if (free_memseg_list(msl))
+ return -1;
+ break;
+ }
total_segs += msl->memseg_arr.len;
cur_pagesz_mem = total_segs * hugepage_sz;
type_msl_idx++;
-
- if (alloc_va_space(msl)) {
- RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
- return -1;
- }
+ msl_idx++;
}
cur_socket_mem += cur_pagesz_mem;
}
+ if (cur_socket_mem == 0) {
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
+ socket_id);
+ return -1;
+ }
}
return 0;