author:    Anatoly Burakov <anatoly.burakov@intel.com>	2018-04-24 11:19:24 +0100
committer: Thomas Monjalon <thomas@monjalon.net>	2018-04-27 23:52:51 +0200
commit:    1be7644986261377a588f2ee7e1cdcbab4710896 (patch)
tree:      2d25cb881fc313a0261b0cffd3f94dacd651819a
parent:    e82ca1a75edaf4bc5836f61f9c03af14b95a5194 (diff)
mem: improve autodetection of hugepage counts on 32-bit
For non-legacy mode, we preallocate VA space for hugepages, so we know in advance which pages we will be able to allocate and which we won't. However, the init procedure was using hugepage counts gathered from sysfs, paid no attention to which hugepage sizes were actually available for reservation, and so failed on attempts to reserve unavailable pages. Fix this by capping total page counts at the number of pages actually preallocated.

Also, the VA preallocation procedure only looks at mountpoints that are available, and expects pages to exist whenever a mountpoint does. That is not necessarily the case, so also check whether hugepages are available for a particular page size on a particular NUMA node.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Tested-by: Jananee Parthasarathy <jananeex.m.parthasarathy@intel.com>
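[Editor's note] To make the capping step concrete: DPDK keeps per-socket hugepage counts in struct hugepage_info's num_pages[] array, indexed by NUMA node. Below is a minimal sketch of the clamping described above, assuming the EAL-internal struct hugepage_info from eal_internal_cfg.h; cap_pages_by_preallocated() is an illustrative name, not a function from this patch:

	#include <rte_common.h>		/* RTE_MIN, RTE_DIM */
	#include "eal_internal_cfg.h"	/* struct hugepage_info (EAL-internal header) */

	/* Illustrative helper (not part of the patch): clamp the per-socket
	 * page counts detected via sysfs to the counts actually preallocated
	 * in VA space, so later reservation never asks for pages that the
	 * preallocated memseg lists cannot hold.
	 */
	static void
	cap_pages_by_preallocated(struct hugepage_info *detected,
			const struct hugepage_info *preallocated)
	{
		unsigned int socket;

		for (socket = 0; socket < RTE_DIM(detected->num_pages); socket++)
			detected->num_pages[socket] = RTE_MIN(
					detected->num_pages[socket],
					preallocated->num_pages[socket]);
	}

The patch performs this same clamp inline in eal_hugepage_init(), with the preallocated counts gathered by a memseg-list walk.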
-rw-r--r--	lib/librte_eal/common/eal_common_memory.c	 4
-rw-r--r--	lib/librte_eal/linuxapp/eal/eal_memory.c	31
2 files changed, 35 insertions(+), 0 deletions(-)
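[Editor's note] Both hunks below build on the memseg-list walk API from the 18.05 memory rework: rte_memseg_list_walk() invokes a callback once per memseg list and stops early if the callback returns non-zero. A minimal sketch of that pattern, under 18.05-era headers; count_pages_of_size() and struct count_arg are illustrative, not part of DPDK:

	#include <rte_memory.h>		/* rte_memseg_list_walk() */
	#include <rte_eal_memconfig.h>	/* struct rte_memseg_list */

	struct count_arg {
		uint64_t page_sz;	/* page size being counted */
		unsigned int pages;	/* running total across all lists */
	};

	/* Callback: add up preallocated segments of one page size.
	 * Returning 0 continues the walk; non-zero stops it.
	 */
	static int
	count_pages_of_size(const struct rte_memseg_list *msl, void *arg)
	{
		struct count_arg *ca = arg;

		if (msl->page_sz == ca->page_sz)
			ca->pages += msl->memseg_arr.len;
		return 0;
	}

hugepage_count_walk() in the second hunk has the same shape, except it accumulates per-socket totals into a scratch struct hugepage_info keyed by msl->socket_id.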
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index dc46e56..e29b93b 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -329,6 +329,10 @@ memseg_primary_init_32(void)
 			hpi = &internal_config.hugepage_info[hpi_idx];
 			hugepage_sz = hpi->hugepage_sz;
 
+			/* check if pages are actually available */
+			if (hpi->num_pages[socket_id] == 0)
+				continue;
+
 			max_segs = RTE_MAX_MEMSEG_PER_TYPE;
 			max_pagesz_mem = max_socket_mem - cur_socket_mem;
 
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 9351e84..53e7087 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -1603,6 +1603,18 @@ fail:
 	return -1;
 }
 
+static int __rte_unused
+hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)
+{
+	struct hugepage_info *hpi = arg;
+
+	if (msl->page_sz != hpi->hugepage_sz)
+		return 0;
+
+	hpi->num_pages[msl->socket_id] += msl->memseg_arr.len;
+	return 0;
+}
+
 static int
 eal_hugepage_init(void)
 {
@@ -1617,10 +1629,29 @@ eal_hugepage_init(void)
 	for (hp_sz_idx = 0;
 			hp_sz_idx < (int) internal_config.num_hugepage_sizes;
 			hp_sz_idx++) {
+#ifndef RTE_ARCH_64
+		struct hugepage_info dummy;
+		unsigned int i;
+#endif
 		/* also initialize used_hp hugepage sizes in used_hp */
 		struct hugepage_info *hpi;
 		hpi = &internal_config.hugepage_info[hp_sz_idx];
 		used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+
+#ifndef RTE_ARCH_64
+		/* for 32-bit, limit number of pages on socket to whatever we've
+		 * preallocated, as we cannot allocate more.
+		 */
+		memset(&dummy, 0, sizeof(dummy));
+		dummy.hugepage_sz = hpi->hugepage_sz;
+		if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)
+			return -1;
+
+		for (i = 0; i < RTE_DIM(dummy.num_pages); i++) {
+			hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],
+					dummy.num_pages[i]);
+		}
+#endif
 	}
 
 	/* make a copy of socket_mem, needed for balanced allocation. */