summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDamien Millescamps <damien.millescamps@6wind.com>2013-03-01 17:10:57 +0100
committerThomas Monjalon <thomas.monjalon@6wind.com>2013-09-17 14:16:12 +0200
commit27845a464f62fb7cf22685b6d046b2bf0b2acc3a (patch)
treef84a24e834be0d8644eab0b74c07a95053e2fa8f
parent9896f5059f9aa6003681e75530f838194862cb95 (diff)
downloaddpdk-27845a464f62fb7cf22685b6d046b2bf0b2acc3a.zip
dpdk-27845a464f62fb7cf22685b6d046b2bf0b2acc3a.tar.gz
dpdk-27845a464f62fb7cf22685b6d046b2bf0b2acc3a.tar.xz
mem: fix mempool for --no-huge
In --no-huge mode, mempool provides objects with their associated header/trailer fitting in a standard page (usually 4KB). This means all non-UIO drivers should work correctly in this mode, since UIO drivers allocate ring sizes that cannot fit in a page. Extend rte_mempool_virt2phy to obtain the correct physical address when elements of the pool are not on the same physically contiguous memory region. This is a first step for enhancement PR #29696. The reason for this patch is to be able to run on a kernel < 2.6.37 without the need to patch it, since all kernels below that version either are bugged or don't have huge page support at all (< 2.6.28). Signed-off-by: Damien Millescamps <damien.millescamps@6wind.com> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_memory.c2
-rw-r--r--lib/librte_mempool/rte_mempool.c54
-rw-r--r--lib/librte_mempool/rte_mempool.h20
3 files changed, 67 insertions, 9 deletions
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 8fb53b3..9cac99b 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -725,7 +725,7 @@ rte_eal_hugepage_init(void)
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
- /* for debug purposes, hugetlbfs can be disabled */
+ /* hugetlbfs can be disabled */
if (internal_config.no_hugetlbfs) {
addr = malloc(internal_config.memory);
mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 5032ca0..e2de4cd 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -36,6 +36,7 @@
#include <string.h>
#include <stdint.h>
#include <stdarg.h>
+#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/queue.h>
@@ -139,6 +140,8 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
uint32_t header_size, trailer_size;
unsigned i;
void *obj;
+ void *startaddr;
+ int page_size = getpagesize();
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
@@ -227,6 +230,20 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
trailer_size);
trailer_size = new_size - header_size - elt_size;
}
+ if (! rte_eal_has_hugepages()) {
+ /*
+ * compute trailer size so that pool elements fit exactly in
+ * a standard page
+ */
+ int new_size = page_size - header_size - elt_size;
+ if (new_size < 0 || (unsigned int)new_size < trailer_size) {
+ printf("When hugepages are disabled, pool objects "
+ "can't exceed PAGE_SIZE: %d + %d + %d > %d\n",
+ header_size, elt_size, trailer_size, page_size);
+ return NULL;
+ }
+ trailer_size = new_size;
+ }
/* this is the size of an object, including header and trailer */
total_elt_size = header_size + elt_size + trailer_size;
@@ -235,8 +252,31 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
* cache-aligned */
private_data_size = (private_data_size +
CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
+
+ if (! rte_eal_has_hugepages()) {
+ /*
+ * expand private data size to a whole page, so that the
+ * first pool element will start on a new standard page
+ */
+ int head = sizeof(struct rte_mempool);
+ int new_size = (private_data_size + head) % page_size;
+ if (new_size) {
+ private_data_size += page_size - new_size;
+ }
+ }
+
mempool_size = total_elt_size * n +
sizeof(struct rte_mempool) + private_data_size;
+
+ if (! rte_eal_has_hugepages()) {
+ /*
+ * we want the memory pool to start on a page boundary,
+ * because pool elements crossing page boundaries would
+ * result in discontiguous physical addresses
+ */
+ mempool_size += page_size;
+ }
+
rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", name);
mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
@@ -248,8 +288,20 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
if (mz == NULL)
goto exit;
+ if (rte_eal_has_hugepages()) {
+ startaddr = (void*)mz->addr;
+ } else {
+ /* align memory pool start address on a page boundary */
+ unsigned long addr = (unsigned long)mz->addr;
+ if (addr & (page_size - 1)) {
+ addr += page_size;
+ addr &= ~(page_size - 1);
+ }
+ startaddr = (void*)addr;
+ }
+
/* init the mempool structure */
- mp = mz->addr;
+ mp = startaddr;
memset(mp, 0, sizeof(*mp));
rte_snprintf(mp->name, sizeof(mp->name), "%s", name);
mp->phys_addr = mz->phys_addr;
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index a85341d..2df376e 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -955,16 +955,22 @@ rte_mempool_empty(const struct rte_mempool *mp)
* @return
* The physical address of the elt element.
*/
-static inline phys_addr_t rte_mempool_virt2phy(const struct rte_mempool *mp,
- const void *elt)
+static inline phys_addr_t
+rte_mempool_virt2phy(const struct rte_mempool *mp, const void *elt)
{
- uintptr_t off;
-
- off = (const char *)elt - (const char *)mp;
- return mp->phys_addr + off;
+ if (rte_eal_has_hugepages()) {
+ uintptr_t offset = (const char *)elt - (const char *)mp;
+ return mp->phys_addr + offset;
+ } else {
+ /*
+ * If huge pages are disabled, we cannot assume the
+ * memory region to be physically contiguous.
+ * Lookup for each element.
+ */
+ return rte_mem_virt2phy(elt);
+ }
}
-
/**
* Check the consistency of mempool objects.
*