author    Damien Millescamps <damien.millescamps@6wind.com>	2013-06-12 09:50:20 +0200
committer Thomas Monjalon <thomas.monjalon@6wind.com>	2013-09-17 14:16:11 +0200
commit    215f471268581b6c60ae590ba8092b2dbbdc363f (patch)
tree      2c3a47d05f0936a84f02259548ccc7c05d12f034
parent    b9bbf657d30b5b59b4b9c2918b221e1ed05331a4 (diff)
mem: get physical address of any pointer
Extract rte_mem_virt2phy() from find_physaddr(). rte_mem_virt2phy() makes it
possible to obtain the physical address of any virtual address mapped into the
process calling this function. Note that this function is very slow and should
not be called after initialization, to avoid a performance bottleneck.

The memory must be locked with mlock(). The function rte_mem_lock_page() is an
mlock() helper that locks the whole page.

A better name would be rte_mem_virt2phys, but rte_mem_virt2phy is more
consistent with rte_mempool_virt2phy.

Signed-off-by: Damien Millescamps <damien.millescamps@6wind.com>
Signed-off-by: Thomas Monjalon <thomas.monjalon@6wind.com>
Acked-by: Olivier Matz <olivier.matz@6wind.com>
-rw-r--r--	lib/librte_eal/common/include/rte_memory.h	22
-rw-r--r--	lib/librte_eal/linuxapp/eal/eal_memory.c	87
2 files changed, 72 insertions, 37 deletions
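
As a usage note (not part of the patch): a minimal sketch of how an application
could combine the two new calls, assuming it is built against this revision of
rte_memory.h and linked with the EAL. The print_physaddr() helper and the
malloc'd buffer are illustrative only.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_memory.h>

/* Resolve the physical address of an arbitrary buffer once, at setup time:
 * lock its page first (as the commit message requires), then do the slow
 * /proc/self/pagemap lookup. */
static int
print_physaddr(void *buf)
{
	phys_addr_t pa;

	if (rte_mem_lock_page(buf) < 0) {
		fprintf(stderr, "cannot lock page\n");
		return -1;
	}
	pa = rte_mem_virt2phy(buf);
	if (pa == RTE_BAD_PHYS_ADDR) {
		fprintf(stderr, "cannot resolve physical address\n");
		return -1;
	}
	printf("virt=%p phys=0x%" PRIx64 "\n", buf, (uint64_t)pa);
	return 0;
}

int
main(void)
{
	void *buf = malloc(64);
	int ret = buf ? print_physaddr(buf) : -1;

	free(buf);
	return ret;
}

Locking the page first matters: pagemap only reports a page frame number for
resident pages, which is why the commit pairs rte_mem_virt2phy() with the
rte_mem_lock_page() helper.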
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index ce8500b..48714d4 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -68,6 +68,7 @@ enum rte_page_sizes {
#define __rte_cache_aligned __attribute__((__aligned__(CACHE_LINE_SIZE)))
typedef uint64_t phys_addr_t; /**< Physical address definition. */
+#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
/**
* Physical memory segment descriptor.
@@ -85,6 +86,27 @@ struct rte_memseg {
uint32_t nrank; /**< Number of ranks. */
} __attribute__((__packed__));
+/**
+ * Lock page in physical memory and prevent from swapping.
+ *
+ * @param virt
+ * The virtual address.
+ * @return
+ * 0 on success, negative on error.
+ */
+int rte_mem_lock_page(const void *virt);
+
+/**
+ * Get physical address of any mapped virtual address in the current process.
+ * It is found by browsing the /proc/self/pagemap special file.
+ * The page must be locked.
+ *
+ * @param virt
+ * The virtual address.
+ * @return
+ * The physical address or RTE_BAD_PHYS_ADDR on error.
+ */
+phys_addr_t rte_mem_virt2phy(const void *virt);
/**
* Get the layout of the available physical memory.
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 7438b8f..b70eb76 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -62,6 +62,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdarg.h>
#include <stdlib.h>
@@ -306,54 +307,66 @@ unmap_all_hugepages_orig(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
return 0;
}
+/* Lock page in physical memory and prevent from swapping. */
+int
+rte_mem_lock_page(const void *virt)
+{
+ unsigned long virtual = (unsigned long)virt;
+ int page_size = getpagesize();
+ unsigned long aligned = (virtual & ~ (page_size - 1));
+ return mlock((void*)aligned, page_size);
+}
+
/*
- * For each hugepage in hugepg_tbl, fill the physaddr value. We find
- * it by browsing the /proc/self/pagemap special file.
+ * Get physical address of any mapped virtual address in the current process.
*/
-static int
-find_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
+phys_addr_t
+rte_mem_virt2phy(const void *virt)
{
- int fd;
- unsigned i;
+ int fdmem;
uint64_t page;
- unsigned long virt_pfn;
- int page_size;
+ off_t offset;
+ unsigned long virtual = (unsigned long)virt;
+ int page_size = getpagesize();
- /* standard page size */
- page_size = getpagesize();
-
- fd = open("/proc/self/pagemap", O_RDONLY);
- if (fd < 0) {
+ fdmem = open("/proc/self/pagemap", O_RDONLY);
+ if (fdmem < 0) {
RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
- __func__, strerror(errno));
- return -1;
+ __func__, strerror(errno));
+ return RTE_BAD_PHYS_ADDR;
+ }
+ offset = (off_t) (virtual / page_size) * sizeof(uint64_t);
+ if (lseek(fdmem, offset, SEEK_SET) == (off_t) -1) {
+ RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
+ __func__, strerror(errno));
+ close(fdmem);
+ return RTE_BAD_PHYS_ADDR;
+ }
+ if (read(fdmem, &page, sizeof(uint64_t)) <= 0) {
+ RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
+ __func__, strerror(errno));
+ close(fdmem);
+ return RTE_BAD_PHYS_ADDR;
}
+ close (fdmem);
+
+ /* pfn (page frame number) are bits 0-54 (see pagemap.txt in Linux doc) */
+ return ((page & 0x7fffffffffffffULL) * page_size) + (virtual % page_size);
+}
+
+/*
+ * For each hugepage in hugepg_tbl, fill the physaddr value.
+ */
+static int
+find_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
+{
+ unsigned i;
for (i = 0; i < hpi->num_pages[0]; i++) {
- off_t offset;
- virt_pfn = (unsigned long)hugepg_tbl[i].orig_va /
- page_size;
- offset = sizeof(uint64_t) * virt_pfn;
- if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
- RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
- __func__, strerror(errno));
- close(fd);
+ hugepg_tbl[i].physaddr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
+ if (hugepg_tbl[i].physaddr == RTE_BAD_PHYS_ADDR)
return -1;
- }
- if (read(fd, &page, sizeof(uint64_t)) < 0) {
- RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
- __func__, strerror(errno));
- close(fd);
- return -1;
- }
-
- /*
- * the pfn (page frame number) are bits 0-54 (see
- * pagemap.txt in linux Documentation)
- */
- hugepg_tbl[i].physaddr = ((page & 0x7fffffffffffffULL) * page_size);
}
- close(fd);
return 0;
}
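
For reference, a self-contained sketch of the same pagemap lookup outside DPDK;
virt2phy() is just an illustrative name, and pread() is used in place of the
patch's lseek()+read() pair for brevity.

#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Translate a virtual address of the current process into a physical address
 * by reading its 64-bit /proc/self/pagemap entry.  Bits 0-54 of the entry
 * hold the page frame number (see pagemap.txt in the Linux documentation). */
static uint64_t
virt2phy(const void *virt)
{
	unsigned long vaddr = (unsigned long)virt;
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
	uint64_t entry;
	off_t offset;
	int fd;

	/* Lock the page so it is resident, as rte_mem_lock_page() does. */
	if (mlock((void *)(vaddr & ~(page_size - 1)), page_size) < 0) {
		fprintf(stderr, "mlock: %s\n", strerror(errno));
		return UINT64_MAX;
	}

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "open pagemap: %s\n", strerror(errno));
		return UINT64_MAX;
	}

	/* One 8-byte entry per virtual page, indexed by page number. */
	offset = (off_t)(vaddr / page_size) * sizeof(uint64_t);
	if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry)) {
		fprintf(stderr, "read pagemap: %s\n", strerror(errno));
		close(fd);
		return UINT64_MAX;
	}
	close(fd);

	return (entry & 0x7fffffffffffffULL) * page_size + vaddr % page_size;
}

int
main(void)
{
	int x = 0;

	printf("virt=%p phys=0x%" PRIx64 "\n", (void *)&x, virt2phy(&x));
	return 0;
}

Note that kernels newer than the one this patch targeted may hide the PFN bits
of pagemap entries from unprivileged readers, in which case the frame number
reads back as zero unless the process has CAP_SYS_ADMIN.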