commit c0583d98a9153549383d236e5cc5464bbbe8dd89
tree   438626ebf57693f0a05d5593d5a093d39ddeb12b
parent 4af00fb1a2449f23a7c0fc32159207ae68b0fbbb
Author:    Jerin Jacob <jerin.jacob@caviumnetworks.com>   2017-05-13 14:57:25 +0530
Committer: Thomas Monjalon <thomas@monjalon.net>          2017-06-06 17:21:55 +0200
eal: introduce macro for always inline
Different drivers use internal macros such as force_inline for the compiler's
always-inline feature. Standardize them through a single __rte_always_inline
macro.

Verified the change by comparing the output binaries: no difference was found
with this change.

Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
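A minimal before/after sketch of the conversion (checksum_add is a
hypothetical helper, not a function from this patch):

	/* Before: the GCC attribute is open-coded at every definition. */
	static inline __attribute__((always_inline)) uint32_t
	checksum_add(uint32_t a, uint32_t b)
	{
		return a + b;
	}

	/* After: the shared macro from rte_common.h. It already expands
	 * to "inline __attribute__((always_inline))", so the separate
	 * "inline" keyword is dropped.
	 */
	static __rte_always_inline uint32_t
	checksum_add(uint32_t a, uint32_t b)
	{
		return a + b;
	}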
Diffstat (limited to 'lib')
 lib/librte_acl/acl_run_altivec.h                    |  4
 lib/librte_acl/acl_run_avx2.h                       |  2
 lib/librte_acl/acl_run_neon.h                       |  6
 lib/librte_acl/acl_run_sse.h                        |  4
 lib/librte_eal/common/include/arch/arm/rte_io_64.h  | 32
 lib/librte_eal/common/include/arch/x86/rte_memcpy.h |  5
 lib/librte_eal/common/include/generic/rte_io.h      | 32
 lib/librte_eal/common/include/rte_common.h          |  5
 lib/librte_ether/rte_ethdev.h                       |  2
 lib/librte_mbuf/rte_mbuf.h                          |  7
 lib/librte_mempool/rte_mempool.h                    | 20
 lib/librte_net/net_crc_sse.h                        | 10
 lib/librte_net/rte_net_crc.c                        |  2
 lib/librte_port/rte_port_ring.c                     |  4
 lib/librte_ring/rte_ring.h                          | 46
 lib/librte_vhost/rte_vhost.h                        |  2
 lib/librte_vhost/vhost.h                            |  8
 lib/librte_vhost/virtio_net.c                       | 30
 18 files changed, 113 insertions(+), 108 deletions(-)
diff --git a/lib/librte_acl/acl_run_altivec.h b/lib/librte_acl/acl_run_altivec.h
index 7d329bc..62fd6a2 100644
--- a/lib/librte_acl/acl_run_altivec.h
+++ b/lib/librte_acl/acl_run_altivec.h
@@ -104,13 +104,13 @@ resolve_priority_altivec(uint64_t transition, int n,
/*
* Check for any match in 4 transitions
*/
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
check_any_match_x4(uint64_t val[])
{
return (val[0] | val[1] | val[2] | val[3]) & RTE_ACL_NODE_MATCH;
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
struct acl_flow_data *flows, uint64_t transitions[])
{
diff --git a/lib/librte_acl/acl_run_avx2.h b/lib/librte_acl/acl_run_avx2.h
index b01a46a..804e45a 100644
--- a/lib/librte_acl/acl_run_avx2.h
+++ b/lib/librte_acl/acl_run_avx2.h
@@ -86,7 +86,7 @@ static const rte_ymm_t ymm_range_base = {
* tr_hi contains high 32 bits for 8 transition.
* next_input contains up to 4 input bytes for 8 flows.
*/
-static inline __attribute__((always_inline)) ymm_t
+static __rte_always_inline ymm_t
transition8(ymm_t next_input, const uint64_t *trans, ymm_t *tr_lo, ymm_t *tr_hi)
{
const int32_t *tr;
diff --git a/lib/librte_acl/acl_run_neon.h b/lib/librte_acl/acl_run_neon.h
index d233ff0..dfa38f5 100644
--- a/lib/librte_acl/acl_run_neon.h
+++ b/lib/librte_acl/acl_run_neon.h
@@ -99,13 +99,13 @@ resolve_priority_neon(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
/*
* Check for any match in 4 transitions
*/
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
check_any_match_x4(uint64_t val[])
{
return (val[0] | val[1] | val[2] | val[3]) & RTE_ACL_NODE_MATCH;
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
struct acl_flow_data *flows, uint64_t transitions[])
{
@@ -124,7 +124,7 @@ acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
/*
* Process 4 transitions (in 2 NEON Q registers) in parallel
*/
-static inline __attribute__((always_inline)) int32x4_t
+static __rte_always_inline int32x4_t
transition4(int32x4_t next_input, const uint64_t *trans, uint64_t transitions[])
{
int32x4x2_t tr_hi_lo;
diff --git a/lib/librte_acl/acl_run_sse.h b/lib/librte_acl/acl_run_sse.h
index ad40a67..72f66e4 100644
--- a/lib/librte_acl/acl_run_sse.h
+++ b/lib/librte_acl/acl_run_sse.h
@@ -149,7 +149,7 @@ acl_process_matches(xmm_t *indices, int slot, const struct rte_acl_ctx *ctx,
/*
* Check for any match in 4 transitions (contained in 2 SSE registers)
*/
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
struct acl_flow_data *flows, xmm_t *indices1, xmm_t *indices2,
xmm_t match_mask)
@@ -176,7 +176,7 @@ acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
/*
* Process 4 transitions (in 2 XMM registers) in parallel
*/
-static inline __attribute__((always_inline)) xmm_t
+static __rte_always_inline xmm_t
transition4(xmm_t next_input, const uint64_t *trans,
xmm_t *indices1, xmm_t *indices2)
{
diff --git a/lib/librte_eal/common/include/arch/arm/rte_io_64.h b/lib/librte_eal/common/include/arch/arm/rte_io_64.h
index 0402125..e59e22a 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_io_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_io_64.h
@@ -44,7 +44,7 @@ extern "C" {
#include "generic/rte_io.h"
#include "rte_atomic_64.h"
-static inline uint8_t __attribute__((always_inline))
+static __rte_always_inline uint8_t
rte_read8_relaxed(const volatile void *addr)
{
uint8_t val;
@@ -56,7 +56,7 @@ rte_read8_relaxed(const volatile void *addr)
return val;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_read16_relaxed(const volatile void *addr)
{
uint16_t val;
@@ -68,7 +68,7 @@ rte_read16_relaxed(const volatile void *addr)
return val;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
rte_read32_relaxed(const volatile void *addr)
{
uint32_t val;
@@ -80,7 +80,7 @@ rte_read32_relaxed(const volatile void *addr)
return val;
}
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_read64_relaxed(const volatile void *addr)
{
uint64_t val;
@@ -92,7 +92,7 @@ rte_read64_relaxed(const volatile void *addr)
return val;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write8_relaxed(uint8_t val, volatile void *addr)
{
asm volatile(
@@ -101,7 +101,7 @@ rte_write8_relaxed(uint8_t val, volatile void *addr)
: [val] "r" (val), [addr] "r" (addr));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write16_relaxed(uint16_t val, volatile void *addr)
{
asm volatile(
@@ -110,7 +110,7 @@ rte_write16_relaxed(uint16_t val, volatile void *addr)
: [val] "r" (val), [addr] "r" (addr));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write32_relaxed(uint32_t val, volatile void *addr)
{
asm volatile(
@@ -119,7 +119,7 @@ rte_write32_relaxed(uint32_t val, volatile void *addr)
: [val] "r" (val), [addr] "r" (addr));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write64_relaxed(uint64_t val, volatile void *addr)
{
asm volatile(
@@ -128,7 +128,7 @@ rte_write64_relaxed(uint64_t val, volatile void *addr)
: [val] "r" (val), [addr] "r" (addr));
}
-static inline uint8_t __attribute__((always_inline))
+static __rte_always_inline uint8_t
rte_read8(const volatile void *addr)
{
uint8_t val;
@@ -137,7 +137,7 @@ rte_read8(const volatile void *addr)
return val;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_read16(const volatile void *addr)
{
uint16_t val;
@@ -146,7 +146,7 @@ rte_read16(const volatile void *addr)
return val;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
rte_read32(const volatile void *addr)
{
uint32_t val;
@@ -155,7 +155,7 @@ rte_read32(const volatile void *addr)
return val;
}
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_read64(const volatile void *addr)
{
uint64_t val;
@@ -164,28 +164,28 @@ rte_read64(const volatile void *addr)
return val;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write8(uint8_t value, volatile void *addr)
{
rte_io_wmb();
rte_write8_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write16(uint16_t value, volatile void *addr)
{
rte_io_wmb();
rte_write16_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write32(uint32_t value, volatile void *addr)
{
rte_io_wmb();
rte_write32_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write64(uint64_t value, volatile void *addr)
{
rte_io_wmb();
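On arm64 the relaxed accessors above compile to plain load/store
instructions, while the ordered rte_write*() variants issue rte_io_wmb()
first. A usage sketch under that model (dev_bar and REG_DOORBELL are
hypothetical; the accessors themselves are the API converted in this file):

	#include <rte_io.h>

	#define REG_DOORBELL 0x10	/* hypothetical register offset */

	/* Ring a doorbell after publishing a descriptor: rte_write32()
	 * performs rte_io_wmb() before the store, so the device observes
	 * the descriptor before the doorbell value.
	 */
	static void
	ring_doorbell(volatile char *dev_bar, uint32_t tail)
	{
		rte_write32(tail, dev_bar + REG_DOORBELL);
	}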
diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
index b9785e8..74c280c 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
@@ -44,6 +44,7 @@
#include <stdint.h>
#include <string.h>
#include <rte_vect.h>
+#include <rte_common.h>
#ifdef __cplusplus
extern "C" {
@@ -64,8 +65,8 @@ extern "C" {
* @return
* Pointer to the destination data.
*/
-static inline void *
-rte_memcpy(void *dst, const void *src, size_t n) __attribute__((always_inline));
+static __rte_always_inline void *
+rte_memcpy(void *dst, const void *src, size_t n);
#ifdef RTE_MACHINE_CPUFLAG_AVX512F
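The rte_common.h include is needed here because the macro now appears in
rte_memcpy()'s forward declaration. For a declaration, GCC accepts the
attribute either after the declarator (the old spelling) or among the
declaration specifiers (the macro's position); a sketch with a hypothetical
copy_fn:

	/* old spelling: attribute trails the declaration */
	static inline void *
	copy_fn(void *dst, const void *src, size_t n) __attribute__((always_inline));

	/* new spelling: macro sits with the other specifiers */
	static __rte_always_inline void *
	copy_fn(void *dst, const void *src, size_t n);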
diff --git a/lib/librte_eal/common/include/generic/rte_io.h b/lib/librte_eal/common/include/generic/rte_io.h
index d82ee69..477e7b5 100644
--- a/lib/librte_eal/common/include/generic/rte_io.h
+++ b/lib/librte_eal/common/include/generic/rte_io.h
@@ -264,55 +264,55 @@ rte_write64(uint64_t value, volatile void *addr);
#ifndef RTE_OVERRIDE_IO_H
-static inline uint8_t __attribute__((always_inline))
+static __rte_always_inline uint8_t
rte_read8_relaxed(const volatile void *addr)
{
return *(const volatile uint8_t *)addr;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_read16_relaxed(const volatile void *addr)
{
return *(const volatile uint16_t *)addr;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
rte_read32_relaxed(const volatile void *addr)
{
return *(const volatile uint32_t *)addr;
}
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_read64_relaxed(const volatile void *addr)
{
return *(const volatile uint64_t *)addr;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write8_relaxed(uint8_t value, volatile void *addr)
{
*(volatile uint8_t *)addr = value;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write16_relaxed(uint16_t value, volatile void *addr)
{
*(volatile uint16_t *)addr = value;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write32_relaxed(uint32_t value, volatile void *addr)
{
*(volatile uint32_t *)addr = value;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write64_relaxed(uint64_t value, volatile void *addr)
{
*(volatile uint64_t *)addr = value;
}
-static inline uint8_t __attribute__((always_inline))
+static __rte_always_inline uint8_t
rte_read8(const volatile void *addr)
{
uint8_t val;
@@ -321,7 +321,7 @@ rte_read8(const volatile void *addr)
return val;
}
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_read16(const volatile void *addr)
{
uint16_t val;
@@ -330,7 +330,7 @@ rte_read16(const volatile void *addr)
return val;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
rte_read32(const volatile void *addr)
{
uint32_t val;
@@ -339,7 +339,7 @@ rte_read32(const volatile void *addr)
return val;
}
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_read64(const volatile void *addr)
{
uint64_t val;
@@ -348,28 +348,28 @@ rte_read64(const volatile void *addr)
return val;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write8(uint8_t value, volatile void *addr)
{
rte_io_wmb();
rte_write8_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write16(uint16_t value, volatile void *addr)
{
rte_io_wmb();
rte_write16_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write32(uint32_t value, volatile void *addr)
{
rte_io_wmb();
rte_write32_relaxed(value, addr);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_write64(uint64_t value, volatile void *addr)
{
rte_io_wmb();
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
index e057f6e..a9a7494 100644
--- a/lib/librte_eal/common/include/rte_common.h
+++ b/lib/librte_eal/common/include/rte_common.h
@@ -102,6 +102,11 @@ typedef uint16_t unaligned_uint16_t;
*/
#define RTE_SET_USED(x) (void)(x)
+/**
+ * Force a function to be inlined
+ */
+#define __rte_always_inline inline __attribute__((always_inline))
+
/*********** Macros for pointer arithmetic ********/
/**
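Since the macro bundles the "inline" keyword with the attribute, converted
definitions need only a storage class in front of it. A sketch of the
expansion (add_one is a hypothetical function):

	static __rte_always_inline int
	add_one(int x)
	{
		return x + 1;
	}

	/* after preprocessing:
	 * static inline __attribute__((always_inline)) int
	 * add_one(int x) { return x + 1; }
	 */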
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 0f38b45..121058c 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3266,7 +3266,7 @@ rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
* causing N packets to be sent, and the error callback to be called for
* the rest.
*/
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
{
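rte_eth_tx_buffer() queues one packet and transmits automatically once the
buffer fills; a trailing flush pushes out the remainder. A hedged sketch of
the usual pattern (tx_path and its arguments are hypothetical; the two API
calls appear in this file):

	#include <rte_ethdev.h>

	static void
	tx_path(uint8_t port, uint16_t q, struct rte_eth_dev_tx_buffer *buf,
		struct rte_mbuf **pkts, uint16_t n)
	{
		uint16_t i;

		for (i = 0; i < n; i++)
			rte_eth_tx_buffer(port, q, buf, pkts[i]);

		/* send anything still buffered at the end of the burst */
		rte_eth_tx_buffer_flush(port, q, buf);
	}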
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 1cb0310..fe605c7 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -840,7 +840,7 @@ static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
* @param m
* The mbuf to be freed.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
RTE_ASSERT(RTE_MBUF_DIRECT(m));
@@ -1287,8 +1287,7 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
* - (m) if it is the last reference. It can be recycled or freed.
* - (NULL) if the mbuf still has remaining references on it.
*/
-__attribute__((always_inline))
-static inline struct rte_mbuf *
+static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 0);
@@ -1339,7 +1338,7 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
* @param m
* The packet mbuf segment to be freed.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
m = rte_pktmbuf_prefree_seg(m);
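rte_pktmbuf_prefree_seg() hands back the mbuf only when the caller drops the
last reference, which is why the free path above guards the raw free. The
same pattern in a hypothetical TX-completion handler:

	#include <rte_mbuf.h>

	static void
	tx_complete_one(struct rte_mbuf *done)
	{
		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(done);

		if (m != NULL)
			rte_mbuf_raw_free(m);	/* last reference: recycle */
		/* else: a clone still owns the segment; nothing to do */
	}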
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 48bc8ea..76b5b3b 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -993,7 +993,7 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache);
* @param mp
* A pointer to the mempool.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
struct rte_mempool *mp)
{
@@ -1011,7 +1011,7 @@ rte_mempool_cache_flush(struct rte_mempool_cache *cache,
* @return
* A pointer to the mempool cache or NULL if disabled or non-EAL thread.
*/
-static inline struct rte_mempool_cache *__attribute__((always_inline))
+static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
if (mp->cache_size == 0)
@@ -1038,7 +1038,7 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
* The flags used for the mempool creation.
* Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
unsigned n, struct rte_mempool_cache *cache)
{
@@ -1100,7 +1100,7 @@ ring_enqueue:
* The flags used for the mempool creation.
* Single-producer (MEMPOOL_F_SP_PUT flag) or multi-producers.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
unsigned n, struct rte_mempool_cache *cache,
__rte_unused int flags)
@@ -1123,7 +1123,7 @@ rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
* @param n
* The number of objects to add in the mempool from obj_table.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
@@ -1144,7 +1144,7 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
* @param obj
* A pointer to the object to be added.
*/
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
rte_mempool_put_bulk(mp, &obj, 1);
@@ -1167,7 +1167,7 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
* - >=0: Success; number of objects supplied.
* - <0: Error; code of ring dequeue function.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
unsigned n, struct rte_mempool_cache *cache)
{
@@ -1248,7 +1248,7 @@ ring_dequeue:
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
struct rte_mempool_cache *cache, __rte_unused int flags)
{
@@ -1281,7 +1281,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
* - 0: Success; objects taken
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
{
struct rte_mempool_cache *cache;
@@ -1309,7 +1309,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
* - 0: Success; objects taken.
* - -ENOENT: Not enough entries in the mempool; no object is retrieved.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
return rte_mempool_get_bulk(mp, obj_p, 1);
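rte_mempool_get() and rte_mempool_put() are one-object wrappers over the
bulk paths shown above, so both go through the per-lcore cache when one is
configured. A hedged usage sketch (the pool is assumed to be created
elsewhere):

	#include <rte_mempool.h>

	static int
	borrow_and_return(struct rte_mempool *mp)
	{
		void *obj;

		if (rte_mempool_get(mp, &obj) < 0)
			return -1;	/* -ENOENT: pool exhausted */

		/* ... use obj ... */

		rte_mempool_put(mp, obj);	/* = put_bulk of one object */
		return 0;
	}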
diff --git a/lib/librte_net/net_crc_sse.h b/lib/librte_net/net_crc_sse.h
index 8bce522..ac93637 100644
--- a/lib/librte_net/net_crc_sse.h
+++ b/lib/librte_net/net_crc_sse.h
@@ -73,7 +73,7 @@ struct crc_pclmulqdq_ctx crc16_ccitt_pclmulqdq __rte_aligned(16);
* @return
* New 16 byte folded data
*/
-static inline __attribute__((always_inline)) __m128i
+static __rte_always_inline __m128i
crcr32_folding_round(__m128i data_block,
__m128i precomp,
__m128i fold)
@@ -96,7 +96,7 @@ crcr32_folding_round(__m128i data_block,
* 64 bits reduced data
*/
-static inline __attribute__((always_inline)) __m128i
+static __rte_always_inline __m128i
crcr32_reduce_128_to_64(__m128i data128, __m128i precomp)
{
__m128i tmp0, tmp1, tmp2;
@@ -125,7 +125,7 @@ crcr32_reduce_128_to_64(__m128i data128, __m128i precomp)
* reduced 32 bits data
*/
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
crcr32_reduce_64_to_32(__m128i data64, __m128i precomp)
{
static const uint32_t mask1[4] __rte_aligned(16) = {
@@ -171,7 +171,7 @@ static const uint8_t crc_xmm_shift_tab[48] __rte_aligned(16) = {
* reg << (num * 8)
*/
-static inline __attribute__((always_inline)) __m128i
+static __rte_always_inline __m128i
xmm_shift_left(__m128i reg, const unsigned int num)
{
const __m128i *p = (const __m128i *)(crc_xmm_shift_tab + 16 - num);
@@ -179,7 +179,7 @@ xmm_shift_left(__m128i reg, const unsigned int num)
return _mm_shuffle_epi8(reg, _mm_loadu_si128(p));
}
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
crc32_eth_calc_pclmulqdq(
const uint8_t *data,
uint32_t data_len,
diff --git a/lib/librte_net/rte_net_crc.c b/lib/librte_net/rte_net_crc.c
index 9d1ee63..0391c72 100644
--- a/lib/librte_net/rte_net_crc.c
+++ b/lib/librte_net/rte_net_crc.c
@@ -116,7 +116,7 @@ crc32_eth_init_lut(uint32_t poly,
}
}
-static inline __attribute__((always_inline)) uint32_t
+static __rte_always_inline uint32_t
crc32_eth_calc_lut(const uint8_t *data,
uint32_t data_len,
uint32_t crc,
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 64bd965..a4e709c 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -293,7 +293,7 @@ rte_port_ring_multi_writer_tx(void *port, struct rte_mbuf *pkt)
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_port_ring_writer_tx_bulk_internal(void *port,
struct rte_mbuf **pkts,
uint64_t pkts_mask,
@@ -609,7 +609,7 @@ rte_port_ring_multi_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
struct rte_mbuf **pkts,
uint64_t pkts_mask,
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 97f025a1..e4e910b 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -345,7 +345,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
} \
} while (0)
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
uint32_t single)
{
@@ -383,7 +383,7 @@ update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
@@ -443,7 +443,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
* Actual number of objects enqueued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
int is_sp, unsigned int *free_space)
@@ -489,7 +489,7 @@ end:
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
@@ -548,7 +548,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
* - Actual number of objects dequeued.
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline __attribute__((always_inline)) unsigned int
+static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
int is_sc, unsigned int *available)
@@ -590,7 +590,7 @@ end:
* @return
* The number of objects enqueued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -613,7 +613,7 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @return
* The number of objects enqueued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -640,7 +640,7 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @return
* The number of objects enqueued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -662,7 +662,7 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* - 0: Success; objects enqueued.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
@@ -679,7 +679,7 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* - 0: Success; objects enqueued.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
@@ -700,7 +700,7 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* - 0: Success; objects enqueued.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
@@ -724,7 +724,7 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* @return
* The number of objects dequeued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -748,7 +748,7 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -775,7 +775,7 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
* @return
* The number of objects dequeued, either 0 or n
*/
-static inline unsigned int __attribute__((always_inline))
+static __rte_always_inline unsigned int
rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
unsigned int *available)
{
@@ -798,7 +798,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
* - -ENOENT: Not enough entries in the ring to dequeue; no object is
* dequeued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
@@ -816,7 +816,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
* dequeued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
@@ -838,7 +838,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
* dequeued.
*/
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
@@ -962,7 +962,7 @@ struct rte_ring *rte_ring_lookup(const char *name);
* @return
* - n: Actual number of objects enqueued.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -985,7 +985,7 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -1012,7 +1012,7 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects enqueued.
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
unsigned int n, unsigned int *free_space)
{
@@ -1040,7 +1040,7 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -1065,7 +1065,7 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
@@ -1092,7 +1092,7 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
* @return
* - Number of objects dequeued
*/
-static inline unsigned __attribute__((always_inline))
+static __rte_always_inline unsigned
rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
unsigned int n, unsigned int *available)
{
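Unlike the bulk calls, the burst variants above move as many objects as
possible and report the count. A hedged sketch shuttling objects between two
rings (both rings are hypothetical, assumed created with rte_ring_create()):

	#include <rte_ring.h>

	#define BURST 32

	static unsigned int
	forward_burst(struct rte_ring *rx, struct rte_ring *tx)
	{
		void *objs[BURST];
		unsigned int n, sent;

		n = rte_ring_dequeue_burst(rx, objs, BURST, NULL);
		if (n == 0)
			return 0;	/* rx ring was empty */

		sent = rte_ring_enqueue_burst(tx, objs, n, NULL);
		/* caller must reclaim objs[sent..n-1] if tx filled up */
		return sent;
	}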
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 605e47c..22d0db2 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -120,7 +120,7 @@ struct vhost_device_ops {
* @return
* the host virtual address on success, 0 on failure
*/
-static inline uint64_t __attribute__((always_inline))
+static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
struct rte_vhost_mem_region *reg;
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index ddd8a9c..0f294f3 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -201,13 +201,13 @@ struct virtio_net {
#define VHOST_LOG_PAGE 4096
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
log_base[page / 8] |= 1 << (page % 8);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
uint64_t page;
@@ -229,7 +229,7 @@ vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t offset, uint64_t len)
{
@@ -272,7 +272,7 @@ extern uint64_t VHOST_FEATURES;
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Convert guest physical address to host physical address */
-static inline phys_addr_t __attribute__((always_inline))
+static __rte_always_inline phys_addr_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
uint32_t i;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 48219e0..b5d8096 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -55,7 +55,7 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t to, uint16_t from, uint16_t size)
{
@@ -67,7 +67,7 @@ do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
size * sizeof(struct vring_used_elem));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
@@ -95,7 +95,7 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
sizeof(vq->used->idx));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_shadow_used_ring(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint16_t len)
{
@@ -153,7 +153,7 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
}
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
{
@@ -237,7 +237,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
* added to the RX queue. This function works when the mbuf is scattered, but
* it doesn't support the mergeable feature.
*/
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
@@ -335,7 +335,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
return count;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t avail_idx, uint32_t *vec_idx,
struct buf_vector *buf_vec, uint16_t *desc_chain_head,
@@ -424,7 +424,7 @@ reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
struct buf_vector *buf_vec, uint16_t num_buffers)
{
@@ -512,7 +512,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
return 0;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
@@ -655,7 +655,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
uint16_t l4_proto = 0;
@@ -743,13 +743,13 @@ make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac)
return 0;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
zmbuf->in_use = 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
struct rte_mempool *mbuf_pool)
@@ -899,7 +899,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
return 0;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t used_idx, uint32_t desc_idx)
{
@@ -910,7 +910,7 @@ update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
sizeof(vq->used->ring[used_idx]));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t count)
{
@@ -930,7 +930,7 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
eventfd_write(vq->callfd, (eventfd_t)1);
}
-static inline struct zcopy_mbuf *__attribute__((always_inline))
+static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
uint16_t i;
@@ -961,7 +961,7 @@ again:
return NULL;
}
-static inline bool __attribute__((always_inline))
+static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
while (m) {