author    Stephen Hemminger <stephen@networkplumber.org>  2018-01-25 18:01:37 -0800
committer Ferruh Yigit <ferruh.yigit@intel.com>  2018-03-30 14:08:43 +0200
commit    ff2863570fccaa9f31db3c91d0f40a5532a198c7 (patch)
tree      dac7c61bb5c4ad14f1594a3f6c16cb4ac15c9a82
parent    adeb2a2d572bdadb44466b874556682f480681dc (diff)
eal: introduce atomic exchange operation
To handle atomic update of link status (64 bit), every driver was
doing its own version using cmpset. Atomic exchange is a useful
primitive in its own right; therefore make it an EAL routine.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
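For context, the kind of driver-side use this enables might look like the sketch below. The helper name, return convention, and the aliasing of struct rte_eth_link to a uint64_t are illustrative assumptions, not part of this commit:

#include <stdint.h>
#include <string.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Hypothetical helper: publish a new link status in one atomic step.
 * Assumes struct rte_eth_link fits in 64 bits so it can be aliased
 * to a uint64_t (the premise of the link-status rework). */
static int
link_status_set(struct rte_eth_dev *dev, const struct rte_eth_link *link)
{
	volatile uint64_t *dst = (volatile uint64_t *)&dev->data->dev_link;
	uint64_t new64, old64;

	RTE_BUILD_BUG_ON(sizeof(*link) != sizeof(uint64_t));
	memcpy(&new64, link, sizeof(new64));

	/* One exchange replaces each driver's private cmpset retry loop. */
	old64 = rte_atomic64_exchange(dst, new64);

	return (old64 == new64) ? -1 : 0; /* -1: link unchanged */
}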
-rw-r--r--  lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h  | 21
-rw-r--r--  lib/librte_eal/common/include/arch/x86/rte_atomic.h     | 24
-rw-r--r--  lib/librte_eal/common/include/arch/x86/rte_atomic_32.h  | 12
-rw-r--r--  lib/librte_eal/common/include/arch/x86/rte_atomic_64.h  | 12
-rw-r--r--  lib/librte_eal/common/include/generic/rte_atomic.h      | 78
5 files changed, 146 insertions(+), 1 deletion(-)
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
index 1821774..ce38350 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
@@ -136,6 +136,12 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+ return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+}
+
/*------------------------- 32 bit atomic operations -------------------------*/
static inline int
@@ -237,6 +243,13 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
return ret == 0;
}
+
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+ return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+}
+
/*------------------------- 64 bit atomic operations -------------------------*/
static inline int
@@ -431,7 +444,6 @@ static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}
-
/**
* Atomically set a 64-bit counter to 0.
*
@@ -442,6 +454,13 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
v->cnt = 0;
}
+
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+ return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+}
+
#endif
#ifdef __cplusplus
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic.h b/lib/librte_eal/common/include/arch/x86/rte_atomic.h
index 5cfd383..148398f 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic.h
@@ -104,6 +104,18 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
return res;
}
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+ asm volatile(
+ MPLOCKED
+ "xchgw %0, %1;"
+ : "=r" (val), "=m" (*dst)
+ : "0" (val), "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return val;
+}
+
static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
@@ -178,6 +190,18 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
return res;
}
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+ asm volatile(
+ MPLOCKED
+ "xchgl %0, %1;"
+ : "=r" (val), "=m" (*dst)
+ : "0" (val), "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return val;
+}
+
static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h b/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
index fb3abf1..8d711b6 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
@@ -98,6 +98,18 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
return res;
}
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dest, uint64_t val)
+{
+ uint64_t old;
+
+ do {
+ old = *dest;
+ } while (rte_atomic64_cmpset(dest, old, val) == 0);
+
+ return old;
+}
+
static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
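The i686 fallback above deserves a note: 32-bit x86 has no 64-bit xchg instruction, so the exchange is emulated by retrying cmpset (backed by cmpxchg8b) until no concurrent writer gets in between. A self-contained model of that retry pattern, using only the GCC __atomic builtins rather than DPDK internals:

#include <stdint.h>

/* Sketch of exchange-via-CAS; mirrors the i686 loop above. */
static inline uint64_t
exchange64_via_cas(volatile uint64_t *dst, uint64_t val)
{
	uint64_t old = *dst;

	/* On failure the builtin refreshes 'old' with the value it
	 * found, so each retry works with up-to-date data. */
	while (!__atomic_compare_exchange_n(dst, &old, val,
			0 /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		;

	return old;
}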
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h b/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
index 1a53a76..fd2ec9c 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
@@ -71,6 +71,18 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
return res;
}
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+ asm volatile(
+ MPLOCKED
+ "xchgq %0, %1;"
+ : "=r" (val), "=m" (*dst)
+ : "0" (val), "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return val;
+}
+
static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
diff --git a/lib/librte_eal/common/include/generic/rte_atomic.h b/lib/librte_eal/common/include/generic/rte_atomic.h
index 50e1b8a..8652c02 100644
--- a/lib/librte_eal/common/include/generic/rte_atomic.h
+++ b/lib/librte_eal/common/include/generic/rte_atomic.h
@@ -191,6 +191,32 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
#endif
/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ * ret = *dst;
+ * *dst = val;
+ * return ret;
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param val
+ * The new value.
+ * @return
+ * The original value at that location
+ */
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+ return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+}
+#endif
+
+/**
* The atomic counter structure.
*/
typedef struct {
@@ -444,6 +470,32 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
#endif
/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ * ret = *dst;
+ * *dst = val;
+ * return ret;
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param val
+ * The new value.
+ * @return
+ * The original value at that location
+ */
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+ return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+}
+#endif
+
+/**
* The atomic counter structure.
*/
typedef struct {
@@ -696,6 +748,32 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
#endif
/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ * ret = *dst;
+ * *dst = val;
+ * return ret;
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param val
+ * The new value.
+ * @return
+ * The original value at that location
+ */
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+ return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+}
+#endif
+
+/**
* The atomic counter structure.
*/
typedef struct {
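A trivial single-threaded check of the documented semantics (store the new value, return the previous one), assuming a DPDK build environment:

#include <assert.h>
#include <stdint.h>
#include <rte_atomic.h>

int
main(void)
{
	volatile uint64_t slot = 1;

	/* Stores 2 and hands back the previous contents of slot. */
	uint64_t prev = rte_atomic64_exchange(&slot, 2);

	assert(prev == 1);
	assert(slot == 2);
	return 0;
}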