atomics/generic: Define atomic64_fetch_add_unless()
As a step towards unifying the atomic/atomic64/atomic_long APIs, this patch converts the generic implementation of atomic64_add_unless() into a generic implementation of atomic64_fetch_add_unless().

A wrapper in <linux/atomic.h> will build atomic64_add_unless() atop of this, provided it is given a preprocessor definition.

No functional change is intended as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-9-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 00b808ab79 (parent 0ae1d99402), committed by Ingo Molnar
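The wrapper mentioned in the commit message works because the header change below advertises the new primitive via a preprocessor definition. As a minimal sketch (not the exact <linux/atomic.h> code), assuming only the API introduced by this patch, atomic64_add_unless() can be rebuilt like so:

static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	/* The add took place iff the value observed before it was not 'u'. */
	return atomic64_fetch_add_unless(v, a, u) != u;
}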
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -53,7 +53,8 @@ ATOMIC64_OPS(xor)
 extern long long atomic64_dec_if_positive(atomic64_t *v);
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern bool atomic64_add_unless(atomic64_t *v, long long a, long long u);
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)			atomic64_add(1LL, (v))
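The self-referential #define above is the kernel's convention for signalling, at preprocessing time, that this implementation exists, so generic code can avoid emitting its own fallback. A sketch of the detection pattern, with a hypothetical cmpxchg-loop fallback shown purely for illustration:

#ifndef atomic64_fetch_add_unless
static inline long long atomic64_fetch_add_unless(atomic64_t *v,
						  long long a, long long u)
{
	long long c = atomic64_read(v), old;

	/* Retry until the add succeeds or the counter is observed as 'u'. */
	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
		c = old;

	return c;
}
#endif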
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -178,18 +178,18 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
 
-bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
+long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	bool ret = false;
+	long long val;
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (v->counter != u) {
+	val = v->counter;
+	if (val != u)
 		v->counter += a;
-		ret = true;
-	}
 	raw_spin_unlock_irqrestore(lock, flags);
-	return ret;
+
+	return val;
 }
-EXPORT_SYMBOL(atomic64_add_unless);
+EXPORT_SYMBOL(atomic64_fetch_add_unless);
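Returning the old value (rather than a bool) makes conditional helpers fall out naturally. A usage sketch, with a helper name that is illustrative and not part of this patch:

static inline bool atomic64_inc_not_zero_sketch(atomic64_t *v)
{
	/* Increment unless the counter is 0; a prior value of 0 means no add. */
	return atomic64_fetch_add_unless(v, 1LL, 0LL) != 0LL;
}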