linux/tools/memory-model/linux-kernel.def

commit e176ebffc3 ("tools/memory-model: Add atomic_andnot() with its variants")
Author: Puranjay Mohan <puranjay@kernel.org>
Date:   2025-02-20 07:40:23 -08:00

Pull request 855 [1] added support for atomic_andnot() to the herd tool. Use
it to implement atomic_andnot() in the LKMM, along with atomic_fetch_andnot()
and its _acquire, _release, and _relaxed ordering variants.

Here is a small litmus test that uses this operation. The fetch returns the
old value 7 and clears the bits of 3, leaving 7 & ~3 == 4 in u, so the exists
clause below is always satisfied:

C andnot

{
atomic_t u = ATOMIC_INIT(7);
}

P0(atomic_t *u)
{
        r0 = atomic_fetch_andnot(3, u);
        r1 = READ_ONCE(*u);
}

exists (0:r0=7 /\ 0:r1=4)

Here is herd7's output, showing that this outcome is always reached:

Test andnot Allowed
States 1
0:r0=7; 0:r1=4;
Ok
Witnesses
Positive: 1 Negative: 0
Condition exists (0:r0=7 /\ 0:r1=4)
Observation andnot Always 1 0
Time andnot 0.00
Hash=78f011a0b5a0c65fa1cf106fcd62c845

[1] https://github.com/herd/herdtools7/pull/855
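
For illustration, here is a sketch of a second litmus test that exercises
one of the new ordering variants, atomic_fetch_andnot_acquire(), in a
message-passing pattern. The test name, the use of atomic_set_release()
on the writer side, and the expected "Never" result are assumptions of
this sketch; its herd7 output has not been reproduced here:

C MP+setrelease+andnotacquire

(*
 * Result: Never (expected, not verified herd7 output)
 *
 * P0 publishes data and then sets a pending bit with a release store;
 * P1 claims the pending bit with the acquire form of the new operation
 * and then reads the data.
 *)

{
        atomic_t f = ATOMIC_INIT(0);
}

P0(atomic_t *f, int *data)
{
        WRITE_ONCE(*data, 1);
        atomic_set_release(f, 1);
}

P1(atomic_t *f, int *data)
{
        r0 = atomic_fetch_andnot_acquire(1, f);
        r1 = READ_ONCE(*data);
}

exists (1:r0=1 /\ 1:r1=0)

If the acquire variant behaves like the other _acquire RMWs defined in
linux-kernel.def, the LKMM should forbid this outcome. Such a test can be
run in the usual way (herd7 -conf linux-kernel.cfg from tools/memory-model),
provided the herd7 build includes the pull request above.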

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Acked-by: Andrea Parri <parri.andrea@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: Will Deacon <will@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jade Alglave <j.alglave@ucl.ac.uk>
Cc: Luc Maranget <luc.maranget@inria.fr>
Cc: Akira Yokosawa <akiyks@gmail.com>
Cc: Daniel Lustig <dlustig@nvidia.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: <linux-arch@vger.kernel.org>

linux-kernel.def after this change (147 lines, 6.1 KiB):

// SPDX-License-Identifier: GPL-2.0+
//
// An earlier version of this file appeared in the companion webpage for
// "Frightening small children and disconcerting grown-ups: Concurrency
// in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
// which appeared in ASPLOS 2018.

// ONCE
READ_ONCE(X) __load{once}(X)
WRITE_ONCE(X,V) { __store{once}(X,V); }
// Release Acquire and friends
smp_store_release(X,V) { __store{release}(*X,V); }
smp_load_acquire(X) __load{acquire}(*X)
rcu_assign_pointer(X,V) { __store{release}(X,V); }
rcu_dereference(X) __load{once}(X)
smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }
// Fences
smp_mb() { __fence{mb}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }
// Exchange
xchg(X,V) __xchg{mb}(X,V)
xchg_relaxed(X,V) __xchg{once}(X,V)
xchg_release(X,V) __xchg{release}(X,V)
xchg_acquire(X,V) __xchg{acquire}(X,V)
cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
// Spinlocks
spin_lock(X) { __lock(X); }
spin_unlock(X) { __unlock(X); }
spin_trylock(X) __trylock(X)
spin_is_locked(X) __islocked(X)
// RCU
rcu_read_lock() { __fence{rcu-lock}; }
rcu_read_unlock() { __fence{rcu-unlock}; }
synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }
// SRCU
srcu_read_lock(X) __load{srcu-lock}(*X)
srcu_read_unlock(X,Y) { __store{srcu-unlock}(*X,Y); }
srcu_down_read(X) __load{srcu-lock}(*X)
srcu_up_read(X,Y) { __store{srcu-unlock}(*X,Y); }
synchronize_srcu(X) { __srcu{sync-srcu}(X); }
synchronize_srcu_expedited(X) { __srcu{sync-srcu}(X); }
// Atomic
atomic_read(X) READ_ONCE(*X)
atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }
atomic_add(V,X) { __atomic_op(X,+,V); }
atomic_sub(V,X) { __atomic_op(X,-,V); }
atomic_and(V,X) { __atomic_op(X,&,V); }
atomic_or(V,X) { __atomic_op(X,|,V); }
atomic_xor(V,X) { __atomic_op(X,^,V); }
atomic_inc(X) { __atomic_op(X,+,1); }
atomic_dec(X) { __atomic_op(X,-,1); }
atomic_andnot(V,X) { __atomic_op(X,&~,V); }
atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)
atomic_fetch_and(V,X) __atomic_fetch_op{mb}(X,&,V)
atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{once}(X,&,V)
atomic_fetch_and_acquire(V,X) __atomic_fetch_op{acquire}(X,&,V)
atomic_fetch_and_release(V,X) __atomic_fetch_op{release}(X,&,V)
atomic_fetch_or(V,X) __atomic_fetch_op{mb}(X,|,V)
atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{once}(X,|,V)
atomic_fetch_or_acquire(V,X) __atomic_fetch_op{acquire}(X,|,V)
atomic_fetch_or_release(V,X) __atomic_fetch_op{release}(X,|,V)
atomic_fetch_xor(V,X) __atomic_fetch_op{mb}(X,^,V)
atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{once}(X,^,V)
atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{acquire}(X,^,V)
atomic_fetch_xor_release(V,X) __atomic_fetch_op{release}(X,^,V)
atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)
atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)
atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
atomic_xchg(X,V) __xchg{mb}(X,V)
atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
atomic_xchg_release(X,V) __xchg{release}(X,V)
atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
atomic_add_negative_relaxed(V,X) __atomic_op_return{once}(X,+,V) < 0
atomic_add_negative_acquire(V,X) __atomic_op_return{acquire}(X,+,V) < 0
atomic_add_negative_release(V,X) __atomic_op_return{release}(X,+,V) < 0
atomic_fetch_andnot(V,X) __atomic_fetch_op{mb}(X,&~,V)
atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{acquire}(X,&~,V)
atomic_fetch_andnot_release(V,X) __atomic_fetch_op{release}(X,&~,V)
atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{once}(X,&~,V)