locking/atomic: remove ARCH_ATOMIC remanants

Now that gen-atomic-fallback.sh is only used to generate the arch_*
fallbacks, we don't need to also generate the non-arch_* forms, and can
remove the infrastructure this needed.

There is no change to any of the generated headers as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210713105253.7615-3-mark.rutland@arm.com
This commit is contained in:
Mark Rutland
2021-07-13 11:52:50 +01:00
committed by Peter Zijlstra
parent 47401d9494
commit f3e615b4db
21 changed files with 71 additions and 91 deletions

View File

@@ -1,6 +1,6 @@
 cat << EOF
 /**
- * ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
+ * arch_${atomic}_fetch_add_unless - add unless the number is already a given value
 * @v: pointer of type ${atomic}_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
@@ -9,14 +9,14 @@ cat << EOF
 * Returns original value of @v
 */
 static __always_inline ${int}
-${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	${int} c = ${arch}${atomic}_read(v);
+	${int} c = arch_${atomic}_read(v);
 	do {
 		if (unlikely(c == u))
 			break;
-	} while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
+	} while (!arch_${atomic}_try_cmpxchg(v, &c, c + a));
 	return c;
 }