percpu: Generic support for this_cpu_cmpxchg_double()
Introduce this_cpu_cmpxchg_double().  this_cpu_cmpxchg_double() allows
the comparison between two consecutive words and replaces them if
there is a match.

	bool this_cpu_cmpxchg_double(pcp1, pcp2,
		old_word1, old_word2, new_word1, new_word2)

this_cpu_cmpxchg_double does not return the old value (difficult since
there are two words) but a boolean indicating if the operation was
successful.

The first percpu variable must be double word aligned!

-tj: Updated to return bool instead of int, converted size check to
     BUILD_BUG_ON() instead of VM_BUG_ON() and other cosmetic changes.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 7c33433921
parent 0f06c06362
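For illustration only (not part of this patch): a minimal sketch of how a
caller might use the new operation, assuming a hypothetical per-cpu pair of
unsigned longs.  The struct alignment satisfies the requirement that the
first percpu variable is double word aligned and the second follows it
directly.

/* Hypothetical example -- none of these names appear in the patch. */
#include <linux/percpu.h>

struct pcp_pair {
	unsigned long word1;
	unsigned long word2;
} __aligned(2 * sizeof(unsigned long));	/* first word double word aligned */

static DEFINE_PER_CPU(struct pcp_pair, pair);

static bool update_pair(unsigned long old1, unsigned long old2,
			unsigned long new1, unsigned long new2)
{
	/* Replace both words only if both still hold the old values;
	 * returns true on success, false otherwise. */
	return this_cpu_cmpxchg_double(pair.word1, pair.word2,
				       old1, old2, new1, new2);
}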
@@ -255,6 +255,30 @@ extern void __bad_size_call_parameter(void);
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double. cmpxchg_double is passed two
 * percpu variables. The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));	\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
@@ -500,6 +524,45 @@ do { \
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif

/*
 * cmpxchg_double replaces two adjacent scalars at once. The first
 * two parameters are per cpu variables which have to be of the same
 * size. A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle). There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	preempt_disable();						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
@@ -703,6 +766,39 @@ do { \
	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif

#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (__this_cpu_read(pcp1) == (oval1) &&				\
	    __this_cpu_read(pcp2) == (oval2)) {				\
		__this_cpu_write(pcp1, (nval1));			\
		__this_cpu_write(pcp2, (nval2));			\
		__ret = 1;						\
	}								\
	(__ret);							\
})

#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

/*
 * IRQ safe versions of the per cpu RMW operations. Note that these operations
 * are *not* safe against modification of the same variable from another
@@ -823,4 +919,36 @@ do { \
	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
#endif

#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	local_irq_restore(flags);					\
	ret__;								\
})

#ifndef irqsafe_cpu_cmpxchg_double
# ifndef irqsafe_cpu_cmpxchg_double_1
#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_2
#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_4
#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_8
#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

#endif /* __LINUX_PERCPU_H */
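A second hedged sketch (reusing the hypothetical 'pair' declaration from the
example above; none of these names are defined by the patch) to illustrate the
contract of the __this_cpu_cmpxchg_double() variant added above: its generic
fallback does not disable preemption itself, so the caller must keep the task
from migrating while the two reads and two writes are performed.

/* Hypothetical example -- assumes the pcp_pair/pair declarations above. */
static bool update_pair_preempt_off(unsigned long old1, unsigned long old2,
				    unsigned long new1, unsigned long new2)
{
	bool ok;

	preempt_disable();		/* caller provides the protection */
	ok = __this_cpu_cmpxchg_double(pair.word1, pair.word2,
				       old1, old2, new1, new2);
	preempt_enable();
	return ok;
}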