[PATCH] FRV: Use the correct preemption primitives in kmap_atomic() and co
Use inc/dec_preempt_count() rather than preempt_enable/disable() and manually
add in the compiler barriers that were provided by the latter. This makes FRV
consistent with other archs.

Furthermore, the compiler barrier effects are now there unconditionally, at
least as far as preemption is concerned, because we don't want the compiler
moving memory accesses out of the section of code in which the mapping is in
force. In effect, kmap_atomic() must imply a LOCK-class barrier and
kunmap_atomic() must imply an UNLOCK-class barrier to the compiler.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 29da7eb0ec
parent 8741ca71a3
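As a rough sketch of the compiler-barrier property that the added "memory"
clobbers rely on (a generic illustration, not FRV code from this patch; the
names shared_flag and barrier_demo are made up):

	static int shared_flag;

	static void barrier_demo(void)
	{
		shared_flag = 1;	/* cannot be deferred past the asm... */

		/*
		 * A "memory" clobber tells GCC that the asm may read or write
		 * any memory, so cached values are flushed and no memory
		 * access is reordered across this point; this is the effect
		 * the patch attaches to the AMPR/TLB asm statements below.
		 */
		asm volatile("" ::: "memory");

		shared_flag = 2;	/* ...and cannot be hoisted above it */
	}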
@@ -82,11 +82,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
 										\
 	if (type != __KM_CACHE)							\
-		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr));		\
+		asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory");	\
 	else									\
 		asm volatile("movgs %0,iampr"#ampr"\n"				\
 			     "movgs %0,dampr"#ampr"\n"				\
-			     :: "r"(dampr)					\
+			     :: "r"(dampr) : "memory"				\
 			     );							\
 										\
 	asm("movsg damlr"#ampr",%0" : "=r"(damlr));				\
@@ -104,7 +104,7 @@ extern struct page *kmap_atomic_to_page(void *ptr);
 	asm volatile("movgs %0,tplr \n"						\
 		     "movgs %1,tppr \n"						\
 		     "tlbpr %0,gr0,#2,#1"					\
-		     : : "r"(damlr), "r"(dampr));				\
+		     : : "r"(damlr), "r"(dampr) : "memory");			\
 										\
 	/*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/	\
 										\
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned long paddr;
 
-	preempt_disable();
+	inc_preempt_count();
 	paddr = page_to_phys(page);
 
 	switch (type) {
@@ -138,16 +138,16 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	}
 }
 
 #define __kunmap_atomic_primary(type, ampr)				\
 do {									\
-	asm volatile("movgs gr0,dampr"#ampr"\n");			\
+	asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory");		\
 	if (type == __KM_CACHE)						\
-		asm volatile("movgs gr0,iampr"#ampr"\n");		\
+		asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory");	\
 } while(0)
 
 #define __kunmap_atomic_secondary(slot, vaddr)				\
 do {									\
-	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr));		\
+	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
 } while(0)
 
 static inline void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -170,7 +170,8 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	default:
 		BUG();
 	}
-	preempt_enable();
+	dec_preempt_count();
+	preempt_check_resched();
 }
 
 #endif /* !__ASSEMBLY__ */
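As a usage sketch (a hypothetical caller, not part of this patch; the function
name zero_highmem_page is made up), the implied barriers keep every access to
the mapping inside the kmap window:

	static void zero_highmem_page(struct page *page)
	{
		/* LOCK-class compiler barrier implied by the mapping */
		void *vaddr = kmap_atomic(page, KM_USER0);

		/* this access cannot be moved outside the window */
		memset(vaddr, 0, PAGE_SIZE);

		/* UNLOCK-class compiler barrier implied by the unmapping */
		kunmap_atomic(vaddr, KM_USER0);
	}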