x86: unify include/asm/edac_32/64.h

Same file, except for whitespace, comment formatting and:

32-bit:	unsigned long *virt_addr = va;
64-bit: unsigned int *virt_addr = va;

Both can be safely replaced by:
	u32 i, *virt_addr = va;

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
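The unification is safe because the scrub loop always walks memory in 4-byte steps, so the pointee must be exactly 32 bits wide: unsigned long is 32 bits on x86-32, unsigned int is 32 bits on x86-64, and u32 matches both. A minimal standalone sketch (hypothetical, not part of the commit) that makes the width assumption explicit:

	#include <stdint.h>
	#include <limits.h>

	/* The kernel's u32 is a fixed 32-bit type, like uint32_t here. */
	#if ULONG_MAX == 0xffffffffUL
	/* x86-32: unsigned long has the same width as u32. */
	_Static_assert(sizeof(unsigned long) == sizeof(uint32_t), "long is 32-bit");
	#else
	/* x86-64: unsigned int has the same width as u32. */
	_Static_assert(sizeof(unsigned int) == sizeof(uint32_t), "int is 32-bit");
	#endif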
Author: Thomas Gleixner, 2007-10-15 23:28:20 +02:00 (committed by Thomas Gleixner)
parent 327c21bc3d
commit 1f7afb08a5
3 changed files with 17 additions and 40 deletions

include/asm/edac.h

@@ -1,5 +1,18 @@
-#ifdef CONFIG_X86_32
-# include "edac_32.h"
-#else
-# include "edac_64.h"
+#ifndef _ASM_X86_EDAC_H
+#define _ASM_X86_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+	u32 i, *virt_addr = va;
+
+	/*
+	 * Very carefully read and write to memory atomically so we
+	 * are interrupt, DMA and SMP safe.
+	 */
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
 #endif
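For context, the EDAC core calls this helper to rewrite a memory range that reported a correctable ECC error: the lock-prefixed "addl $0" is an atomic read-modify-write that leaves the data unchanged but forces each word to be read and written back, regenerating its ECC bits. A rough sketch of a caller, modeled on the scrub path in drivers/edac (names and mapping details are illustrative, not taken from this commit):

	/* Hypothetical caller: scrub one faulting block within a page. */
	static void scrub_block(struct page *pg, unsigned long offset, u32 size)
	{
		void *virt_addr;

		/* Map the page to get a kernel virtual address. */
		virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);

		/* Atomically rewrite each 32-bit word in the range. */
		atomic_scrub(virt_addr + offset, size);

		kunmap_atomic(virt_addr, KM_BOUNCE_READ);
	}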

include/asm/edac_32.h (deleted)

@@ -1,18 +0,0 @@
-#ifndef ASM_EDAC_H
-#define ASM_EDAC_H
-
-/* ECC atomic, DMA, SMP and interrupt safe scrub function */
-
-static __inline__ void atomic_scrub(void *va, u32 size)
-{
-	unsigned long *virt_addr = va;
-	u32 i;
-
-	for (i = 0; i < size / 4; i++, virt_addr++)
-		/* Very carefully read and write to memory atomically
-		 * so we are interrupt, DMA and SMP safe.
-		 */
-		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
-}
-
-#endif

include/asm/edac_64.h (deleted)

@@ -1,18 +0,0 @@
-#ifndef ASM_EDAC_H
-#define ASM_EDAC_H
-
-/* ECC atomic, DMA, SMP and interrupt safe scrub function */
-
-static __inline__ void atomic_scrub(void *va, u32 size)
-{
-	unsigned int *virt_addr = va;
-	u32 i;
-
-	for (i = 0; i < size / 4; i++, virt_addr++)
-		/* Very carefully read and write to memory atomically
-		 * so we are interrupt, DMA and SMP safe.
-		 */
-		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
-}
-
-#endif
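The inline assembly in all three versions predates compiler atomic support; with a modern GCC or Clang, the same no-op atomic read-modify-write can be written portably. A hypothetical equivalent using the __atomic builtins (an illustration of what the asm does, not code from the kernel):

	#include <stdint.h>

	/* Adding 0 with an atomic fetch-add compiles to a lock-prefixed
	 * read-modify-write on x86, just like "lock; addl $0". */
	static inline void atomic_scrub_portable(void *va, uint32_t size)
	{
		uint32_t i, *virt_addr = va;

		for (i = 0; i < size / 4; i++, virt_addr++)
			(void)__atomic_fetch_add(virt_addr, 0, __ATOMIC_SEQ_CST);
	}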