This adds support for the Atmel AVR32 architecture as well as the AT32AP7000 CPU and the AT32STK1000 development board.

AVR32 is a new high-performance 32-bit RISC microprocessor core, designed for cost-sensitive embedded applications, with particular emphasis on low power consumption and high code density. The AVR32 architecture is not binary compatible with earlier 8-bit AVR architectures.

The AVR32 architecture, including the instruction set, is described by the AVR32 Architecture Manual, available from

http://www.atmel.com/dyn/resources/prod_documents/doc32000.pdf

The Atmel AT32AP7000 is the first CPU implementing the AVR32 architecture. It features a 7-stage pipeline, 16KB instruction and data caches and a full Memory Management Unit. It also comes with a large set of integrated peripherals, many of which are shared with the AT91 ARM-based controllers from Atmel.

Full data sheet is available from

http://www.atmel.com/dyn/resources/prod_documents/doc32003.pdf

while the CPU core implementation including caches and MMU is documented by the AVR32 AP Technical Reference, available from

http://www.atmel.com/dyn/resources/prod_documents/doc32001.pdf

Information about the AT32STK1000 development board can be found at

http://www.atmel.com/dyn/products/tools_card.asp?tool_id=3918

including a BSP CD image with an earlier version of this patch, development tools (binaries and source/patches) and a root filesystem image suitable for booting from SD card.

Alternatively, there's a preliminary "getting started" guide available at

http://avr32linux.org/twiki/bin/view/Main/GettingStarted

which provides links to the sources and patches you will need in order to set up a cross-compiling environment for avr32-linux.

This patch, as well as the other patches included with the BSP and the toolchain patches, is actively supported by Atmel Corporation.

[dmccr@us.ibm.com: Fix more pxx_page macro locations]
[bunk@stusta.de: fix `make defconfig']
Signed-off-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Dave McCracken <dmccr@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

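/*
 * Note (added for clarity, not in the original file): on AVR32, P1 is the
 * cached and P2 the uncached straight-mapped view of physical memory (see
 * <asm/addrspace.h>).  The helpers below rely on this by handing out P2
 * addresses for coherent memory and by skipping cache maintenance for
 * anything that already lives in P2.
 */
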
void dma_cache_sync(void *vaddr, size_t size, int direction)
{
        /*
         * No need to sync an uncached area
         */
        if (PXSEG(vaddr) == P2SEG)
                return;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                dma_cache_inv(vaddr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                dma_cache_wback(vaddr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                dma_cache_wback_inv(vaddr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(dma_cache_sync);

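/*
 * Illustrative sketch (added for this writeup, not part of the original
 * file): how a driver might use dma_cache_sync() around a streaming
 * transfer.  The buffer, length and surrounding driver code are
 * hypothetical.
 */
#if 0
static void example_receive(void *buf, size_t len)
{
        /* Drop any stale cache lines before the device writes into buf */
        dma_cache_sync(buf, len, DMA_FROM_DEVICE);

        /* ... program the device to DMA 'len' bytes into 'buf' ... */
}
#endif
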
static struct page *__dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *handle, gfp_t gfp)
{
        struct page *page, *free, *end;
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /*
         * Split the higher-order block into individual order-0 pages so
         * that the unused tail pages can be handed back below.
         */
        split_page(page, order);

        /*
         * When accessing physical memory with valid cache data, we
         * get a cache hit even if the virtual memory region is marked
         * as uncached.
         *
         * Since the memory is newly allocated, there is no point in
         * doing a writeback. If the previous owner cares, he should
         * have flushed the cache before releasing the memory.
         */
        invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

        *handle = page_to_bus(page);
        free = page + (size >> PAGE_SHIFT);
        end = page + (1 << order);

        /*
         * Free any unused pages
         */
        while (free < end) {
                __free_page(free);
                free++;
        }

        return page;
}

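/*
 * Worked example (added for this writeup): with 4 KiB pages, a 20 KiB
 * request is page-aligned to 5 pages; get_order() rounds that up to
 * order 3, i.e. an 8-page block.  The 5 leading pages are kept and the
 * 3 trailing pages are returned to the allocator by the loop above.
 */
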
static void __dma_free(struct device *dev, size_t size,
                       struct page *page, dma_addr_t handle)
{
        struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

        while (page < end)
                __free_page(page++);
}

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        void *ret = NULL;

        page = __dma_alloc(dev, size, handle, gfp);
        if (page)
                ret = phys_to_uncached(page_to_phys(page));

        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
                       void *cpu_addr, dma_addr_t handle)
{
        void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
        struct page *page;

        pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
                 cpu_addr, (unsigned long)handle, (unsigned)size);
        BUG_ON(!virt_addr_valid(addr));
        page = virt_to_page(addr);
        __dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);

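/*
 * Illustrative sketch (added for this writeup, not part of the original
 * file): allocating and releasing a small coherent buffer, e.g. a DMA
 * descriptor ring.  The returned pointer lives in the uncached P2 segment,
 * so no explicit cache maintenance is needed; *handle is the bus address
 * to program into the device.  The device pointer and register usage are
 * hypothetical.
 */
#if 0
static int example_setup_ring(struct device *dev)
{
        dma_addr_t ring_dma;
        void *ring;

        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* ... write ring_dma into the controller's descriptor base ... */

        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        return 0;
}
#endif
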
#if 0
void *dma_alloc_writecombine(struct device *dev, size_t size,
                             dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;

        page = __dma_alloc(dev, size, handle, gfp);
        if (!page)
                return NULL;

        /* Now, map the page into P3 with write-combining turned on */
        return __ioremap(page_to_phys(page), size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);

void dma_free_writecombine(struct device *dev, size_t size,
                           void *cpu_addr, dma_addr_t handle)
{
        struct page *page;

        iounmap(cpu_addr);

        page = bus_to_page(handle);
        __dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);
#endif