mirror of
https://github.com/torvalds/linux.git
synced 2024-11-20 02:51:44 +00:00
43377453af
This is a helper to be used by the DMA mapping API to handle cache maintenance for memory identified by a page structure instead of a virtual address. Those pages may or may not be highmem pages, and when they're highmem pages, they may or may not be virtually mapped. When they're not mapped then there is no L1 cache to worry about. But even in that case the L2 cache must be processed since unmapped highmem pages can still be L2 cached. Signed-off-by: Nicolas Pitre <nico@marvell.com>
32 lines
935 B
C
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#include <asm/kmap_types.h>

/*
 * Fixed virtual window used for persistent kmap (pkmap) mappings of
 * highmem pages: one PMD's worth of address space immediately below
 * the lowmem direct mapping, covering LAST_PKMAP page-table entries.
 */
#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
/* Convert between a pkmap virtual address and its slot index. */
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))

/* Protection bits used when establishing kmap mappings. */
#define kmap_prot		PAGE_KERNEL

/* Cache maintenance hook invoked by the generic highmem code. */
#define flush_cache_kmaps()	flush_cache_all()

/* Page table backing the pkmap window; defined in the mm code. */
extern pte_t *pkmap_page_table;

/*
 * Tells the generic highmem code that this architecture provides
 * kmap_high_get() (used so DMA cache maintenance can look up an
 * existing mapping of a highmem page without creating a new one —
 * see the accompanying commit description).
 */
#define ARCH_NEEDS_KMAP_HIGH_GET

extern void *kmap_high(struct page *page);
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);

/* Public kmap API implemented by this architecture. */
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page, enum km_type type);
extern void kunmap_atomic(void *kvaddr, enum km_type type);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
extern struct page *kmap_atomic_to_page(const void *ptr);

#endif