x86, PAT: Remove page granularity tracking for vm_insert_pfn maps

This change resolves the problem of too many single-page entries
in pat_memtype_list and "freeing invalid memtype" errors with i915,
reported here:

  http://marc.info/?l=linux-kernel&m=123845244713183&w=2

Remove page-level granularity track and untrack of vm_insert_pfn.
memtype tracking at page granularity does not scale; a cleaner
approach is for the driver to request a memory type for a bigger
IO address range or for the device's PCI IO memory range, either
at mmap time or at driver init time, and to just use that type
during vm_insert_pfn.
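
As a rough illustration of that approach (a hedged sketch, not part of
this patch and not using the new APIs mentioned below, written against
the fault-handler signature of this era), a driver could pick the memory
type once in its mmap handler and let every vm_insert_pfn() in the fault
path reuse it. The my_dev, my_dev_mmap and my_dev_fault names and the
aperture_base field are hypothetical placeholders, not kernel interfaces:

  #include <linux/fs.h>
  #include <linux/mm.h>

  struct my_dev {
          resource_size_t aperture_base;  /* hypothetical: start of device aperture */
  };

  static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
          struct my_dev *dev = vma->vm_private_data;
          unsigned long pfn;

          /*
           * No per-page memtype track/untrack here: vm_insert_pfn() simply
           * uses the vma->vm_page_prot chosen once at mmap time.  This
           * assumes vm_pgoff indexes straight into the device aperture.
           */
          pfn = (dev->aperture_base >> PAGE_SHIFT) + vmf->pgoff;
          if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
                  return VM_FAULT_SIGBUS;
          return VM_FAULT_NOPAGE;
  }

  static const struct vm_operations_struct my_dev_vm_ops = {
          .fault = my_dev_fault,
  };

  static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
  {
          vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND;
          /* Decide the memory type for the whole mapping up front. */
          vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
          vma->vm_private_data = file->private_data;
          vma->vm_ops = &my_dev_vm_ops;
          return 0;
  }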

This patch just removes the track/untrack of vm_insert_pfn. That
means we will be in the same state as 2.6.28, with respect to these APIs.

Newer APIs for drivers to request a memtype for a bigger region
are coming soon.

[ Impact: fix Xorg startup warnings and hangs ]

Reported-by: Arkadiusz Miskiewicz <a.miskiewicz@gmail.com>
Tested-by: Arkadiusz Miskiewicz <a.miskiewicz@gmail.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
LKML-Reference: <20090408223716.GC3493@linux-os.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:    Pallipadi, Venkatesh  (2009-04-08 15:37:16 -07:00)
Committer: Ingo Molnar
parent d45b41ae8d
commit 4b06504627


@@ -669,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
*
* If the vma has a linear pfn mapping for the entire range, we get the prot
* from pte and reserve the entire vma range with single reserve_pfn_range call.
* Otherwise, we reserve the entire vma range, by going through the PTEs page
* by page to get physical address and protection.
*/
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
int retval = 0;
unsigned long i, j;
resource_size_t paddr;
unsigned long prot;
unsigned long vma_start = vma->vm_start;
unsigned long vma_end = vma->vm_end;
unsigned long vma_size = vma_end - vma_start;
unsigned long vma_size = vma->vm_end - vma->vm_start;
pgprot_t pgprot;
if (!pat_enabled)
return 0;
/*
* For now, only handle remap_pfn_range() vmas where
* is_linear_pfn_mapping() == TRUE. Handling of
* vm_insert_pfn() is TBD.
*/
if (is_linear_pfn_mapping(vma)) {
/*
* reserve the whole chunk covered by vma. We need the
* starting address and protection from pte.
*/
if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
WARN_ON_ONCE(1);
return -EINVAL;
}
@@ -699,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
}
/* reserve entire vma page by page, using pfn and prot from pte */
for (i = 0; i < vma_size; i += PAGE_SIZE) {
if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
continue;
pgprot = __pgprot(prot);
retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
if (retval)
goto cleanup_ret;
}
return 0;
cleanup_ret:
/* Reserve error: Cleanup partial reservation and return error */
for (j = 0; j < i; j += PAGE_SIZE) {
if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
continue;
free_pfn_range(paddr, PAGE_SIZE);
}
return retval;
}
/*
@@ -730,50 +708,28 @@ cleanup_ret:
* prot is passed in as a parameter for the new mapping. If the vma has a
* linear pfn mapping for the entire range reserve the entire vma range with
* single reserve_pfn_range call.
* Otherwise, we look at the pfn and size and reserve only the specified range
* page by page.
*
* Note that this function can be called with caller trying to map only a
* subrange/page inside the vma.
*/
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long size)
{
int retval = 0;
unsigned long i, j;
resource_size_t base_paddr;
resource_size_t paddr;
unsigned long vma_start = vma->vm_start;
unsigned long vma_end = vma->vm_end;
unsigned long vma_size = vma_end - vma_start;
unsigned long vma_size = vma->vm_end - vma->vm_start;
if (!pat_enabled)
return 0;
/*
* For now, only handle remap_pfn_range() vmas where
* is_linear_pfn_mapping() == TRUE. Handling of
* vm_insert_pfn() is TBD.
*/
if (is_linear_pfn_mapping(vma)) {
/* reserve the whole chunk starting from vm_pgoff */
paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
return reserve_pfn_range(paddr, vma_size, prot, 0);
}
/* reserve page by page using pfn and size */
base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
for (i = 0; i < size; i += PAGE_SIZE) {
paddr = base_paddr + i;
retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
if (retval)
goto cleanup_ret;
}
return 0;
cleanup_ret:
/* Reserve error: Cleanup partial reservation and return error */
for (j = 0; j < i; j += PAGE_SIZE) {
paddr = base_paddr + j;
free_pfn_range(paddr, PAGE_SIZE);
}
return retval;
}
/*
@@ -784,39 +740,23 @@ cleanup_ret:
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size)
{
unsigned long i;
resource_size_t paddr;
unsigned long prot;
unsigned long vma_start = vma->vm_start;
unsigned long vma_end = vma->vm_end;
unsigned long vma_size = vma_end - vma_start;
unsigned long vma_size = vma->vm_end - vma->vm_start;
if (!pat_enabled)
return;
/*
* For now, only handle remap_pfn_range() vmas where
* is_linear_pfn_mapping() == TRUE. Handling of
* vm_insert_pfn() is TBD.
*/
if (is_linear_pfn_mapping(vma)) {
/* free the whole chunk starting from vm_pgoff */
paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
free_pfn_range(paddr, vma_size);
return;
}
if (size != 0 && size != vma_size) {
/* free page by page, using pfn and size */
paddr = (resource_size_t)pfn << PAGE_SHIFT;
for (i = 0; i < size; i += PAGE_SIZE) {
paddr = paddr + i;
free_pfn_range(paddr, PAGE_SIZE);
}
} else {
/* free entire vma, page by page, using the pfn from pte */
for (i = 0; i < vma_size; i += PAGE_SIZE) {
if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
continue;
free_pfn_range(paddr, PAGE_SIZE);
}
}
}
pgprot_t pgprot_writecombine(pgprot_t prot)