fix "mspec: handle shrinking virtual memory areas"
The vma_data structure may be shared by vma's from multiple tasks, with no way of knowing which areas are shared or not shared, so release/clear pages only when the refcount (of vma's) goes to zero.

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f9b7cba1b8
commit afa684f6fd
@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma)
  * mspec_close
  *
  * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vma's sharing this vma_data structure.
  */
 static void
 mspec_close(struct vm_area_struct *vma)
 {
         struct vma_data *vdata;
-        int index, last_index, result;
+        int index, last_index;
         unsigned long my_page;
 
         vdata = vma->vm_private_data;
 
-        BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+        if (!atomic_dec_and_test(&vdata->refcnt))
+                return;
 
-        spin_lock(&vdata->lock);
-        index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
-        last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
-        for (; index < last_index; index++) {
+        last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+        for (index = 0; index < last_index; index++) {
                 if (vdata->maddr[index] == 0)
                         continue;
                 /*
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma)
                  */
                 my_page = vdata->maddr[index];
                 vdata->maddr[index] = 0;
-                spin_unlock(&vdata->lock);
-                result = mspec_zero_block(my_page, PAGE_SIZE);
-                if (!result)
+                if (!mspec_zero_block(my_page, PAGE_SIZE))
                         uncached_free_page(my_page);
                 else
                         printk(KERN_WARNING "mspec_close(): "
-                               "failed to zero page %i\n",
-                               result);
-                spin_lock(&vdata->lock);
+                               "failed to zero page %ld\n", my_page);
         }
-        spin_unlock(&vdata->lock);
-
-        if (!atomic_dec_and_test(&vdata->refcnt))
-                return;
 
         if (vdata->flags & VMD_VMALLOCED)
                 vfree(vdata);
@@ -201,7 +192,6 @@ mspec_close(struct vm_area_struct *vma)
                 kfree(vdata);
 }
 
-
 /*
  * mspec_nopfn
  *
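The heart of the change is the ordering: mspec_close() now drops the shared reference count first and returns unless this was the last mapping, and only the final closer walks the entire vdata range and releases pages, so a still-live mapping in another task can no longer lose its pages. Below is a minimal userspace sketch of that refcount-guarded teardown pattern; the names (shared_data, mapping_close) are hypothetical and C11 atomics stand in for the kernel's atomic_dec_and_test(). It illustrates the idea only and is not the driver code.

/* Sketch: free shared per-mapping data only when the last reference drops.
 * Hypothetical userspace analogue of the mspec_close() change; names and
 * structure are illustrative, not taken from the driver. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_data {
        atomic_int refcnt;      /* one count per mapping sharing this data */
        size_t npages;
        unsigned long *pages;   /* 0 means "no page allocated at this index" */
};

/* Called once per mapping when it goes away (analogous to a ->close() hook). */
static void mapping_close(struct shared_data *d)
{
        /* Decrement first; only the last closer tears everything down,
         * which is the ordering the fix establishes. */
        if (atomic_fetch_sub(&d->refcnt, 1) != 1)
                return;

        /* Walk the whole shared range, not just this mapping's slice. */
        for (size_t i = 0; i < d->npages; i++) {
                if (d->pages[i] == 0)
                        continue;
                printf("releasing page handle %lu\n", d->pages[i]);
                d->pages[i] = 0;
        }
        free(d->pages);
        free(d);
}

int main(void)
{
        struct shared_data *d = calloc(1, sizeof(*d));
        d->npages = 4;
        d->pages = calloc(d->npages, sizeof(*d->pages));
        d->pages[1] = 42;               /* pretend one page is in use */
        atomic_store(&d->refcnt, 2);    /* two mappings share the structure */

        mapping_close(d);               /* first close: refcount 2 -> 1, nothing freed */
        mapping_close(d);               /* last close: pages released, data freed */
        return 0;
}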