ksm: five little cleanups

1. We don't use the __break_cow entry point now: merge it into break_cow.
2. remove_all_slot_rmap_items is just a special case of
   remove_trailing_rmap_items: use the latter instead (see the sketch after
   this list).
3. Extend comment on unmerge_ksm_pages and rmap_items.
4. try_to_merge_two_pages should use try_to_merge_with_ksm_page
   instead of duplicating its code; and so swap them around.
5. Comment on cmp_and_merge_page described last year's code: update it.
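
The equivalence behind point 2 is easy to see: removing every rmap_item on an
mm_slot is just removing the "trailing" items when the cursor starts at the
head of the list. A toy user-space sketch of the same pattern follows; the
struct node, remove_trailing() and main() below are illustrative stand-ins,
not the kernel's types or helpers.

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy stand-in for an rmap_item chained off an mm_slot. */
	struct node {
		int val;
		struct node *next;
	};

	/*
	 * Free every node from *cur onward.  Handed the head pointer it
	 * frees the whole list, which is why a separate "remove all"
	 * helper would be redundant.
	 */
	static void remove_trailing(struct node **cur)
	{
		while (*cur) {
			struct node *victim = *cur;

			*cur = victim->next;	/* unlink before freeing */
			free(victim);
		}
	}

	int main(void)
	{
		struct node *head = NULL;
		int i;

		/* Build a short list: 2 -> 1 -> 0. */
		for (i = 0; i < 3; i++) {
			struct node *n = malloc(sizeof(*n));

			n->val = i;
			n->next = head;
			head = n;
		}

		/* "Remove all" is just "remove trailing" from the head. */
		remove_trailing(&head);
		printf("list empty: %s\n", head ? "no" : "yes");
		return 0;
	}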

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 81464e3060
parent 6e15838425
Author: Hugh Dickins, 2009-09-21 17:02:15 -07:00 (committed by Linus Torvalds)

diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -315,22 +315,18 @@ static void break_ksm(struct vm_area_struct *vma, unsigned long addr)
 	/* Which leaves us looping there if VM_FAULT_OOM: hmmm... */
 }
 
-static void __break_cow(struct mm_struct *mm, unsigned long addr)
+static void break_cow(struct mm_struct *mm, unsigned long addr)
 {
 	struct vm_area_struct *vma;
 
+	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, addr);
 	if (!vma || vma->vm_start > addr)
-		return;
+		goto out;
 	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
-		return;
+		goto out;
 	break_ksm(vma, addr);
-}
-
-static void break_cow(struct mm_struct *mm, unsigned long addr)
-{
-	down_read(&mm->mmap_sem);
-	__break_cow(mm, addr);
+out:
 	up_read(&mm->mmap_sem);
 }
@@ -439,17 +435,6 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 	cond_resched();		/* we're called from many long loops */
 }
 
-static void remove_all_slot_rmap_items(struct mm_slot *mm_slot)
-{
-	struct rmap_item *rmap_item, *node;
-
-	list_for_each_entry_safe(rmap_item, node, &mm_slot->rmap_list, link) {
-		remove_rmap_item_from_tree(rmap_item);
-		list_del(&rmap_item->link);
-		free_rmap_item(rmap_item);
-	}
-}
-
 static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
 				       struct list_head *cur)
 {
@@ -471,6 +456,11 @@ static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
  * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
  * rmap_items from parent to child at fork time (so as not to waste time
  * if exit comes before the next scan reaches it).
+ *
+ * Similarly, although we'd like to remove rmap_items (so updating counts
+ * and freeing memory) when unmerging an area, it's easier to leave that
+ * to the next pass of ksmd - consider, for example, how ksmd might be
+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
  */
 static void unmerge_ksm_pages(struct vm_area_struct *vma,
 			      unsigned long start, unsigned long end)
@@ -495,7 +485,7 @@ static void unmerge_and_remove_all_rmap_items(void)
 				continue;
 			unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
 		}
-		remove_all_slot_rmap_items(mm_slot);
+		remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
 		up_read(&mm->mmap_sem);
 	}
@@ -533,7 +523,7 @@ static void remove_mm_from_lists(struct mm_struct *mm)
 	list_del(&mm_slot->mm_list);
 	spin_unlock(&ksm_mmlist_lock);
 
-	remove_all_slot_rmap_items(mm_slot);
+	remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
 	free_mm_slot(mm_slot);
 	clear_bit(MMF_VM_MERGEABLE, &mm->flags);
 }
@@ -739,6 +729,29 @@ out:
 	return err;
 }
 
+/*
+ * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
+ * but no new kernel page is allocated: kpage must already be a ksm page.
+ */
+static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
+				      unsigned long addr1,
+				      struct page *page1,
+				      struct page *kpage)
+{
+	struct vm_area_struct *vma;
+	int err = -EFAULT;
+
+	down_read(&mm1->mmap_sem);
+	vma = find_vma(mm1, addr1);
+	if (!vma || vma->vm_start > addr1)
+		goto out;
+
+	err = try_to_merge_one_page(vma, page1, kpage);
+out:
+	up_read(&mm1->mmap_sem);
+	return err;
+}
+
 /*
  * try_to_merge_two_pages - take two identical pages and prepare them
  * to be merged into one page.
@@ -772,9 +785,8 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
 	down_read(&mm1->mmap_sem);
 	vma = find_vma(mm1, addr1);
 	if (!vma || vma->vm_start > addr1) {
-		put_page(kpage);
 		up_read(&mm1->mmap_sem);
-		return err;
+		goto out;
 	}
 
 	copy_user_highpage(kpage, page1, addr1, vma);
@@ -782,55 +794,19 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
 	up_read(&mm1->mmap_sem);
 
 	if (!err) {
-		down_read(&mm2->mmap_sem);
-		vma = find_vma(mm2, addr2);
-		if (!vma || vma->vm_start > addr2) {
-			put_page(kpage);
-			up_read(&mm2->mmap_sem);
-			break_cow(mm1, addr1);
-			return -EFAULT;
-		}
-
-		err = try_to_merge_one_page(vma, page2, kpage);
-		up_read(&mm2->mmap_sem);
-
+		err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage);
 		/*
-		 * If the second try_to_merge_one_page failed, we have a
-		 * ksm page with just one pte pointing to it, so break it.
+		 * If that fails, we have a ksm page with only one pte
+		 * pointing to it: so break it.
 		 */
 		if (err)
 			break_cow(mm1, addr1);
 	}
-
+out:
 	put_page(kpage);
 	return err;
 }
 
-/*
- * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
- * but no new kernel page is allocated: kpage must already be a ksm page.
- */
-static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
-				      unsigned long addr1,
-				      struct page *page1,
-				      struct page *kpage)
-{
-	struct vm_area_struct *vma;
-	int err = -EFAULT;
-
-	down_read(&mm1->mmap_sem);
-	vma = find_vma(mm1, addr1);
-	if (!vma || vma->vm_start > addr1) {
-		up_read(&mm1->mmap_sem);
-		return err;
-	}
-
-	err = try_to_merge_one_page(vma, page1, kpage);
-	up_read(&mm1->mmap_sem);
-	return err;
-}
-
 /*
  * stable_tree_search - search page inside the stable tree
  * @page: the page that we are searching identical pages to.
@@ -1033,10 +1009,10 @@ static void stable_tree_append(struct rmap_item *rmap_item,
 }
 
 /*
- * cmp_and_merge_page - take a page computes its hash value and check if there
- * is similar hash value to different page,
- * in case we find that there is similar hash to different page we call to
- * try_to_merge_two_pages().
+ * cmp_and_merge_page - first see if page can be merged into the stable tree;
+ * if not, compare checksum to previous and if it's the same, see if page can
+ * be inserted into the unstable tree, or merged with a page already there and
+ * both transferred to the stable tree.
  *
  * @page: the page that we are searching identical page to.
  * @rmap_item: the reverse mapping into the virtual address of this page