Mirror of https://github.com/torvalds/linux.git, synced 2024-11-18 01:51:53 +00:00
powerpc/mm: add radix__remove_section_mapping()
Tear down and free the four-level page tables of physical mappings
during memory hotremove.

Borrow the basic structure of remove_pagetable() and friends from the
identically-named x86 functions. Reduce the frequency of tlb flushes and
page_table_lock spinlocks by only doing them in the outermost function.
There was some question as to whether the locking is needed at all.
Leave it for now, but we could consider dropping it.

Memory must be offline to be removed, thus not in use. So there
shouldn't be the sort of concurrent page walking activity here that
might prompt us to use RCU.

Signed-off-by: Reza Arbab <arbab@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
parent 6cc27341b2
commit 4b5d62ca17
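
Before the diff itself, a rough user-space sketch of the shape the commit message describes may help: walk the table top-down, clear the leaf entries for the range being removed, free any table page that has become entirely empty, and take the lock and do the single flush only in the outermost function. Everything below is invented for illustration (a toy two-level table; names such as toy_remove_range() and TOY_PTRS_PER_TABLE are not kernel symbols) and is not the code added by the commit:

/*
 * Toy model of the teardown pattern: clear leaf entries, free tables
 * that become empty, and do the locking and "flush" only once, in the
 * outermost function. Not kernel code; all toy_* names are made up.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_PTRS_PER_TABLE 8		/* entries per level, tiny on purpose */

struct toy_pte { int present; };

struct toy_pmd {
	struct toy_pte *pte_table;	/* NULL when not present */
};

static struct toy_pmd toy_pgd[TOY_PTRS_PER_TABLE];

/* Free a leaf table only if every entry in it has been cleared. */
static void toy_free_pte_table(struct toy_pmd *pmd)
{
	int i;

	for (i = 0; i < TOY_PTRS_PER_TABLE; i++)
		if (pmd->pte_table[i].present)
			return;

	free(pmd->pte_table);
	pmd->pte_table = NULL;		/* analogous to pmd_clear() */
}

/* Clear the leaf entries covering [start, end) within one table. */
static void toy_remove_pte_range(struct toy_pte *pte_table,
				 unsigned long start, unsigned long end)
{
	unsigned long i;

	for (i = start; i < end; i++)
		pte_table[i].present = 0;	/* analogous to pte_clear() */
}

/*
 * Outermost function: in the kernel this is where remove_pagetable()
 * takes init_mm.page_table_lock and issues the one TLB flush.
 */
static void toy_remove_range(unsigned long start, unsigned long end)
{
	unsigned long idx;

	/* lock would be taken here, once */
	for (idx = 0; idx < TOY_PTRS_PER_TABLE; idx++) {
		struct toy_pmd *pmd = &toy_pgd[idx];
		unsigned long base = idx * TOY_PTRS_PER_TABLE;
		unsigned long lo, hi;

		if (!pmd->pte_table)
			continue;

		/* clamp [start, end) to the slots this table covers */
		lo = start > base ? start - base : 0;
		hi = end > base ? end - base : 0;
		if (hi > TOY_PTRS_PER_TABLE)
			hi = TOY_PTRS_PER_TABLE;
		if (lo >= hi)
			continue;

		toy_remove_pte_range(pmd->pte_table, lo, hi);
		toy_free_pte_table(pmd);
	}
	/* unlock + single "flush" for the whole range happens here */
	printf("flushed range [%lu, %lu) once\n", start, end);
}

int main(void)
{
	int i;

	/* populate one second-level table so there is something to tear down */
	toy_pgd[1].pte_table = calloc(TOY_PTRS_PER_TABLE, sizeof(struct toy_pte));
	for (i = 0; i < TOY_PTRS_PER_TABLE; i++)
		toy_pgd[1].pte_table[i].present = 1;

	toy_remove_range(8, 16);	/* exactly covers toy_pgd[1] */
	printf("pgd[1] table %s\n", toy_pgd[1].pte_table ? "kept" : "freed");
	return 0;
}
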
arch/powerpc/include/asm/book3s/64/radix.h

@@ -294,6 +294,7 @@ static inline unsigned long radix__get_tree_size(void)
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 int radix__create_section_mapping(unsigned long start, unsigned long end);
+int radix__remove_section_mapping(unsigned long start, unsigned long end);
 #endif /* CONFIG_MEMORY_HOTPLUG */
 #endif /* __ASSEMBLY__ */
 #endif
arch/powerpc/mm/pgtable-book3s64.c

@@ -139,7 +139,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
 int remove_section_mapping(unsigned long start, unsigned long end)
 {
 	if (radix_enabled())
-		return -ENODEV;
+		return radix__remove_section_mapping(start, end);
 
 	return hash__remove_section_mapping(start, end);
 }
arch/powerpc/mm/pgtable-radix.c

@@ -482,10 +482,143 @@ void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
+{
+	pte_t *pte;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		pte = pte_start + i;
+		if (!pte_none(*pte))
+			return;
+	}
+
+	pte_free_kernel(&init_mm, pte_start);
+	pmd_clear(pmd);
+}
+
+static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+{
+	pmd_t *pmd;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		pmd = pmd_start + i;
+		if (!pmd_none(*pmd))
+			return;
+	}
+
+	pmd_free(&init_mm, pmd_start);
+	pud_clear(pud);
+}
+
+static void remove_pte_table(pte_t *pte_start, unsigned long addr,
+			     unsigned long end)
+{
+	unsigned long next;
+	pte_t *pte;
+
+	pte = pte_start + pte_index(addr);
+	for (; addr < end; addr = next, pte++) {
+		next = (addr + PAGE_SIZE) & PAGE_MASK;
+		if (next > end)
+			next = end;
+
+		if (!pte_present(*pte))
+			continue;
+
+		pte_clear(&init_mm, addr, pte);
+	}
+}
+
+static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+			     unsigned long end)
+{
+	unsigned long next;
+	pte_t *pte_base;
+	pmd_t *pmd;
+
+	pmd = pmd_start + pmd_index(addr);
+	for (; addr < end; addr = next, pmd++) {
+		next = pmd_addr_end(addr, end);
+
+		if (!pmd_present(*pmd))
+			continue;
+
+		if (pmd_huge(*pmd)) {
+			pte_clear(&init_mm, addr, (pte_t *)pmd);
+			continue;
+		}
+
+		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
+		remove_pte_table(pte_base, addr, next);
+		free_pte_table(pte_base, pmd);
+	}
+}
+
+static void remove_pud_table(pud_t *pud_start, unsigned long addr,
+			     unsigned long end)
+{
+	unsigned long next;
+	pmd_t *pmd_base;
+	pud_t *pud;
+
+	pud = pud_start + pud_index(addr);
+	for (; addr < end; addr = next, pud++) {
+		next = pud_addr_end(addr, end);
+
+		if (!pud_present(*pud))
+			continue;
+
+		if (pud_huge(*pud)) {
+			pte_clear(&init_mm, addr, (pte_t *)pud);
+			continue;
+		}
+
+		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
+		remove_pmd_table(pmd_base, addr, next);
+		free_pmd_table(pmd_base, pud);
+	}
+}
+
+static void remove_pagetable(unsigned long start, unsigned long end)
+{
+	unsigned long addr, next;
+	pud_t *pud_base;
+	pgd_t *pgd;
+
+	spin_lock(&init_mm.page_table_lock);
+
+	for (addr = start; addr < end; addr = next) {
+		next = pgd_addr_end(addr, end);
+
+		pgd = pgd_offset_k(addr);
+		if (!pgd_present(*pgd))
+			continue;
+
+		if (pgd_huge(*pgd)) {
+			pte_clear(&init_mm, addr, (pte_t *)pgd);
+			continue;
+		}
+
+		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
+		remove_pud_table(pud_base, addr, next);
+	}
+
+	spin_unlock(&init_mm.page_table_lock);
+	radix__flush_tlb_kernel_range(start, end);
+}
+
 int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
 {
 	return create_physical_mapping(start, end);
 }
+
+int radix__remove_section_mapping(unsigned long start, unsigned long end)
+{
+	remove_pagetable(start, end);
+	return 0;
+}
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP