mm: fix spelling mistakes

Fix some spelling mistakes in comments:

  each having differents usage ==> each has a different usage
  statments ==> statements
  adresses ==> addresses
  aggresive ==> aggressive
  datas ==> data
  posion ==> poison
  higer ==> higher
  precisly ==> precisely
  wont ==> won't
  We moves tha ==> We move the
  endianess ==> endianness

Link: https://lkml.kernel.org/r/20210519065853.7723-2-thunder.leizhen@huawei.com
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Reviewed-by: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 041711ce7c
parent fac7757e1f
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -26,7 +26,7 @@ struct vmem_altmap {
 };
 
 /*
- * Specialize ZONE_DEVICE memory into multiple types each having differents
+ * Specialize ZONE_DEVICE memory into multiple types each has a different
  * usage.
  *
  * MEMORY_DEVICE_PRIVATE:
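This comment block heads the memory_type enum in memremap.h. A hedged sketch of the enum's shape around this kernel version (MEMORY_DEVICE_PRIVATE appears in the hunk above; the remaining names are recalled from memremap.h and may drift across releases):

/* Hedged sketch, not copied verbatim from the patch's tree. */
enum memory_type {
	/* 0 is left unused so an uninitialized type is easy to spot */
	MEMORY_DEVICE_PRIVATE = 1,
	MEMORY_DEVICE_FS_DAX,
	MEMORY_DEVICE_GENERIC,
	MEMORY_DEVICE_PCI_P2PDMA,
};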
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -397,7 +397,7 @@ struct mm_struct {
 		unsigned long mmap_base;	/* base of mmap area */
 		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
-		/* Base adresses for compatible mmap() */
+		/* Base addresses for compatible mmap() */
 		unsigned long mmap_compat_base;
 		unsigned long mmap_compat_legacy_base;
 #endif
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -114,7 +114,7 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
 struct pglist_data;
 
 /*
- * Add a wild amount of padding here to ensure datas fall into separate
+ * Add a wild amount of padding here to ensure data fall into separate
  * cachelines. There are very few zone structures in the machine, so space
  * consumption is not a concern here.
  */
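The padding this comment describes keeps fields used by different hot paths out of the same cacheline. A minimal user-space sketch of the same trick, assuming a 64-byte cacheline (the struct and field names below are illustrative, not from the kernel):

#include <stdio.h>
#include <stddef.h>

/* Zero-size, cacheline-aligned member (a GNU C extension, as in the
 * kernel): everything after it starts on a fresh cacheline, so the two
 * hot fields never false-share. */
struct pad { char x[0]; } __attribute__((aligned(64)));

struct fake_zone {
	unsigned long free_pages;	/* touched by the allocation path */
	struct pad _pad1_;
	unsigned long reclaim_word;	/* touched by the reclaim path */
};

int main(void)
{
	printf("reclaim_word offset: %zu\n",
	       offsetof(struct fake_zone, reclaim_word));	/* prints 64 */
	return 0;
}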
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1340,7 +1340,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 			 * could potentially call huge_pmd_unshare. Because of
 			 * this, take semaphore in write mode here and set
 			 * TTU_RMAP_LOCKED to indicate we have taken the lock
-			 * at this higer level.
+			 * at this higher level.
 			 */
 			mapping = hugetlb_page_mapping_lock_write(hpage);
 			if (mapping) {
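For context, the pattern the comment describes looks roughly like the following paraphrased sketch, built from the kernel helpers visible in the hunk plus i_mmap_unlock_write(); it is not the verbatim continuation of the function:

/* hugetlb_page_mapping_lock_write() takes i_mmap_rwsem in write mode;
 * TTU_RMAP_LOCKED tells the rmap walk the lock is already held. */
mapping = hugetlb_page_mapping_lock_write(hpage);
if (mapping) {
	unmap_success = try_to_unmap(hpage, ttu | TTU_RMAP_LOCKED);
	i_mmap_unlock_write(mapping);
}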
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -783,7 +783,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *z
 
 	/*
 	 * {on,off}lining is constrained to full memory sections (or more
-	 * precisly to memory blocks from the user space POV).
+	 * precisely to memory blocks from the user space POV).
 	 * memmap_on_memory is an exception because it reserves initial part
 	 * of the physical memory space for vmemmaps. That space is pageblock
 	 * aligned.
@@ -1580,7 +1580,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 
 	/*
 	 * {on,off}lining is constrained to full memory sections (or more
-	 * precisly to memory blocks from the user space POV).
+	 * precisely to memory blocks from the user space POV).
 	 * memmap_on_memory is an exception because it reserves initial part
 	 * of the physical memory space for vmemmaps. That space is pageblock
 	 * aligned.
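Both hunks touch the same comment. The constraint it states can be sketched as a pure-C check; the constants below are illustrative x86-64 defaults, not taken from the patch:

#include <stdbool.h>
#include <stdint.h>

#define PAGES_PER_SECTION	(1UL << 15)	/* 128 MiB section / 4 KiB pages */
#define PAGEBLOCK_NR_PAGES	(1UL << 9)	/* 2 MiB pageblock */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* The start only has to be pageblock-aligned (memmap_on_memory may have
 * carved the vmemmap out of the front of the range), but the range must
 * still end on a full memory section. */
static bool hotplug_range_ok(uint64_t pfn, uint64_t nr_pages)
{
	return nr_pages &&
	       IS_ALIGNED(pfn, PAGEBLOCK_NR_PAGES) &&
	       IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION);
}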
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3180,7 +3180,7 @@ static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
 	int cpu;
 
 	/*
-	 * Allocate in the BSS so we wont require allocation in
+	 * Allocate in the BSS so we won't require allocation in
 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
 	 */
 	static cpumask_t cpus_with_pcps;
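The comment refers to CONFIG_CPUMASK_OFFSTACK=y, where a cpumask_var_t is heap-backed, so allocating one could recurse into the very reclaim path this function serves. A hedged kernel-style sketch of the contrast (the sketch functions are hypothetical; zalloc_cpumask_var/free_cpumask_var are real kernel APIs):

/* With CONFIG_CPUMASK_OFFSTACK=y, an off-stack cpumask normally comes
 * from the allocator: */
static void drain_sketch_dynamic(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))	/* may itself enter reclaim */
		return;
	/* ... use mask ... */
	free_cpumask_var(mask);
}

/* __drain_all_pages() avoids that by keeping one static mask in the BSS;
 * a single static copy is safe because callers serialize on a mutex. */
static void drain_sketch_static(void)
{
	static cpumask_t cpus_with_pcps;	/* no allocation, ever */
	/* ... fill cpus_with_pcps and drain each listed CPU ... */
}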
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2967,7 +2967,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
 		return 0;
 	}
 
-	/* swap partition endianess hack... */
+	/* swap partition endianness hack... */
 	if (swab32(swap_header->info.version) == 1) {
 		swab32s(&swap_header->info.version);
 		swab32s(&swap_header->info.last_page);
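The "hack" in question: a swap header carries no explicit byte-order field, so if the version field only reads as 1 after byte-swapping, the header must have been written on a machine of the opposite endianness, and every multi-byte field gets swapped in place. A self-contained sketch of that detection (swab32 is hand-rolled here and the struct is an illustrative subset; the kernel uses its own swab32/swab32s helpers on the full header):

#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

struct swap_info_sketch {	/* illustrative subset of the header */
	uint32_t version;
	uint32_t last_page;
};

static void fixup_endianness(struct swap_info_sketch *info)
{
	/* Version 1 is the only valid value; seeing it byte-swapped means
	 * the header is foreign-endian, so swap each field in place. */
	if (swab32(info->version) == 1) {
		info->version = swab32(info->version);
		info->last_page = swab32(info->last_page);
	}
}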