mm: Add total_unmovable_pages global variable
Vmalloc will exit if the amount it needs to allocate is greater than totalram_pages. Vmalloc cannot allocate from the movable zone, so pages in the movable zone should not be counted.

This change adds a new global variable, total_unmovable_pages. It is calculated in init.c as totalram_pages minus the pages in the movable zone. Vmalloc now consults this new global instead of totalram_pages.

total_unmovable_pages can be modified during memory hotplug: if the zone being offlined/onlined is unmovable, it is adjusted in the same way as totalram_pages; if the zone is movable, no change is needed.

Change-Id: Ie55c41051e9ad4b921eb04ecbb4798a8bd2344d6
Signed-off-by: Jack Cheung <jackc@codeaurora.org>
(cherry picked from commit 59f9f1c9ae463a3d4499cd9353619f8b1993371b)

Conflicts:
	arch/arm/mm/init.c
	mm/memory_hotplug.c
	mm/page_alloc.c
	mm/vmalloc.c
This commit is contained in:
Committed by: Stephen Boyd
Parent: 18e44d3eaf
Commit: f6e34be773
@@ -624,6 +624,9 @@ void __init mem_init(void)
|
||||
extern u32 dtcm_end;
|
||||
extern u32 itcm_end;
|
||||
#endif
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
struct zone *zone;
|
||||
#endif
|
||||
|
||||
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
|
||||
|
||||
@@ -662,6 +665,14 @@ void __init mem_init(void)
|
||||
} while (page < end);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
for_each_zone(zone) {
|
||||
if (zone_idx(zone) == ZONE_MOVABLE)
|
||||
total_unmovable_pages = totalram_pages -
|
||||
zone->spanned_pages;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Since our memory may not be contiguous, calculate the
|
||||
* real number of pages we have in this system
|
||||
@@ -759,6 +770,7 @@ void __init mem_init(void)
|
||||
|
||||
void free_initmem(void)
|
||||
{
|
||||
unsigned long reclaimed_initmem;
|
||||
#ifdef CONFIG_HAVE_TCM
|
||||
extern char __tcm_start, __tcm_end;
|
||||
|
||||
@@ -769,10 +781,15 @@ void free_initmem(void)
|
||||
#endif
|
||||
|
||||
poison_init_mem(__init_begin, __init_end - __init_begin);
|
||||
if (!machine_is_integrator() && !machine_is_cintegrator())
|
||||
totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
|
||||
if (!machine_is_integrator() && !machine_is_cintegrator()) {
|
||||
reclaimed_initmem = free_area(__phys_to_pfn(__pa(__init_begin)),
|
||||
__phys_to_pfn(__pa(__init_end)),
|
||||
"init");
|
||||
totalram_pages += reclaimed_initmem;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
total_unmovable_pages += reclaimed_initmem;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
@@ -781,11 +798,17 @@ static int keep_initrd;
|
||||
|
||||
void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long reclaimed_initrd_mem;
|
||||
|
||||
if (!keep_initrd) {
|
||||
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
|
||||
totalram_pages += free_area(__phys_to_pfn(__pa(start)),
|
||||
reclaimed_initrd_mem = free_area(__phys_to_pfn(__pa(start)),
|
||||
__phys_to_pfn(__pa(end)),
|
||||
"initrd");
|
||||
totalram_pages += reclaimed_initrd_mem;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
total_unmovable_pages += reclaimed_initrd_mem;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -31,6 +31,9 @@ extern unsigned long max_mapnr;
|
||||
|
||||
extern unsigned long num_physpages;
|
||||
extern unsigned long totalram_pages;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
extern unsigned long total_unmovable_pages;
|
||||
#endif
|
||||
extern void * high_memory;
|
||||
extern int page_cluster;
|
||||
|
||||
|
||||
@@ -410,6 +410,11 @@ void __online_page_set_limits(struct page *page)
|
||||
{
|
||||
unsigned long pfn = page_to_pfn(page);
|
||||
|
||||
totalram_pages++;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
if (zone_idx(page_zone(page)) != ZONE_MOVABLE)
|
||||
total_unmovable_pages++;
|
||||
#endif
|
||||
if (pfn >= num_physpages)
|
||||
num_physpages = pfn + 1;
|
||||
}
|
||||
@@ -962,6 +967,10 @@ repeat:
|
||||
zone->zone_pgdat->node_present_pages -= offlined_pages;
|
||||
totalram_pages -= offlined_pages;
|
||||
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
if (zone_idx(zone) != ZONE_MOVABLE)
|
||||
total_unmovable_pages -= offlined_pages;
|
||||
#endif
|
||||
init_per_zone_wmark_min();
|
||||
|
||||
if (!node_present_pages(node)) {
|
||||
|
||||
@@ -105,6 +105,9 @@ unsigned long totalreserve_pages __read_mostly;
|
||||
*/
|
||||
unsigned long dirty_balance_reserve __read_mostly;
|
||||
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
unsigned long total_unmovable_pages __read_mostly;
|
||||
#endif
|
||||
int percpu_pagelist_fraction;
|
||||
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
|
||||
|
||||
@@ -176,6 +179,9 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
|
||||
};
|
||||
|
||||
EXPORT_SYMBOL(totalram_pages);
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
EXPORT_SYMBOL(total_unmovable_pages);
|
||||
#endif
|
||||
|
||||
static char * const zone_names[MAX_NR_ZONES] = {
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
|
||||
@@ -1647,9 +1647,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
|
||||
struct vm_struct *area;
|
||||
void *addr;
|
||||
unsigned long real_size = size;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
unsigned long total_pages = total_unmovable_pages;
|
||||
#else
|
||||
unsigned long total_pages = totalram_pages;
|
||||
#endif
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
|
||||
if (!size || (size >> PAGE_SHIFT) > total_pages)
|
||||
goto fail;
|
||||
|
||||
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
|
||||
|
||||
Reference in New Issue
Block a user