mm: Add total_unmovable_pages global variable
Vmalloc will exit if the amount it needs to allocate is greater than totalram_pages. However, vmalloc cannot allocate from the movable zone, so pages in the movable zone should not be counted toward that limit. This change adds a new global variable, total_unmovable_pages. It is calculated in init.c as totalram_pages minus the pages in the movable zone. Vmalloc now consults this new global instead of totalram_pages. total_unmovable_pages can also be modified during memory hotplug: if the zone being offlined/onlined is unmovable, it is adjusted in the same way as totalram_pages; if the zone is movable, no change is needed. Change-Id: Ie55c41051e9ad4b921eb04ecbb4798a8bd2344d6 Signed-off-by: Jack Cheung <jackc@codeaurora.org>
This commit is contained in:
parent
c3a5a8cb8a
commit
59f9f1c9ae
|
@ -637,6 +637,9 @@ void __init mem_init(void)
|
|||
extern u32 dtcm_end;
|
||||
extern u32 itcm_end;
|
||||
#endif
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
struct zone *zone;
|
||||
#endif
|
||||
|
||||
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
|
||||
|
||||
|
@ -682,6 +685,14 @@ void __init mem_init(void)
|
|||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
for_each_zone(zone) {
|
||||
if (zone_idx(zone) == ZONE_MOVABLE)
|
||||
total_unmovable_pages = totalram_pages -
|
||||
zone->spanned_pages;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Since our memory may not be contiguous, calculate the
|
||||
* real number of pages we have in this system
|
||||
|
@ -784,6 +795,7 @@ void __init mem_init(void)
|
|||
|
||||
void free_initmem(void)
|
||||
{
|
||||
unsigned long reclaimed_initmem;
|
||||
#ifdef CONFIG_HAVE_TCM
|
||||
extern char __tcm_start, __tcm_end;
|
||||
|
||||
|
@ -792,10 +804,15 @@ void free_initmem(void)
|
|||
"TCM link");
|
||||
#endif
|
||||
|
||||
if (!machine_is_integrator() && !machine_is_cintegrator())
|
||||
totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
|
||||
if (!machine_is_integrator() && !machine_is_cintegrator()) {
|
||||
reclaimed_initmem = free_area(__phys_to_pfn(__pa(__init_begin)),
|
||||
__phys_to_pfn(__pa(__init_end)),
|
||||
"init");
|
||||
totalram_pages += reclaimed_initmem;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
total_unmovable_pages += reclaimed_initmem;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MEMORY_HOTPLUG
|
||||
|
@ -831,10 +848,16 @@ static int keep_initrd;
|
|||
|
||||
void free_initrd_mem(unsigned long start, unsigned long end)
|
||||
{
|
||||
if (!keep_initrd)
|
||||
totalram_pages += free_area(__phys_to_pfn(__pa(start)),
|
||||
unsigned long reclaimed_initrd_mem;
|
||||
if (!keep_initrd) {
|
||||
reclaimed_initrd_mem = free_area(__phys_to_pfn(__pa(start)),
|
||||
__phys_to_pfn(__pa(end)),
|
||||
"initrd");
|
||||
totalram_pages += reclaimed_initrd_mem;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
total_unmovable_pages += reclaimed_initrd_mem;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
static int __init keepinitrd_setup(char *__unused)
|
||||
|
|
|
@ -28,6 +28,9 @@ extern unsigned long max_mapnr;
|
|||
|
||||
extern unsigned long num_physpages;
|
||||
extern unsigned long totalram_pages;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
extern unsigned long total_unmovable_pages;
|
||||
#endif
|
||||
extern void * high_memory;
|
||||
extern int page_cluster;
|
||||
|
||||
|
|
|
@ -379,6 +379,10 @@ void online_page(struct page *page)
|
|||
unsigned long pfn = page_to_pfn(page);
|
||||
|
||||
totalram_pages++;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
if (zone_idx(page_zone(page)) != ZONE_MOVABLE)
|
||||
total_unmovable_pages++;
|
||||
#endif
|
||||
if (pfn >= num_physpages)
|
||||
num_physpages = pfn + 1;
|
||||
|
||||
|
@ -965,6 +969,10 @@ repeat:
|
|||
zone->zone_pgdat->node_present_pages -= offlined_pages;
|
||||
totalram_pages -= offlined_pages;
|
||||
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
if (zone_idx(zone) != ZONE_MOVABLE)
|
||||
total_unmovable_pages -= offlined_pages;
|
||||
#endif
|
||||
init_per_zone_wmark_min();
|
||||
|
||||
if (!node_present_pages(node)) {
|
||||
|
|
|
@ -96,6 +96,9 @@ EXPORT_SYMBOL(node_states);
|
|||
|
||||
unsigned long totalram_pages __read_mostly;
|
||||
unsigned long totalreserve_pages __read_mostly;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
unsigned long total_unmovable_pages __read_mostly;
|
||||
#endif
|
||||
int percpu_pagelist_fraction;
|
||||
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
|
||||
|
||||
|
@ -174,6 +177,9 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
|
|||
};
|
||||
|
||||
EXPORT_SYMBOL(totalram_pages);
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
EXPORT_SYMBOL(total_unmovable_pages);
|
||||
#endif
|
||||
|
||||
static char * const zone_names[MAX_NR_ZONES] = {
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
|
|
|
@ -1611,9 +1611,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
|
|||
struct vm_struct *area;
|
||||
void *addr;
|
||||
unsigned long real_size = size;
|
||||
#ifdef CONFIG_FIX_MOVABLE_ZONE
|
||||
unsigned long total_pages = total_unmovable_pages;
|
||||
#else
|
||||
unsigned long total_pages = totalram_pages;
|
||||
#endif
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
|
||||
if (!size || (size >> PAGE_SHIFT) > total_pages)
|
||||
return NULL;
|
||||
|
||||
area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
|
||||
|
|
Loading…
Reference in New Issue