about summary refs log tree commit diff stats
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c39
1 files changed, 29 insertions, 10 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 45e79be95e8d..4ae77db917f2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -274,6 +274,26 @@ int page_group_by_mobility_disabled __read_mostly;
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+	unsigned long max_initialise;
+	unsigned long reserved_lowmem;
+
+	/*
+	 * Initialise at least 2G of a node but also take into account that
+	 * two large system hashes that can take up 1GB for 0.25TB/node.
+	 */
+	max_initialise = max(2UL << (30 - PAGE_SHIFT),
+		(pgdat->node_spanned_pages >> 8));
+
+	/*
+	 * Compensate the all the memblock reservations (e.g. crash kernel)
+	 * from the initial estimation to make sure we will initialize enough
+	 * memory to boot.
+	 */
+	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+			pgdat->node_start_pfn + max_initialise);
+	max_initialise += reserved_lowmem;
+
+	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
 	pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -307,10 +327,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
 	/* Always populate low zones for address-contrained allocations */
 	if (zone_end < pgdat_end_pfn(pgdat))
 		return true;
-
 	/* Initialise at least 2G of the highest zone */
 	(*nr_initialised)++;
-	if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
+	if ((*nr_initialised > pgdat->static_init_size) &&
 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 		pgdat->first_deferred_pfn = pfn;
 		return false;
@@ -1513,14 +1532,14 @@ int move_freepages(struct zone *zone,
 #endif
 
 	for (page = start_page; page <= end_page;) {
-		/* Make sure we are not inadvertently changing nodes */
-		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
 			continue;
 		}
 
+		/* Make sure we are not inadvertently changing nodes */
+		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
 		if (!PageBuddy(page)) {
 			page++;
 			continue;
@@ -2472,7 +2491,7 @@ static bool zone_local(struct zone *local_zone, struct zone *zone)
 
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
-	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
+	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
 		RECLAIM_DISTANCE;
 }
 #else	/* CONFIG_NUMA */
@@ -5348,7 +5367,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 	/* pg_data_t should be reset to zero when it's allocated */
 	WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
 
-	reset_deferred_meminit(pgdat);
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -5367,6 +5385,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 		(unsigned long)pgdat->node_mem_map);
 #endif
 
+	reset_deferred_meminit(pgdat);
 	free_area_init_core(pgdat);
 }
 
@@ -5833,8 +5852,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
 	}
 
 	if (pages && s)
-		pr_info("Freeing %s memory: %ldK (%p - %p)\n",
-			s, pages << (PAGE_SHIFT - 10), start, end);
+		pr_info("Freeing %s memory: %ldK\n",
+			s, pages << (PAGE_SHIFT - 10));
 
 	return pages;
 }
@@ -6788,7 +6807,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 	/* Make sure the range is really isolated. */
 	if (test_pages_isolated(outer_start, end, false)) {
-		pr_info("%s: [%lx, %lx) PFNs busy\n",
+		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
 			__func__, outer_start, end);
 		ret = -EBUSY;
 		goto done;