aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTejun Heo2011-07-12 04:16:06 -0500
committerH. Peter Anvin2011-07-14 13:47:53 -0500
commit24aa07882b672fff2da2f5c955759f0bd13d32d5 (patch)
treee6dad38048ede1dbb9ad3c7fffcc4b37e72274a8
parentc378ddd53f9b8832a46fd4fec050a97fc2269858 (diff)
downloadkernel-common-24aa07882b672fff2da2f5c955759f0bd13d32d5.tar.gz
kernel-common-24aa07882b672fff2da2f5c955759f0bd13d32d5.tar.xz
kernel-common-24aa07882b672fff2da2f5c955759f0bd13d32d5.zip
memblock, x86: Replace memblock_x86_reserve/free_range() with generic ones
Other than sanity check and debug message, the x86 specific version of memblock reserve/free functions are simple wrappers around the generic versions - memblock_reserve/free(). This patch adds debug messages with caller identification to the generic versions and replaces x86 specific ones and kills them. arch/x86/include/asm/memblock.h and arch/x86/mm/memblock.c are empty after this change and removed. Signed-off-by: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/1310462166-31469-14-git-send-email-tj@kernel.org Cc: Yinghai Lu <yinghai@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--arch/x86/include/asm/memblock.h7
-rw-r--r--arch/x86/kernel/aperture_64.c2
-rw-r--r--arch/x86/kernel/check.c2
-rw-r--r--arch/x86/kernel/head.c2
-rw-r--r--arch/x86/kernel/head32.c5
-rw-r--r--arch/x86/kernel/head64.c5
-rw-r--r--arch/x86/kernel/mpparse.c6
-rw-r--r--arch/x86/kernel/setup.c17
-rw-r--r--arch/x86/kernel/trampoline.c2
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/init.c6
-rw-r--r--arch/x86/mm/memblock.c34
-rw-r--r--arch/x86/mm/memtest.c2
-rw-r--r--arch/x86/mm/numa.c5
-rw-r--r--arch/x86/mm/numa_32.c6
-rw-r--r--arch/x86/mm/numa_emulation.c4
-rw-r--r--arch/x86/platform/efi/efi.c6
-rw-r--r--arch/x86/xen/mmu.c12
-rw-r--r--arch/x86/xen/setup.c7
-rw-r--r--include/linux/memblock.h2
-rw-r--r--mm/memblock.c5
-rw-r--r--mm/nobootmem.c6
22 files changed, 48 insertions, 97 deletions
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
deleted file mode 100644
index bc5667081ae..00000000000
--- a/arch/x86/include/asm/memblock.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef _X86_MEMBLOCK_H
2#define _X86_MEMBLOCK_H
3
4void memblock_x86_reserve_range(u64 start, u64 end, char *name);
5void memblock_x86_free_range(u64 start, u64 end);
6
7#endif
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 56363082bbd..6e76c191a83 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -94,7 +94,7 @@ static u32 __init allocate_aperture(void)
94 addr, aper_size>>10); 94 addr, aper_size>>10);
95 return 0; 95 return 0;
96 } 96 }
97 memblock_x86_reserve_range(addr, addr + aper_size, "aperture64"); 97 memblock_reserve(addr, aper_size);
98 /* 98 /*
99 * Kmemleak should not scan this block as it may not be mapped via the 99 * Kmemleak should not scan this block as it may not be mapped via the
100 * kernel direct mapping. 100 * kernel direct mapping.
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 621cd23bb4e..5da1269e8dd 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -91,7 +91,7 @@ void __init setup_bios_corruption_check(void)
91 if (start >= end) 91 if (start >= end)
92 continue; 92 continue;
93 93
94 memblock_x86_reserve_range(start, end, "SCAN RAM"); 94 memblock_reserve(start, end - start);
95 scan_areas[num_scan_areas].addr = start; 95 scan_areas[num_scan_areas].addr = start;
96 scan_areas[num_scan_areas].size = end - start; 96 scan_areas[num_scan_areas].size = end - start;
97 97
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index af0699ba48c..48d9d4ea102 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -52,5 +52,5 @@ void __init reserve_ebda_region(void)
52 lowmem = 0x9f000; 52 lowmem = 0x9f000;
53 53
54 /* reserve all memory between lowmem and the 1MB mark */ 54 /* reserve all memory between lowmem and the 1MB mark */
55 memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved"); 55 memblock_reserve(lowmem, 0x100000 - lowmem);
56} 56}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 3bb08509a7a..be9282bcda7 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -33,7 +33,8 @@ void __init i386_start_kernel(void)
33{ 33{
34 memblock_init(); 34 memblock_init();
35 35
36 memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); 36 memblock_reserve(__pa_symbol(&_text),
37 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
37 38
38#ifdef CONFIG_BLK_DEV_INITRD 39#ifdef CONFIG_BLK_DEV_INITRD
39 /* Reserve INITRD */ 40 /* Reserve INITRD */
@@ -42,7 +43,7 @@ void __init i386_start_kernel(void)
42 u64 ramdisk_image = boot_params.hdr.ramdisk_image; 43 u64 ramdisk_image = boot_params.hdr.ramdisk_image;
43 u64 ramdisk_size = boot_params.hdr.ramdisk_size; 44 u64 ramdisk_size = boot_params.hdr.ramdisk_size;
44 u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); 45 u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
45 memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); 46 memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
46 } 47 }
47#endif 48#endif
48 49
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5655c2272ad..fd25b11549b 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -100,7 +100,8 @@ void __init x86_64_start_reservations(char *real_mode_data)
100 100
101 memblock_init(); 101 memblock_init();
102 102
103 memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); 103 memblock_reserve(__pa_symbol(&_text),
104 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
104 105
105#ifdef CONFIG_BLK_DEV_INITRD 106#ifdef CONFIG_BLK_DEV_INITRD
106 /* Reserve INITRD */ 107 /* Reserve INITRD */
@@ -109,7 +110,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
109 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; 110 unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
110 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; 111 unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
111 unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); 112 unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
112 memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); 113 memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
113 } 114 }
114#endif 115#endif
115 116
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 8faeaa0ed2c..a6b79c16ec7 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -564,9 +564,7 @@ void __init default_get_smp_config(unsigned int early)
564 564
565static void __init smp_reserve_memory(struct mpf_intel *mpf) 565static void __init smp_reserve_memory(struct mpf_intel *mpf)
566{ 566{
567 unsigned long size = get_mpc_size(mpf->physptr); 567 memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
568
569 memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc");
570} 568}
571 569
572static int __init smp_scan_config(unsigned long base, unsigned long length) 570static int __init smp_scan_config(unsigned long base, unsigned long length)
@@ -595,7 +593,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
595 mpf, (u64)virt_to_phys(mpf)); 593 mpf, (u64)virt_to_phys(mpf));
596 594
597 mem = virt_to_phys(mpf); 595 mem = virt_to_phys(mpf);
598 memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf"); 596 memblock_reserve(mem, sizeof(*mpf));
599 if (mpf->physptr) 597 if (mpf->physptr)
600 smp_reserve_memory(mpf); 598 smp_reserve_memory(mpf);
601 599
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 31ffe20d5d2..97d227ec995 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -306,7 +306,8 @@ static void __init cleanup_highmap(void)
306static void __init reserve_brk(void) 306static void __init reserve_brk(void)
307{ 307{
308 if (_brk_end > _brk_start) 308 if (_brk_end > _brk_start)
309 memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK"); 309 memblock_reserve(__pa(_brk_start),
310 __pa(_brk_end) - __pa(_brk_start));
310 311
311 /* Mark brk area as locked down and no longer taking any 312 /* Mark brk area as locked down and no longer taking any
312 new allocations */ 313 new allocations */
@@ -337,7 +338,7 @@ static void __init relocate_initrd(void)
337 338
338 /* Note: this includes all the lowmem currently occupied by 339 /* Note: this includes all the lowmem currently occupied by
339 the initrd, we rely on that fact to keep the data intact. */ 340 the initrd, we rely on that fact to keep the data intact. */
340 memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK"); 341 memblock_reserve(ramdisk_here, area_size);
341 initrd_start = ramdisk_here + PAGE_OFFSET; 342 initrd_start = ramdisk_here + PAGE_OFFSET;
342 initrd_end = initrd_start + ramdisk_size; 343 initrd_end = initrd_start + ramdisk_size;
343 printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", 344 printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
@@ -393,7 +394,7 @@ static void __init reserve_initrd(void)
393 initrd_start = 0; 394 initrd_start = 0;
394 395
395 if (ramdisk_size >= (end_of_lowmem>>1)) { 396 if (ramdisk_size >= (end_of_lowmem>>1)) {
396 memblock_x86_free_range(ramdisk_image, ramdisk_end); 397 memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
397 printk(KERN_ERR "initrd too large to handle, " 398 printk(KERN_ERR "initrd too large to handle, "
398 "disabling initrd\n"); 399 "disabling initrd\n");
399 return; 400 return;
@@ -416,7 +417,7 @@ static void __init reserve_initrd(void)
416 417
417 relocate_initrd(); 418 relocate_initrd();
418 419
419 memblock_x86_free_range(ramdisk_image, ramdisk_end); 420 memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
420} 421}
421#else 422#else
422static void __init reserve_initrd(void) 423static void __init reserve_initrd(void)
@@ -490,15 +491,13 @@ static void __init memblock_x86_reserve_range_setup_data(void)
490{ 491{
491 struct setup_data *data; 492 struct setup_data *data;
492 u64 pa_data; 493 u64 pa_data;
493 char buf[32];
494 494
495 if (boot_params.hdr.version < 0x0209) 495 if (boot_params.hdr.version < 0x0209)
496 return; 496 return;
497 pa_data = boot_params.hdr.setup_data; 497 pa_data = boot_params.hdr.setup_data;
498 while (pa_data) { 498 while (pa_data) {
499 data = early_memremap(pa_data, sizeof(*data)); 499 data = early_memremap(pa_data, sizeof(*data));
500 sprintf(buf, "setup data %x", data->type); 500 memblock_reserve(pa_data, sizeof(*data) + data->len);
501 memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
502 pa_data = data->next; 501 pa_data = data->next;
503 early_iounmap(data, sizeof(*data)); 502 early_iounmap(data, sizeof(*data));
504 } 503 }
@@ -568,7 +567,7 @@ static void __init reserve_crashkernel(void)
568 return; 567 return;
569 } 568 }
570 } 569 }
571 memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL"); 570 memblock_reserve(crash_base, crash_size);
572 571
573 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " 572 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
574 "for crashkernel (System RAM: %ldMB)\n", 573 "for crashkernel (System RAM: %ldMB)\n",
@@ -626,7 +625,7 @@ static __init void reserve_ibft_region(void)
626 addr = find_ibft_region(&size); 625 addr = find_ibft_region(&size);
627 626
628 if (size) 627 if (size)
629 memblock_x86_reserve_range(addr, addr + size, "* ibft"); 628 memblock_reserve(addr, size);
630} 629}
631 630
632static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; 631static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index a1f13ddb06e..a73b61055ad 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -18,7 +18,7 @@ void __init setup_trampolines(void)
18 panic("Cannot allocate trampoline\n"); 18 panic("Cannot allocate trampoline\n");
19 19
20 x86_trampoline_base = __va(mem); 20 x86_trampoline_base = __va(mem);
21 memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE"); 21 memblock_reserve(mem, size);
22 22
23 printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", 23 printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
24 x86_trampoline_base, (unsigned long long)mem, size); 24 x86_trampoline_base, (unsigned long long)mem, size);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 3d11327c9ab..23d8e5fecf7 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -27,6 +27,4 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o
27obj-$(CONFIG_ACPI_NUMA) += srat.o 27obj-$(CONFIG_ACPI_NUMA) += srat.o
28obj-$(CONFIG_NUMA_EMU) += numa_emulation.o 28obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
29 29
30obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
31
32obj-$(CONFIG_MEMTEST) += memtest.o 30obj-$(CONFIG_MEMTEST) += memtest.o
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 13cf05a6160..0b736b99d92 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -81,7 +81,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
81 81
82void __init native_pagetable_reserve(u64 start, u64 end) 82void __init native_pagetable_reserve(u64 start, u64 end)
83{ 83{
84 memblock_x86_reserve_range(start, end, "PGTABLE"); 84 memblock_reserve(start, end - start);
85} 85}
86 86
87struct map_range { 87struct map_range {
@@ -280,8 +280,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
280 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top) 280 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
281 * so that they can be reused for other purposes. 281 * so that they can be reused for other purposes.
282 * 282 *
283 * On native it just means calling memblock_x86_reserve_range, on Xen it 283 * On native it just means calling memblock_reserve, on Xen it also
284 * also means marking RW the pagetable pages that we allocated before 284 * means marking RW the pagetable pages that we allocated before
285 * but that haven't been used. 285 * but that haven't been used.
286 * 286 *
287 * In fact on xen we mark RO the whole range pgt_buf_start - 287 * In fact on xen we mark RO the whole range pgt_buf_start -
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
deleted file mode 100644
index 7325c5d8ace..00000000000
--- a/arch/x86/mm/memblock.c
+++ /dev/null
@@ -1,34 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/types.h>
3#include <linux/init.h>
4#include <linux/bitops.h>
5#include <linux/memblock.h>
6#include <linux/bootmem.h>
7#include <linux/mm.h>
8#include <linux/range.h>
9
10void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
11{
12 if (start == end)
13 return;
14
15 if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
16 return;
17
18 memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);
19
20 memblock_reserve(start, end - start);
21}
22
23void __init memblock_x86_free_range(u64 start, u64 end)
24{
25 if (start == end)
26 return;
27
28 if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
29 return;
30
31 memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);
32
33 memblock_free(start, end - start);
34}
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 46a5ff25eda..c80b9fb9573 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -34,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
34 (unsigned long long) pattern, 34 (unsigned long long) pattern,
35 (unsigned long long) start_bad, 35 (unsigned long long) start_bad,
36 (unsigned long long) end_bad); 36 (unsigned long long) end_bad);
37 memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM"); 37 memblock_reserve(start_bad, end_bad - start_bad);
38} 38}
39 39
40static void __init memtest(u64 pattern, u64 start_phys, u64 size) 40static void __init memtest(u64 pattern, u64 start_phys, u64 size)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 88e56272996..496f494593b 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -364,8 +364,7 @@ void __init numa_reset_distance(void)
364 364
365 /* numa_distance could be 1LU marking allocation failure, test cnt */ 365 /* numa_distance could be 1LU marking allocation failure, test cnt */
366 if (numa_distance_cnt) 366 if (numa_distance_cnt)
367 memblock_x86_free_range(__pa(numa_distance), 367 memblock_free(__pa(numa_distance), size);
368 __pa(numa_distance) + size);
369 numa_distance_cnt = 0; 368 numa_distance_cnt = 0;
370 numa_distance = NULL; /* enable table creation */ 369 numa_distance = NULL; /* enable table creation */
371} 370}
@@ -394,7 +393,7 @@ static int __init numa_alloc_distance(void)
394 numa_distance = (void *)1LU; 393 numa_distance = (void *)1LU;
395 return -ENOMEM; 394 return -ENOMEM;
396 } 395 }
397 memblock_x86_reserve_range(phys, phys + size, "NUMA DIST"); 396 memblock_reserve(phys, size);
398 397
399 numa_distance = __va(phys); 398 numa_distance = __va(phys);
400 numa_distance_cnt = cnt; 399 numa_distance_cnt = cnt;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 58878b536ef..534255a36b6 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -204,7 +204,7 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
204 size, nid); 204 size, nid);
205 return; 205 return;
206 } 206 }
207 memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM"); 207 memblock_reserve(node_pa, size);
208 208
209 remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, 209 remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
210 max_low_pfn << PAGE_SHIFT, 210 max_low_pfn << PAGE_SHIFT,
@@ -212,10 +212,10 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
212 if (!remap_pa) { 212 if (!remap_pa) {
213 pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", 213 pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
214 size, nid); 214 size, nid);
215 memblock_x86_free_range(node_pa, node_pa + size); 215 memblock_free(node_pa, size);
216 return; 216 return;
217 } 217 }
218 memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG"); 218 memblock_reserve(remap_pa, size);
219 remap_va = phys_to_virt(remap_pa); 219 remap_va = phys_to_virt(remap_pa);
220 220
221 /* perform actual remap */ 221 /* perform actual remap */
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 971fe70549b..46db56845f1 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -361,7 +361,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
361 pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n"); 361 pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
362 goto no_emu; 362 goto no_emu;
363 } 363 }
364 memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST"); 364 memblock_reserve(phys, phys_size);
365 phys_dist = __va(phys); 365 phys_dist = __va(phys);
366 366
367 for (i = 0; i < numa_dist_cnt; i++) 367 for (i = 0; i < numa_dist_cnt; i++)
@@ -430,7 +430,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
430 430
431 /* free the copied physical distance table */ 431 /* free the copied physical distance table */
432 if (phys_dist) 432 if (phys_dist)
433 memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size); 433 memblock_free(__pa(phys_dist), phys_size);
434 return; 434 return;
435 435
436no_emu: 436no_emu:
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index a4c322ca1a5..3b4e86bda3c 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -280,8 +280,7 @@ void __init efi_memblock_x86_reserve_range(void)
280 boot_params.efi_info.efi_memdesc_size; 280 boot_params.efi_info.efi_memdesc_size;
281 memmap.desc_version = boot_params.efi_info.efi_memdesc_version; 281 memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
282 memmap.desc_size = boot_params.efi_info.efi_memdesc_size; 282 memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
283 memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size, 283 memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
284 "EFI memmap");
285} 284}
286 285
287#if EFI_DEBUG 286#if EFI_DEBUG
@@ -332,8 +331,7 @@ void __init efi_reserve_boot_services(void)
332 "[0x%010llx-0x%010llx]\n", 331 "[0x%010llx-0x%010llx]\n",
333 start, start+size-1); 332 start, start+size-1);
334 } else 333 } else
335 memblock_x86_reserve_range(start, start+size, 334 memblock_reserve(start, size);
336 "EFI Boot");
337 } 335 }
338} 336}
339 337
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0ccccb67a99..ad54fa10f8a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1720,10 +1720,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
1720 __xen_write_cr3(true, __pa(pgd)); 1720 __xen_write_cr3(true, __pa(pgd));
1721 xen_mc_issue(PARAVIRT_LAZY_CPU); 1721 xen_mc_issue(PARAVIRT_LAZY_CPU);
1722 1722
1723 memblock_x86_reserve_range(__pa(xen_start_info->pt_base), 1723 memblock_reserve(__pa(xen_start_info->pt_base),
1724 __pa(xen_start_info->pt_base + 1724 xen_start_info->nr_pt_frames * PAGE_SIZE);
1725 xen_start_info->nr_pt_frames * PAGE_SIZE),
1726 "XEN PAGETABLES");
1727 1725
1728 return pgd; 1726 return pgd;
1729} 1727}
@@ -1799,10 +1797,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
1799 PFN_DOWN(__pa(initial_page_table))); 1797 PFN_DOWN(__pa(initial_page_table)));
1800 xen_write_cr3(__pa(initial_page_table)); 1798 xen_write_cr3(__pa(initial_page_table));
1801 1799
1802 memblock_x86_reserve_range(__pa(xen_start_info->pt_base), 1800 memblock_reserve(__pa(xen_start_info->pt_base),
1803 __pa(xen_start_info->pt_base + 1801 xen_start_info->nr_pt_frames * PAGE_SIZE);
1804 xen_start_info->nr_pt_frames * PAGE_SIZE),
1805 "XEN PAGETABLES");
1806 1802
1807 return initial_page_table; 1803 return initial_page_table;
1808} 1804}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 60aeeb56948..73daaf75801 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -63,7 +63,7 @@ static void __init xen_add_extra_mem(unsigned long pages)
63 e820_add_region(extra_start, size, E820_RAM); 63 e820_add_region(extra_start, size, E820_RAM);
64 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 64 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
65 65
66 memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA"); 66 memblock_reserve(extra_start, size);
67 67
68 xen_extra_mem_size += size; 68 xen_extra_mem_size += size;
69 69
@@ -287,9 +287,8 @@ char * __init xen_memory_setup(void)
287 * - xen_start_info 287 * - xen_start_info
288 * See comment above "struct start_info" in <xen/interface/xen.h> 288 * See comment above "struct start_info" in <xen/interface/xen.h>
289 */ 289 */
290 memblock_x86_reserve_range(__pa(xen_start_info->mfn_list), 290 memblock_reserve(__pa(xen_start_info->mfn_list),
291 __pa(xen_start_info->pt_base), 291 xen_start_info->pt_base - xen_start_info->mfn_list);
292 "XEN START INFO");
293 292
294 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 293 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
295 294
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 2491355bb6e..90746318cec 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -17,8 +17,6 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19 19
20#include <asm/memblock.h>
21
22#define INIT_MEMBLOCK_REGIONS 128 20#define INIT_MEMBLOCK_REGIONS 128
23 21
24struct memblock_region { 22struct memblock_region {
diff --git a/mm/memblock.c b/mm/memblock.c
index ebc6119f128..0cb4da657b9 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -449,6 +449,9 @@ long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
449 449
450long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) 450long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
451{ 451{
452 memblock_dbg(" memblock_free: [%#016llx-%#016llx] %pF\n",
453 base, base + size, (void *)_RET_IP_);
454
452 return __memblock_remove(&memblock.reserved, base, size); 455 return __memblock_remove(&memblock.reserved, base, size);
453} 456}
454 457
@@ -456,6 +459,8 @@ long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
456{ 459{
457 struct memblock_type *_rgn = &memblock.reserved; 460 struct memblock_type *_rgn = &memblock.reserved;
458 461
462 memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
463 base, base + size, (void *)_RET_IP_);
459 BUG_ON(0 == size); 464 BUG_ON(0 == size);
460 465
461 return memblock_add_region(_rgn, base, size); 466 return memblock_add_region(_rgn, base, size);
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7075bc00fa8..29d948ce6d0 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -47,7 +47,7 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
47 47
48 ptr = phys_to_virt(addr); 48 ptr = phys_to_virt(addr);
49 memset(ptr, 0, size); 49 memset(ptr, 0, size);
50 memblock_x86_reserve_range(addr, addr + size, "BOOTMEM"); 50 memblock_reserve(addr, size);
51 /* 51 /*
52 * The min_count is set to 0 so that bootmem allocated blocks 52 * The min_count is set to 0 so that bootmem allocated blocks
53 * are never reported as leaks. 53 * are never reported as leaks.
@@ -175,7 +175,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
175 unsigned long size) 175 unsigned long size)
176{ 176{
177 kmemleak_free_part(__va(physaddr), size); 177 kmemleak_free_part(__va(physaddr), size);
178 memblock_x86_free_range(physaddr, physaddr + size); 178 memblock_free(physaddr, size);
179} 179}
180 180
181/** 181/**
@@ -190,7 +190,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
190void __init free_bootmem(unsigned long addr, unsigned long size) 190void __init free_bootmem(unsigned long addr, unsigned long size)
191{ 191{
192 kmemleak_free_part(__va(addr), size); 192 kmemleak_free_part(__va(addr), size);
193 memblock_x86_free_range(addr, addr + size); 193 memblock_free(addr, size);
194} 194}
195 195
196static void * __init ___alloc_bootmem_nopanic(unsigned long size, 196static void * __init ___alloc_bootmem_nopanic(unsigned long size,