author     Tejun Heo                                 2011-07-12 04:16:04 -0500
committer  H. Peter Anvin                            2011-07-14 13:47:51 -0500
commit     474b881bf4ee86aba55d46a4fdf293de32cba91b  (patch)
tree       bd197e7989ec4d76445a393679ca1f18727825b6  /arch
parent     6b5d41a1b97f5529284f16170211b87fd60264c0  (diff)
x86: Use absent_pages_in_range() instead of memblock_x86_hole_size()
memblock_x86_hole_size() calculates the total size of holes in a given
range according to memblock and is used by the numa emulation code and
numa_meminfo_cover_memory().

Since the conversion to MEMBLOCK_NODE_MAP, absent_pages_in_range() also
uses memblock and gives the same result.  This patch replaces
memblock_x86_hole_size() uses with absent_pages_in_range().  After the
conversion, the x86 function has no users left and is killed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310462166-31469-12-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
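Note: the arithmetic the patch relies on is easy to check outside the kernel. The sketch below mirrors the mem_hole_size() helper this patch adds to numa_emulation.c, with PFN_UP()/PFN_DOWN()/PFN_PHYS() expanded to their usual definitions and absent_pages_in_range() replaced by a toy stub over one hypothetical memory region; only the helper's shape comes from the patch, the rest is illustrative userspace code.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
/* usual kernel definitions: byte address <-> page frame number */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)

/* Toy stand-in: pfns 1..7 are present, everything else is absent.
 * The real absent_pages_in_range() walks the memblock node map. */
static unsigned long absent_pages_in_range(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        unsigned long present_start = 1, present_end = 8, present = 0;

        if (start_pfn < present_end && end_pfn > present_start) {
                unsigned long lo = start_pfn > present_start ? start_pfn : present_start;
                unsigned long hi = end_pfn < present_end ? end_pfn : present_end;
                present = hi - lo;
        }
        return (end_pfn - start_pfn) - present;
}

/* Same shape as the mem_hole_size() added by this patch: clamp the byte
 * range inward to whole pages, count absent pages, convert to bytes. */
static uint64_t mem_hole_size(uint64_t start, uint64_t end)
{
        unsigned long start_pfn = PFN_UP(start);
        unsigned long end_pfn = PFN_DOWN(end);

        if (start_pfn < end_pfn)
                return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
        return 0;
}

int main(void)
{
        /* pfn 0 is absent: expect a 0x1000-byte hole in [0, 0x8000) */
        printf("hole = %#llx\n", (unsigned long long)mem_hole_size(0, 0x8000));
        return 0;
}

Compiled with a plain cc, this prints hole = 0x1000, i.e. the one absent page below the toy region.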
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/memblock.h |  2
-rw-r--r--  arch/x86/mm/memblock.c          | 52
-rw-r--r--  arch/x86/mm/numa.c              |  4
-rw-r--r--  arch/x86/mm/numa_emulation.c    | 30
4 files changed, 20 insertions(+), 68 deletions(-)
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index a0cc7d66ac5..17a882e90ad 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -6,6 +6,4 @@
 void memblock_x86_reserve_range(u64 start, u64 end, char *name);
 void memblock_x86_free_range(u64 start, u64 end);
 
-u64 memblock_x86_hole_size(u64 start, u64 end);
-
 #endif
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index a9d0972df10..7325c5d8ace 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -32,55 +32,3 @@ void __init memblock_x86_free_range(u64 start, u64 end)
 
         memblock_free(start, end - start);
 }
-
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
- */
-static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
-                                                  unsigned long start_pfn,
-                                                  unsigned long last_pfn,
-                                                  unsigned long *ei_startpfn,
-                                                  unsigned long *ei_endpfn)
-{
-        u64 align = PAGE_SIZE;
-
-        *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
-        *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
-
-        /* Skip map entries smaller than a page */
-        if (*ei_startpfn >= *ei_endpfn)
-                return 0;
-
-        /* Skip if map is outside the node */
-        if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
-                return 0;
-
-        /* Check for overlaps */
-        if (*ei_startpfn < start_pfn)
-                *ei_startpfn = start_pfn;
-        if (*ei_endpfn > last_pfn)
-                *ei_endpfn = last_pfn;
-
-        return 1;
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init memblock_x86_hole_size(u64 start, u64 end)
-{
-        unsigned long start_pfn = start >> PAGE_SHIFT;
-        unsigned long last_pfn = end >> PAGE_SHIFT;
-        unsigned long ei_startpfn, ei_endpfn, ram = 0;
-        struct memblock_region *r;
-
-        for_each_memblock(memory, r)
-                if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
-                                                    &ei_startpfn, &ei_endpfn))
-                        ram += ei_endpfn - ei_startpfn;
-
-        return end - start - ((u64)ram << PAGE_SHIFT);
-}
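Note: the deleted helper rounds each memblock region inward to page boundaries (base rounded up, end rounded down), so a partially covered page counts toward the hole. For example, assuming 4KiB pages, a lone region [0x800, 0x2800) scanned over [0, 0x3000) yields ei_startpfn = 1 and ei_endpfn = 2, i.e. ram = one page, and a reported hole of 0x3000 - 0x1000 = 0x2000 bytes.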
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index f4a40bdb2e4..88e56272996 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -475,8 +475,8 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
                 numaram = 0;
         }
 
-        e820ram = max_pfn - (memblock_x86_hole_size(0,
-                                        PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
+        e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
+
         /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
         if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
                 printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
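Note: the two expressions agree because absent_pages_in_range() already works in pages: max_pfn minus the absent pages in [0, max_pfn) is the number of present pages, which is exactly what max_pfn - (hole_bytes >> PAGE_SHIFT) computed. The interface difference is that the new function takes pfns rather than byte addresses, so the PFN_PHYS()/shift round-trip disappears.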
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index e3d471c20cd..971fe70549b 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -28,6 +28,16 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
         return -ENOENT;
 }
 
+static u64 mem_hole_size(u64 start, u64 end)
+{
+        unsigned long start_pfn = PFN_UP(start);
+        unsigned long end_pfn = PFN_DOWN(end);
+
+        if (start_pfn < end_pfn)
+                return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
+        return 0;
+}
+
 /*
  * Sets up nid to range from @start to @end.  The return value is -errno if
  * something went wrong, 0 otherwise.
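Note: PFN_UP() on the start and PFN_DOWN() on the end clamp the byte range inward to whole pages before asking absent_pages_in_range(), which counts whole pfns; the start_pfn < end_pfn check handles ranges smaller than one page, where the clamped range would be empty or inverted and the hole size is reported as 0. In practice the emulation code passes page-aligned node boundaries, so the clamping is a guard rather than a behavior change.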
@@ -89,7 +99,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
          * Calculate target node size.  x86_32 freaks on __udivdi3() so do
          * the division in ulong number of pages and convert back.
          */
-        size = max_addr - addr - memblock_x86_hole_size(addr, max_addr);
+        size = max_addr - addr - mem_hole_size(addr, max_addr);
         size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);
 
         /*
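Note: on x86_32 a 64-bit division compiles to a call to libgcc's __udivdi3, which the kernel does not link against; the code therefore shifts the byte count down to pages (which fits in an unsigned long), divides in ulong, and converts back with PFN_PHYS().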
@@ -135,8 +145,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
                  * Continue to add memory to this fake node if its
                  * non-reserved memory is less than the per-node size.
                  */
-                while (end - start -
-                        memblock_x86_hole_size(start, end) < size) {
+                while (end - start - mem_hole_size(start, end) < size) {
                         end += FAKE_NODE_MIN_SIZE;
                         if (end > limit) {
                                 end = limit;
@@ -150,7 +159,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
                  * this one must extend to the boundary.
                  */
                 if (end < dma32_end && dma32_end - end -
-                    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+                    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                         end = dma32_end;
 
                 /*
@@ -158,8 +167,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
                  * next node, this one must extend to the end of the
                  * physical node.
                  */
-                if (limit - end -
-                    memblock_x86_hole_size(end, limit) < size)
+                if (limit - end - mem_hole_size(end, limit) < size)
                         end = limit;
 
                 ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
@@ -180,7 +188,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
 {
         u64 end = start + size;
 
-        while (end - start - memblock_x86_hole_size(start, end) < size) {
+        while (end - start - mem_hole_size(start, end) < size) {
                 end += FAKE_NODE_MIN_SIZE;
                 if (end > max_addr) {
                         end = max_addr;
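Note: the loop grows end in FAKE_NODE_MIN_SIZE steps until the range covers size bytes of present memory, i.e. holes do not count toward the emulated node's size; the replacement only changes how those holes are measured.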
@@ -211,8 +219,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
          * creates a uniform distribution of node sizes across the entire
          * machine (but not necessarily over physical nodes).
          */
-        min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
-                                                MAX_NUMNODES;
+        min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
         min_size = max(min_size, FAKE_NODE_MIN_SIZE);
         if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
                 min_size = (min_size + FAKE_NODE_MIN_SIZE) &
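Note: FAKE_NODE_MIN_HASH_MASK is defined in the emulation code as the complement of FAKE_NODE_MIN_SIZE - 1, so the masked comparison tests whether min_size is a multiple of FAKE_NODE_MIN_SIZE, and the statement that follows rounds it up to the next such multiple.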
@@ -252,7 +259,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                  * this one must extend to the boundary.
                  */
                 if (end < dma32_end && dma32_end - end -
-                    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+                    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                         end = dma32_end;
 
                 /*
@@ -260,8 +267,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                  * next node, this one must extend to the end of the
                  * physical node.
                  */
-                if (limit - end -
-                    memblock_x86_hole_size(end, limit) < size)
+                if (limit - end - mem_hole_size(end, limit) < size)
                         end = limit;
 
                 ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,