author	Jerry Hoemann <jerry.hoemann@hp.com>	2013-04-30 16:15:55 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-05-11 15:54:11 -0500
commit	34660a13cda790e62bac8229b15317007cea247a (patch)
tree	2caf0ae3070e778d5709a34bff55916593fbc575
parent	fccd6eb97dab4cf45b2cda8278d5a7582cd40d08 (diff)
x86/mm: account for PGDIR_SIZE alignment
Patch for -stable.  The function find_early_table_space was removed upstream.

Fixes a panic in alloc_low_page due to pgt_buf overflow during
init_memory_mapping.

find_early_table_space sizes pgt_buf based upon the size of the memory
being mapped, but it does not take into account the alignment of the
memory.  When the region being mapped spans a 512GB (PGDIR_SIZE)
boundary, a panic from alloc_low_pages occurs.

kernel_physical_mapping_init does take PGDIR_SIZE alignment into
account, which causes an extra call to alloc_low_page to be made.
This extra call isn't accounted for by find_early_table_space and
causes a kernel panic.

The fix is to take PGDIR_SIZE alignment into account in
find_early_table_space as well.

Signed-off-by: Jerry Hoemann <jerry.hoemann@hp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
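For context, a minimal user-space sketch of the arithmetic behind the fix (the addresses and the standalone driver are made up for illustration; PGDIR_SHIFT and PAGE_SIZE carry the usual 64-bit x86 values): a range that straddles a 512GB PGDIR boundary falls under two PGD entries, so one extra page-table page must be budgeted beyond the size-only estimate.

#include <stdio.h>

/* Constants mirroring 64-bit x86 4-level paging: one PGD entry maps 512GB. */
#define PGDIR_SHIFT	39
#define PAGE_SIZE	4096UL

int main(void)
{
	/* Made-up range straddling the 512GB boundary at 0x8000000000:
	 * its two halves fall under different PGD entries, so mapping it
	 * needs one more page-table page than a size-only estimate gives. */
	unsigned long start = 0x7fc0000000UL;
	unsigned long end   = 0x8040000000UL;
	unsigned long pgd_extra = 0;

	if ((end >> PGDIR_SHIFT) - (start >> PGDIR_SHIFT))
		pgd_extra++;

	printf("extra PGDIR crossings: %lu -> reserve %lu extra bytes of pgt_buf\n",
	       pgd_extra, pgd_extra * PAGE_SIZE);
	return 0;
}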
-rw-r--r--	arch/x86/mm/init.c	5 +++++
 1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d7aea41563b3..7d7a36d645d3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -45,11 +45,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
 	int i;
 	unsigned long puds = 0, pmds = 0, ptes = 0, tables;
 	unsigned long start = 0, good_end;
+	unsigned long pgd_extra = 0;
 	phys_addr_t base;
 
 	for (i = 0; i < nr_range; i++) {
 		unsigned long range, extra;
 
+		if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
+			pgd_extra++;
+
 		range = mr[i].end - mr[i].start;
 		puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
 
@@ -74,6 +78,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
 	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
 	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+	tables += (pgd_extra * PAGE_SIZE);
 
 #ifdef CONFIG_X86_32
 	/* for fixmap */