author		Vitaly Andrianov	2014-08-01 09:09:14 -0500
committer	Vitaly Andrianov	2015-01-21 07:53:11 -0600
commit		6a7b41529cb59e9647873f44f3febf14ce3d3358 (patch)
tree		ace741e517a351493aa812a1a777ffb0e7b91b9b
parent		76161fa9b43c9c10a4a41fe7db333e4afd5e5e3f (diff)
arm: keystone2: kexec: convert physical to aliased addresses and back
This patch converts the physical addresses and pages used by the kexec functionality to the corresponding aliased addresses and pages. This is required because:

- Keystone2 SoC DDR3 addresses lie outside the first 4GB of the address range. To make that memory reachable at boot time, before the MMU is enabled, the hardware aliases it into the first 4GB (see the KS2 TRM for details). Before rebooting, every physical address the recovery kernel will use must therefore be converted to the corresponding aliased value (the sketch after this message illustrates the arithmetic).

- Before the MMU is enabled, the ARM core uses 32-bit addresses. Even where a real physical address is needed rather than an aliased one, the running kernel still converts it to the aliased address, and the recovery kernel restores it back to the physical one.

- The kexec user-space utility passes addresses to the kernel in 32-bit variables, so aliased addresses have to be used there as well and converted back to physical addresses in the kernel.

Signed-off-by: Vitaly Andrianov <vitalya@ti.com>
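A minimal sketch of the conversion this patch performs, written as plain user-space C so it can be compiled and run standalone. The *_PHYS_START values are assumptions drawn from the Keystone II memory map (32-bit DDR3 alias window at 0x8000_0000, real DDR3 above 4GB) rather than from this patch; the authoritative definitions live in arch/arm/mach-keystone/include/mach/memory.h, modified below.

/*
 * Illustrative sketch only: how a >4GB Keystone II physical address is
 * folded into the 32-bit alias window and restored afterwards. The
 * *_PHYS_START values and PAGE_SHIFT of 12 (4KB pages) are assumptions,
 * not taken from this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define KEYSTONE_LOW_PHYS_START		0x80000000ULL	/* 32-bit alias   */
#define KEYSTONE_HIGH_PHYS_START	0x800000000ULL	/* real DDR3 base */
#define IDMAP_ADDR_OFFSET	(KEYSTONE_HIGH_PHYS_START - \
				 KEYSTONE_LOW_PHYS_START)
#define PAGE_SHIFT		12
#define IDMAP_PFN_OFFSET	(IDMAP_ADDR_OFFSET >> PAGE_SHIFT)

int main(void)
{
	uint64_t phys  = 0x800100000ULL;	/* example address in real DDR3 */
	uint32_t alias = (uint32_t)(phys - IDMAP_ADDR_OFFSET);

	/* The alias fits in 32 bits, so kexec-tools can carry it around. */
	printf("phys  %#llx -> alias %#x\n", (unsigned long long)phys, alias);

	/* The recovery kernel adds the offset back to get the real address. */
	printf("alias %#x -> phys  %#llx\n",
	       alias, (uint64_t)alias + IDMAP_ADDR_OFFSET);

	/*
	 * The same shift works at page-frame granularity, which is why the
	 * patch adjusts page_to_pfn()/pfn_to_page() results by
	 * IDMAP_PFN_OFFSET rather than by a byte offset.
	 */
	printf("pfn offset: %#llx\n", (unsigned long long)IDMAP_PFN_OFFSET);
	return 0;
}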
-rw-r--r--	arch/arm/kernel/machine_kexec.c			10
-rw-r--r--	arch/arm/kernel/setup.c				 4
-rw-r--r--	arch/arm/mach-keystone/include/mach/memory.h	 5
-rw-r--r--	kernel/kexec.c					45
4 files changed, 45 insertions(+), 19 deletions(-)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 70ae735dec5..0ac387b81eb 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -38,6 +38,7 @@ int machine_kexec_prepare(struct kimage *image)
        struct kexec_segment *current_segment;
        __be32 header;
        int i, err;
+       phys_addr_t pmem, pmemsz;
 
        /*
         * No segment at default ATAGs address. try to locate
@@ -46,8 +47,11 @@ int machine_kexec_prepare(struct kimage *image)
        for (i = 0; i < image->nr_segments; i++) {
                current_segment = &image->segment[i];
 
-               if (!memblock_is_region_memory(current_segment->mem,
-                                              current_segment->memsz))
+               pmem = current_segment->mem + IDMAP_ADDR_OFFSET;
+               pmemsz = current_segment->memsz;
+
+               if (!memblock_is_region_memory(pmem,
+                                              pmemsz))
                        return -EINVAL;
 
                err = get_user(header, (__be32*)current_segment->buf);
@@ -147,7 +151,7 @@ void machine_kexec(struct kimage *image)
 
        /* we need both effective and real address here */
        reboot_code_buffer_phys =
-               page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+               (page_to_pfn(image->control_code_page) - IDMAP_PFN_OFFSET) << PAGE_SHIFT;
        reboot_code_buffer = page_address(image->control_code_page);
 
        /* Prepare parameters for reboot_code_buffer*/
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7262f76f1a1..e907163b48f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -632,6 +632,10 @@ static int __init early_mem(char *p)
        if (*endp == '@')
                start = memparse(endp + 1, NULL);
 
+#ifdef CONFIG_ARCH_KEYSTONE
+       start += IDMAP_ADDR_OFFSET;
+#endif
+
        arm_add_memory(start, size);
 
        return 0;
diff --git a/arch/arm/mach-keystone/include/mach/memory.h b/arch/arm/mach-keystone/include/mach/memory.h
index 872183f55ef..514682e9f99 100644
--- a/arch/arm/mach-keystone/include/mach/memory.h
+++ b/arch/arm/mach-keystone/include/mach/memory.h
@@ -28,6 +28,11 @@
 #define KEYSTONE_HIGH_PHYS_SIZE	0x400000000ULL	/* 16G */
 #define KEYSTONE_HIGH_PHYS_END		(KEYSTONE_HIGH_PHYS_START + \
 					 KEYSTONE_HIGH_PHYS_SIZE - 1)
+
+#define IDMAP_ADDR_OFFSET	(KEYSTONE_HIGH_PHYS_START - \
+				 KEYSTONE_LOW_PHYS_START)
+#define IDMAP_PFN_OFFSET	(IDMAP_ADDR_OFFSET >> PAGE_SHIFT)
+
 #ifdef CONFIG_ARM_LPAE
 
 #ifndef __ASSEMBLY__
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 1f8d9382dba..228013bf9df 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -117,6 +117,12 @@ int kexec_should_crash(struct task_struct *p)
  */
 #define KIMAGE_NO_DEST (-1UL)
 
+#ifndef CONFIG_ARCH_KEYSTONE
+#define IDMAP_ADDR_OFFSET	0x0ULL
+#define IDMAP_PFN_OFFSET	0x0
+#endif
+
+
 static int kimage_is_destination_range(struct kimage *image,
                                        unsigned long start, unsigned long end);
 static struct page *kimage_alloc_page(struct kimage *image,
@@ -286,7 +292,8 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 
        image = NULL;
        /* Verify we have a valid entry point */
-       if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
+       if ((entry < (crashk_res.start - IDMAP_ADDR_OFFSET)) ||
+           (entry > (crashk_res.end - IDMAP_ADDR_OFFSET))) {
                result = -EADDRNOTAVAIL;
                goto out;
        }
@@ -299,7 +306,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
        /* Enable the special crash kernel control page
         * allocation policy.
         */
-       image->control_page = crashk_res.start;
+       image->control_page = crashk_res.start - IDMAP_ADDR_OFFSET;
        image->type = KEXEC_TYPE_CRASH;
 
        /*
@@ -318,7 +325,8 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
-               if ((mstart < crashk_res.start) || (mend > crashk_res.end))
+               if ((mstart < (crashk_res.start - IDMAP_ADDR_OFFSET)) ||
+                   (mend > (crashk_res.end - IDMAP_ADDR_OFFSET)))
                        goto out_free;
        }
 
@@ -435,7 +443,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                pages = kimage_alloc_pages(GFP_KERNEL, order);
                if (!pages)
                        break;
-               pfn   = page_to_pfn(pages);
+               pfn   = page_to_pfn(pages) - IDMAP_PFN_OFFSET;
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
@@ -520,7 +528,8 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
-                       pages = pfn_to_page(hole_start >> PAGE_SHIFT);
+                       pages = pfn_to_page(hole_start >> PAGE_SHIFT) +
+                               IDMAP_PFN_OFFSET;
                        break;
                }
        }
@@ -550,6 +559,8 @@ struct page *kimage_alloc_control_pages(struct kimage *image,
 
 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 {
+       phys_addr_t pa_tmp;
+
        if (*image->entry != 0)
                image->entry++;
 
@@ -562,7 +573,8 @@ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
                        return -ENOMEM;
 
                ind_page = page_address(page);
-               *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
+               pa_tmp = virt_to_phys(ind_page) - IDMAP_ADDR_OFFSET;
+               *image->entry = (unsigned int)pa_tmp | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
@@ -627,7 +639,7 @@ static void kimage_free_entry(kimage_entry_t entry)
 {
        struct page *page;
 
-       page = pfn_to_page(entry >> PAGE_SHIFT);
+       page = pfn_to_page(entry >> PAGE_SHIFT) + IDMAP_PFN_OFFSET;
        kimage_free_pages(page);
 }
 
@@ -714,7 +726,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = (page_to_pfn(page) - IDMAP_PFN_OFFSET) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
@@ -729,12 +741,12 @@ static struct page *kimage_alloc_page(struct kimage *image,
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
-               if (page_to_pfn(page) >
+               if ((page_to_pfn(page) - IDMAP_PFN_OFFSET) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = (page_to_pfn(page) - IDMAP_PFN_OFFSET) << PAGE_SHIFT;
 
                /* If it is the destination page we want use it */
                if (addr == destination)
@@ -757,7 +769,8 @@ static struct page *kimage_alloc_page(struct kimage *image,
                        struct page *old_page;
 
                        old_addr = *old & PAGE_MASK;
-                       old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
+                       old_page = pfn_to_page((old_addr >> PAGE_SHIFT) +
+                                              IDMAP_PFN_OFFSET);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);
 
@@ -807,13 +820,12 @@ static int kimage_load_normal_segment(struct kimage *image,
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;
-
                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
-               result = kimage_add_page(image, page_to_pfn(page)
+               result = kimage_add_page(image, (page_to_pfn(page) - IDMAP_PFN_OFFSET)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;
@@ -863,7 +875,7 @@ static int kimage_load_crash_segment(struct kimage *image,
                char *ptr;
                size_t uchunk, mchunk;
 
-               page = pfn_to_page(maddr >> PAGE_SHIFT);
+               page = pfn_to_page(maddr >> PAGE_SHIFT) + IDMAP_PFN_OFFSET;
                if (!page) {
                        result = -ENOMEM;
                        goto out;
@@ -1011,6 +1023,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                if (result)
                        goto out;
        }
+       flush_cache_all();
        kimage_terminate(image);
        if (flags & KEXEC_ON_CRASH)
                crash_unmap_reserved_pages();
@@ -1115,7 +1128,7 @@ void __weak crash_free_reserved_phys_range(unsigned long begin,
        unsigned long addr;
 
        for (addr = begin; addr < end; addr += PAGE_SIZE)
-               free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
+               free_reserved_page(pfn_to_page((addr >> PAGE_SHIFT) + IDMAP_PFN_OFFSET));
 }
 
 int crash_shrink_memory(unsigned long new_size)
@@ -1558,7 +1571,7 @@ void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
 
 unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
 {
-       return __pa((unsigned long)(char *)&vmcoreinfo_note);
+       return __pa((unsigned long)(char *)&vmcoreinfo_note) - IDMAP_ADDR_OFFSET;
 }
 
 static int __init crash_save_vmcoreinfo_init(void)