about summary refs log tree commit diff stats
path: root/arch/ia64
diff options
context:
space:
mode:
author Michel Lespinasse 2013-02-21 17:10:28 -0600
committer Tony Luck 2013-02-22 15:46:59 -0600
commit f53f232504253a5e9f8f19ac1aa1b9161d7e6d2a (patch)
tree b15d7f891d7b4a68006feda62b63309e4f3177de /arch/ia64
parent 19f949f52599ba7c3f67a5897ac6be14bfcb1200 (diff)
downloadam43-linux-kernel-f53f232504253a5e9f8f19ac1aa1b9161d7e6d2a.tar.gz
am43-linux-kernel-f53f232504253a5e9f8f19ac1aa1b9161d7e6d2a.tar.xz
am43-linux-kernel-f53f232504253a5e9f8f19ac1aa1b9161d7e6d2a.zip
mm: use vm_unmapped_area() on ia64 architecture
Update the ia64 arch_get_unmapped_area function to make use of vm_unmapped_area() instead of implementing a brute force search. Signed-off-by: Michel Lespinasse <walken@google.com> Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--arch/ia64/kernel/sys_ia64.c37
1 file changed, 12 insertions(+), 25 deletions(-)
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index d9439ef2f66..41e33f84c18 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
25 unsigned long pgoff, unsigned long flags) 25 unsigned long pgoff, unsigned long flags)
26{ 26{
27 long map_shared = (flags & MAP_SHARED); 27 long map_shared = (flags & MAP_SHARED);
28 unsigned long start_addr, align_mask = PAGE_SIZE - 1; 28 unsigned long align_mask = 0;
29 struct mm_struct *mm = current->mm; 29 struct mm_struct *mm = current->mm;
30 struct vm_area_struct *vma; 30 struct vm_unmapped_area_info info;
31 31
32 if (len > RGN_MAP_LIMIT) 32 if (len > RGN_MAP_LIMIT)
33 return -ENOMEM; 33 return -ENOMEM;
@@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
44 addr = 0; 44 addr = 0;
45#endif 45#endif
46 if (!addr) 46 if (!addr)
47 addr = mm->free_area_cache; 47 addr = TASK_UNMAPPED_BASE;
48 48
49 if (map_shared && (TASK_SIZE > 0xfffffffful)) 49 if (map_shared && (TASK_SIZE > 0xfffffffful))
50 /* 50 /*
@@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
53 * tasks, we prefer to avoid exhausting the address space too quickly by 53 * tasks, we prefer to avoid exhausting the address space too quickly by
54 * limiting alignment to a single page. 54 * limiting alignment to a single page.
55 */ 55 */
56 align_mask = SHMLBA - 1; 56 align_mask = PAGE_MASK & (SHMLBA - 1);
57 57
58 full_search: 58 info.flags = 0;
59 start_addr = addr = (addr + align_mask) & ~align_mask; 59 info.length = len;
60 60 info.low_limit = addr;
61 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { 61 info.high_limit = TASK_SIZE;
62 /* At this point: (!vma || addr < vma->vm_end). */ 62 info.align_mask = align_mask;
63 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { 63 info.align_offset = 0;
64 if (start_addr != TASK_UNMAPPED_BASE) { 64 return vm_unmapped_area(&info);
65 /* Start a new search --- just in case we missed some holes. */
66 addr = TASK_UNMAPPED_BASE;
67 goto full_search;
68 }
69 return -ENOMEM;
70 }
71 if (!vma || addr + len <= vma->vm_start) {
72 /* Remember the address where we stopped this search: */
73 mm->free_area_cache = addr + len;
74 return addr;
75 }
76 addr = (vma->vm_end + align_mask) & ~align_mask;
77 }
78} 65}
79 66
80asmlinkage long 67asmlinkage long