author     Michel Lespinasse    2013-02-27 19:02:44 -0600
committer  Linus Torvalds       2013-02-27 21:10:09 -0600
commit     ff6a6da60b894d008f704fbeb5bc596f9994b16e (patch)
tree       84c0fd2850edcd836afee8f9c542d4d4d98602f4 /mm
parent     c5a51053cf3b499ddba60a89ab067ea05ad15840 (diff)
mm: accelerate munlock() treatment of THP pages
munlock_vma_pages_range() was always incrementing addresses by PAGE_SIZE
at a time.  When munlocking THP pages (or the huge zero page), this
resulted in taking the mm->page_table_lock 512 times in a row.

We can do better by making use of the page_mask returned by
follow_page_mask (for the huge zero page case), or the size of the page
munlock_vma_page() operated on (for the true THP page case).

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
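The core of the change is the stride computation in munlock_vma_pages_range(): instead of stepping one PAGE_SIZE at a time, the loop advances past the whole compound page using the page_mask. The sketch below is a minimal userspace illustration of that arithmetic, not kernel code; PAGE_SHIFT = 12 and a 512-subpage (2MB) THP are assumed as on x86-64, and next_munlock_addr() is a hypothetical helper name used only for this example.

#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)

/*
 * Hypothetical helper mirroring the patch's stride computation:
 * page_mask is 0 for an ordinary 4K page and 511 for a 2MB THP made
 * of 4K base pages.  The expression counts how many base pages remain
 * up to the end of the compound page and steps over all of them.
 */
static unsigned long next_munlock_addr(unsigned long addr, unsigned int page_mask)
{
	unsigned long page_increm = 1 + (~(addr >> PAGE_SHIFT) & page_mask);
	return addr + page_increm * PAGE_SIZE;
}

int main(void)
{
	/* Ordinary page: advance by exactly one base page. */
	printf("%#lx\n", next_munlock_addr(0x200000UL, 0));    /* 0x201000 */
	/* Start of a 2MB THP: one step covers all 512 subpages. */
	printf("%#lx\n", next_munlock_addr(0x200000UL, 511));  /* 0x400000 */
	/* Address in the last subpage of that THP: step only to its end. */
	printf("%#lx\n", next_munlock_addr(0x3ff000UL, 511));  /* 0x400000 */
	return 0;
}

The key property is that ~(addr >> PAGE_SHIFT) & page_mask counts the base pages remaining up to the end of the compound page, so a start address that falls in the middle of a huge page advances only to that huge page's boundary.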
Diffstat (limited to 'mm')
-rw-r--r--	mm/internal.h	2
-rw-r--r--	mm/mlock.c	34
2 files changed, 24 insertions, 12 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 1c0c4cc0fcf..8562de0a519 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -195,7 +195,7 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
-extern void munlock_vma_page(struct page *page);
+extern unsigned int munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked(). This can be useful in a situation where
diff --git a/mm/mlock.c b/mm/mlock.c
index e6638f565d4..1c5e33fce63 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -102,13 +102,16 @@ void mlock_vma_page(struct page *page)
  * can't isolate the page, we leave it for putback_lru_page() and vmscan
  * [page_referenced()/try_to_unmap()] to deal with.
  */
-void munlock_vma_page(struct page *page)
+unsigned int munlock_vma_page(struct page *page)
 {
+	unsigned int page_mask = 0;
+
 	BUG_ON(!PageLocked(page));
 
 	if (TestClearPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    -hpage_nr_pages(page));
+		unsigned int nr_pages = hpage_nr_pages(page);
+		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+		page_mask = nr_pages - 1;
 		if (!isolate_lru_page(page)) {
 			int ret = SWAP_AGAIN;
 
@@ -141,6 +144,8 @@ void munlock_vma_page(struct page *page)
 			count_vm_event(UNEVICTABLE_PGMUNLOCKED);
 		}
 	}
+
+	return page_mask;
 }
 
 /**
@@ -159,7 +164,6 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *nonblocking)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long addr = start;
 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
 	int gup_flags;
 
@@ -189,7 +193,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	 * We made sure addr is within a VMA, so the following will
 	 * not result in a stack expansion that recurses back here.
 	 */
-	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
 				NULL, NULL, nonblocking);
 }
 
@@ -226,13 +230,12 @@ static int __mlock_posix_error_return(long retval)
 void munlock_vma_pages_range(struct vm_area_struct *vma,
 			     unsigned long start, unsigned long end)
 {
-	unsigned long addr;
-
-	lru_add_drain();
 	vma->vm_flags &= ~VM_LOCKED;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
+	while (start < end) {
 		struct page *page;
+		unsigned int page_mask, page_increm;
+
 		/*
 		 * Although FOLL_DUMP is intended for get_dump_page(),
 		 * it just so happens that its special treatment of the
@@ -240,13 +243,22 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		 * suits munlock very well (and if somehow an abnormal page
 		 * has sneaked into the range, we won't oops here: great).
 		 */
-		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
+					&page_mask);
 		if (page && !IS_ERR(page)) {
 			lock_page(page);
-			munlock_vma_page(page);
+			lru_add_drain();
+			/*
+			 * Any THP page found by follow_page_mask() may have
+			 * gotten split before reaching munlock_vma_page(),
+			 * so we need to recompute the page_mask here.
+			 */
+			page_mask = munlock_vma_page(page);
 			unlock_page(page);
 			put_page(page);
 		}
+		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+		start += page_increm * PAGE_SIZE;
 		cond_resched();
 	}
 }
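As a rough, hedged illustration of the saving (a userspace simulation only; 4K base pages and a single 2MB THP covering the whole range are assumed), the old per-PAGE_SIZE loop visits 512 addresses where the new page_mask-driven loop visits one:

#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define HPAGE_SIZE  (2UL << 20)                        /* assumed 2MB THP */
#define THP_MASK    ((HPAGE_SIZE / PAGE_SIZE) - 1)     /* 511 */

int main(void)
{
	unsigned long start = 0x200000UL, end = start + HPAGE_SIZE;
	unsigned long addr;
	int old_iters = 0, new_iters = 0;

	/* Old behaviour: one iteration (and, in the kernel, one
	 * page_table_lock round trip) per 4K base page. */
	for (addr = start; addr < end; addr += PAGE_SIZE)
		old_iters++;

	/* New behaviour: the page_mask reported for the THP lets the loop
	 * jump straight to the end of the compound page. */
	for (addr = start; addr < end; ) {
		unsigned long increm = 1 + (~(addr >> PAGE_SHIFT) & THP_MASK);
		addr += increm * PAGE_SIZE;
		new_iters++;
	}

	printf("old loop: %d iterations, new loop: %d\n", old_iters, new_iters);
	return 0;
}

The iteration counts come out 512 versus 1, which is the page_table_lock reduction described in the commit message. Using nr_pages - 1 as the mask relies on compound page sizes being powers of two.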