author		Vlastimil Babka		2014-08-28 13:35:07 -0500
committer	Jiri Slaby		2014-09-26 04:52:00 -0500
commit		71a5b801344ae1f03e2fb6ddad600c554f243257 (patch)
tree		e3e2d28f2cddf664165674e8b40c586b887f2695
parent		5e4084a627820bd6a8d94bca6acb878e5308716c (diff)
mm/compaction: avoid rescanning pageblocks in isolate_freepages
commit e9ade569910a82614ff5f2c2cea2b65a8d785da4 upstream.

The compaction free scanner in isolate_freepages() currently remembers PFN
of the highest pageblock where it successfully isolates, to be used as the
starting pageblock for the next invocation. The rationale behind this is
that page migration might return free pages to the allocator when migration
fails and we don't want to skip them if the compaction continues.

Since migration now returns free pages back to compaction code where they
can be reused, this is no longer a concern. This patch changes
isolate_freepages() so that the PFN for restarting is updated with each
pageblock where isolation is attempted. Using stress-highalloc from mmtests,
this resulted in 10% reduction of the pages scanned by the free scanner.

Note that the somewhat similar functionality that records highest successful
pageblock in zone->compact_cached_free_pfn, remains unchanged. This cache is
used when the whole compaction is restarted, not for multiple invocations of
the free scanner during single compaction.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
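In outline, the patch moves the free scanner's restart bookkeeping from
"remember the first pageblock that yielded pages" to "remember every
pageblock attempted". A condensed before/after sketch of the loop body from
the diff below (loop header and exit conditions elided;
for_each_pageblock_downwards is a hypothetical stand-in for the real
for-loop, not a kernel macro):

	/* Before: the restart PFN is captured only once, at the highest
	 * pageblock that actually yielded isolated pages; lower blocks
	 * scanned afterwards are rescanned by the next invocation. */
	next_free_pfn = 0;
	for_each_pageblock_downwards(block_start_pfn) {
		isolated = isolate_freepages_block(cc, block_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;
		if (isolated && next_free_pfn == 0) {
			cc->finished_update_free = true;
			next_free_pfn = block_start_pfn;	/* set once */
		}
	}
	cc->free_pfn = next_free_pfn;

	/* After: the restart PFN tracks every pageblock attempted, so the
	 * next invocation resumes where this one left off instead of
	 * revisiting blocks already found empty. */
	for_each_pageblock_downwards(block_start_pfn) {
		cc->free_pfn = block_start_pfn;	/* updated every iteration */
		isolated = isolate_freepages_block(cc, block_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;
		if (isolated)
			cc->finished_update_free = true;
	}

This is also why zone->compact_cached_free_pfn can stay as-is: that cache
still wants only the highest successful pageblock (for whole-compaction
restarts), which the finished_update_free flag preserves indirectly, per the
new comment in the third hunk.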
-rw-r--r--	mm/compaction.c	22
1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 88b33e6af1ce..44aa2d4172b5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -686,7 +686,6 @@ static void isolate_freepages(struct zone *zone,
 	unsigned long block_start_pfn;	/* start of current pageblock */
 	unsigned long block_end_pfn;	/* end of current pageblock */
 	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
-	unsigned long next_free_pfn; /* start pfn for scaning at next round */
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;
 
@@ -707,12 +706,6 @@ static void isolate_freepages(struct zone *zone,
 	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
 	/*
-	 * If no pages are isolated, the block_start_pfn < low_pfn check
-	 * will kick in.
-	 */
-	next_free_pfn = 0;
-
-	/*
 	 * Isolate free pages until enough are available to migrate the
 	 * pages on cc->migratepages. We stop searching if the migrate
 	 * and free page scanners meet or enough free pages are isolated.
@@ -752,19 +745,19 @@ static void isolate_freepages(struct zone *zone,
 			continue;
 
 		/* Found a block suitable for isolating free pages from */
+		cc->free_pfn = block_start_pfn;
 		isolated = isolate_freepages_block(cc, block_start_pfn,
					block_end_pfn, freelist, false);
 		nr_freepages += isolated;
 
 		/*
-		 * Record the highest PFN we isolated pages from. When next
-		 * looking for free pages, the search will restart here as
-		 * page migration may have returned some pages to the allocator
+		 * Set a flag that we successfully isolated in this pageblock.
+		 * In the next loop iteration, zone->compact_cached_free_pfn
+		 * will not be updated and thus it will effectively contain the
+		 * highest pageblock we isolated pages from.
 		 */
-		if (isolated && next_free_pfn == 0) {
+		if (isolated)
 			cc->finished_update_free = true;
-			next_free_pfn = block_start_pfn;
-		}
 	}
 
 	/* split_free_page does not map the pages */
@@ -775,9 +768,8 @@ static void isolate_freepages(struct zone *zone,
 	 * so that compact_finished() may detect this
 	 */
 	if (block_start_pfn < low_pfn)
-		next_free_pfn = cc->migrate_pfn;
+		cc->free_pfn = cc->migrate_pfn;
 
-	cc->free_pfn = next_free_pfn;
 	cc->nr_freepages = nr_freepages;
 }
 
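For context on the final hunk: pulling cc->free_pfn up to cc->migrate_pfn
when the scanners cross is what lets compaction terminate. A minimal sketch
of the corresponding check, as compact_finished() looked in kernels of this
era (surrounding watermark and per-order logic elided; the exact shape is an
assumption, not part of this patch):

	static int compact_finished(struct zone *zone, struct compact_control *cc)
	{
		/* Compaction run completes if the migrate and free scanners meet */
		if (cc->free_pfn <= cc->migrate_pfn)
			return COMPACT_COMPLETE;

		/* ... watermark and per-order free-list checks elided ... */
		return COMPACT_CONTINUE;
	}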