author     Al Viro    2012-12-20 17:49:14 -0600
committer  Al Viro    2012-12-20 17:49:14 -0600
commit     21e89c0c48bb799beb09181740796fc80c9676e2 (patch)
tree       bd5aef34a980f189ad41c75e881d225bc854bf44 /mm
parent     b911a6bdeef5848c468597d040e3407e0aee04ce (diff)
parent     91c7fbbf63f33c77d8d28de624834a21888842bb (diff)
Merge branch 'fscache' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs into for-linus
Diffstat (limited to 'mm')
-rw-r--r--   mm/highmem.c    1
-rw-r--r--   mm/ksm.c       16
-rw-r--r--   mm/vmscan.c    12
3 files changed, 15 insertions, 14 deletions
diff --git a/mm/highmem.c b/mm/highmem.c
index d999077431d..b32b70cdaed 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -105,6 +105,7 @@ struct page *kmap_to_page(void *vaddr)
 
 	return virt_to_page(addr);
 }
+EXPORT_SYMBOL(kmap_to_page);
 
 static void flush_all_zero_pkmaps(void)
 {
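The only change to mm/highmem.c above is the added EXPORT_SYMBOL(), which makes kmap_to_page() callable from loadable modules. A minimal sketch of a module exercising it (the demo module, its name, and the alloc_page()/kmap() round-trip are illustrative assumptions, not part of this commit):

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/gfp.h>

static int __init kmap_to_page_demo_init(void)
{
	struct page *page = alloc_page(GFP_HIGHUSER);	/* may come from highmem */
	void *vaddr;

	if (!page)
		return -ENOMEM;

	vaddr = kmap(page);
	/* round-trip: kmap_to_page() should recover the original struct page */
	WARN_ON(kmap_to_page(vaddr) != page);
	kunmap(page);

	__free_page(page);
	return 0;
}

static void __exit kmap_to_page_demo_exit(void)
{
}

module_init(kmap_to_page_demo_init);
module_exit(kmap_to_page_demo_exit);
MODULE_LICENSE("GPL");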
diff --git a/mm/ksm.c b/mm/ksm.c
index 82dfb4b5432..51573858938 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1624,7 +1624,7 @@ again:
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		anon_vma_lock_write(anon_vma);
+		anon_vma_lock_read(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
 			vma = vmac->vma;
@@ -1648,7 +1648,7 @@ again:
 			if (!search_new_forks || !mapcount)
 				break;
 		}
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_read(anon_vma);
 		if (!mapcount)
 			goto out;
 	}
@@ -1678,7 +1678,7 @@ again:
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		anon_vma_lock_write(anon_vma);
+		anon_vma_lock_read(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
 			vma = vmac->vma;
@@ -1697,11 +1697,11 @@ again:
 			ret = try_to_unmap_one(page, vma,
 					rmap_item->address, flags);
 			if (ret != SWAP_AGAIN || !page_mapped(page)) {
-				anon_vma_unlock(anon_vma);
+				anon_vma_unlock_read(anon_vma);
 				goto out;
 			}
 		}
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_read(anon_vma);
 	}
 	if (!search_new_forks++)
 		goto again;
@@ -1731,7 +1731,7 @@ again:
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		anon_vma_lock_write(anon_vma);
+		anon_vma_lock_read(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
 			vma = vmac->vma;
@@ -1749,11 +1749,11 @@ again:
 
 			ret = rmap_one(page, vma, rmap_item->address, arg);
 			if (ret != SWAP_AGAIN) {
-				anon_vma_unlock(anon_vma);
+				anon_vma_unlock_read(anon_vma);
 				goto out;
 			}
 		}
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_read(anon_vma);
 	}
 	if (!search_new_forks++)
 		goto again;
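Every mm/ksm.c hunk above is the same conversion: these rmap walks only read the anon_vma interval tree, so they now take the anon_vma lock for read with anon_vma_lock_read()/anon_vma_unlock_read() rather than for write. A condensed sketch of the resulting read-side pattern (the helper name walk_one_anon_vma() is invented for illustration; the per-vma work is elided):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/rmap.h>

static void walk_one_anon_vma(struct anon_vma *anon_vma, struct page *page,
			      unsigned long address)
{
	struct anon_vma_chain *vmac;
	struct vm_area_struct *vma;

	anon_vma_lock_read(anon_vma);	/* shared: the tree is not modified here */
	anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
				       0, ULONG_MAX) {
		vma = vmac->vma;
		if (address < vma->vm_start || address >= vma->vm_end)
			continue;
		/* ... act on page in this vma (read-only walk) ... */
	}
	anon_vma_unlock_read(anon_vma);
}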
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 828530e2794..adc7e905818 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2570,7 +2570,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 							int *classzone_idx)
 {
-	int all_zones_ok;
+	struct zone *unbalanced_zone;
 	unsigned long balanced;
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
@@ -2604,7 +2604,7 @@ loop_again:
 		unsigned long lru_pages = 0;
 		int has_under_min_watermark_zone = 0;
 
-		all_zones_ok = 1;
+		unbalanced_zone = NULL;
 		balanced = 0;
 
 		/*
@@ -2743,7 +2743,7 @@ loop_again:
 			}
 
 			if (!zone_balanced(zone, testorder, 0, end_zone)) {
-				all_zones_ok = 0;
+				unbalanced_zone = zone;
 				/*
 				 * We are still under min water mark.  This
 				 * means that we have a GFP_ATOMIC allocation
@@ -2776,7 +2776,7 @@ loop_again:
 				pfmemalloc_watermark_ok(pgdat))
 			wake_up(&pgdat->pfmemalloc_wait);
 
-		if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
+		if (!unbalanced_zone || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
 			break;		/* kswapd: all done */
 		/*
 		 * OK, kswapd is getting into trouble. Take a nap, then take
@@ -2786,7 +2786,7 @@ loop_again:
 		if (has_under_min_watermark_zone)
 			count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
 		else
-			congestion_wait(BLK_RW_ASYNC, HZ/10);
+			wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
 	}
 
 	/*
@@ -2805,7 +2805,7 @@ out:
 	 * high-order: Balanced zones must make up at least 25% of the node
 	 * for the node to be balanced
 	 */
-	if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
+	if (unbalanced_zone && (!order || !pgdat_balanced(pgdat, balanced, *classzone_idx))) {
 		cond_resched();
 
 		try_to_freeze();
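The mm/vmscan.c hunks replace the all_zones_ok flag with a pointer to the last zone found unbalanced, so kswapd can hand that specific zone to wait_iff_congested() instead of always calling congestion_wait(). A schematic sketch of the flag-to-pointer idiom (find_unbalanced_zone() and the zone_ok callback are invented stand-ins for balance_pgdat()'s real scan loop and zone_balanced(), not kernel code):

#include <linux/mmzone.h>

/* Returns NULL when every zone passes, else the last zone that failed. */
static struct zone *find_unbalanced_zone(struct zone **zones, int nr,
					 int (*zone_ok)(struct zone *))
{
	struct zone *unbalanced_zone = NULL;	/* was: int all_zones_ok = 1 */
	int i;

	for (i = 0; i < nr; i++)
		if (!zone_ok(zones[i]))
			unbalanced_zone = zones[i];	/* was: all_zones_ok = 0 */

	return unbalanced_zone;
}

/*
 * The caller can then target its back-off, mirroring the hunks above:
 *
 *	if (!unbalanced_zone)
 *		break;			// all zones balanced: kswapd is done
 *	wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
 */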