path: root/mm
author    Linus Torvalds    2012-12-20 22:00:43 -0600
committer Linus Torvalds    2012-12-20 22:00:43 -0600
commit   4c9a44aebeaef35570a67aed17b72a2cf8d0b219 (patch)
tree     abb874fe7f50671627b282f6c7fb58db5e75a2e3 /mm
parent   1f0377ff088ed2971c57debc9b0c3b846ec431fd (diff)
parent   cfde819088422503b5c69e03ab7bb90f87121d4d (diff)
download kernel-common-4c9a44aebeaef35570a67aed17b72a2cf8d0b219.tar.gz
         kernel-common-4c9a44aebeaef35570a67aed17b72a2cf8d0b219.tar.xz
         kernel-common-4c9a44aebeaef35570a67aed17b72a2cf8d0b219.zip
Merge branch 'akpm' (Andrew's patch-bomb)
Merge the rest of Andrew's patches for -rc1: "A bunch of fixes and misc missed-out-on things. That'll do for -rc1. I still have a batch of IPC patches which still have a possible bug report which I'm chasing down."

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (25 commits)
  keys: use keyring_alloc() to create module signing keyring
  keys: fix unreachable code
  sendfile: allows bypassing of notifier events
  SGI-XP: handle non-fatal traps
  fat: fix incorrect function comment
  Documentation: ABI: remove testing/sysfs-devices-node
  proc: fix inconsistent lock state
  linux/kernel.h: fix DIV_ROUND_CLOSEST with unsigned divisors
  memcg: don't register hotcpu notifier from ->css_alloc()
  checkpatch: warn on uapi #includes that #include <uapi/...
  revert "rtc: recycle id when unloading a rtc driver"
  mm: clean up transparent hugepage sysfs error messages
  hfsplus: add error message for the case of failure of sync fs in delayed_sync_fs() method
  hfsplus: rework processing of hfs_btree_write() returned error
  hfsplus: rework processing errors in hfsplus_free_extents()
  hfsplus: avoid crash on failed block map free
  kcmp: include linux/ptrace.h
  drivers/rtc/rtc-imxdi.c: must include <linux/spinlock.h>
  mm: cma: WARN if freed memory is still in use
  exec: do not leave bprm->interp on stack
  ...
Diffstat (limited to 'mm')
-rw-r--r--   mm/compaction.c      26
-rw-r--r--   mm/huge_memory.c      6
-rw-r--r--   mm/memcontrol.c      14
-rw-r--r--   mm/page-writeback.c  25
-rw-r--r--   mm/page_alloc.c      11
5 files changed, 65 insertions(+), 17 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 5ad7f4f4d6f..6b807e46649 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -17,6 +17,21 @@
 #include <linux/balloon_compaction.h>
 #include "internal.h"
 
+#ifdef CONFIG_COMPACTION
+static inline void count_compact_event(enum vm_event_item item)
+{
+	count_vm_event(item);
+}
+
+static inline void count_compact_events(enum vm_event_item item, long delta)
+{
+	count_vm_events(item, delta);
+}
+#else
+#define count_compact_event(item) do { } while (0)
+#define count_compact_events(item, delta) do { } while (0)
+#endif
+
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
 
 #define CREATE_TRACE_POINTS
@@ -303,10 +318,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	if (blockpfn == end_pfn)
 		update_pageblock_skip(cc, valid_page, total_isolated, false);
 
-	count_vm_events(COMPACTFREE_SCANNED, nr_scanned);
+	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
 	if (total_isolated)
-		count_vm_events(COMPACTISOLATED, total_isolated);
-
+		count_compact_events(COMPACTISOLATED, total_isolated);
 	return total_isolated;
 }
 
@@ -613,9 +627,9 @@ next_pageblock:
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
-	count_vm_events(COMPACTMIGRATE_SCANNED, nr_scanned);
+	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
 	if (nr_isolated)
-		count_vm_events(COMPACTISOLATED, nr_isolated);
+		count_compact_events(COMPACTISOLATED, nr_isolated);
 
 	return low_pfn;
 }
@@ -1110,7 +1124,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	if (!order || !may_enter_fs || !may_perform_io)
 		return rc;
 
-	count_vm_event(COMPACTSTALL);
+	count_compact_event(COMPACTSTALL);
 
 #ifdef CONFIG_CMA
 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
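
Note on the compaction.c change: compaction.c is built whenever CONFIG_COMPACTION or CONFIG_CMA is enabled, and the COMPACT* vmstat counters appear to exist only under CONFIG_COMPACTION, so the new count_compact_event()/count_compact_events() wrappers presumably let the accounting compile away in a CMA-only build while keeping the call sites unconditional. A minimal, self-contained sketch of that compile-time stub pattern follows; HAVE_STATS, count_event() and nr_events are illustrative stand-ins, not kernel identifiers.

/*
 * Compile with -DHAVE_STATS to keep the accounting; without it the macro
 * version compiles the counter away but the call sites stay unchanged.
 */
#include <stdio.h>

#ifdef HAVE_STATS
static unsigned long nr_events;

static inline void count_event(long delta)
{
        nr_events += delta;     /* real accounting when the feature is built in */
}
#else
#define count_event(delta) do { } while (0)     /* no-op stub otherwise */
#endif

int main(void)
{
        count_event(3);         /* callers never need their own #ifdefs */
#ifdef HAVE_STATS
        printf("events: %lu\n", nr_events);
#else
        printf("stats compiled out\n");
#endif
        return 0;
}
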
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 32754eece63..9e894edc781 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -574,19 +574,19 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
 
 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
 	if (unlikely(!*hugepage_kobj)) {
-		printk(KERN_ERR "hugepage: failed kobject create\n");
+		printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
 		return -ENOMEM;
 	}
 
 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
 	if (err) {
-		printk(KERN_ERR "hugepage: failed register hugeage group\n");
+		printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
 		goto delete_obj;
 	}
 
 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
 	if (err) {
-		printk(KERN_ERR "hugepage: failed register hugeage group\n");
+		printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
 		goto remove_hp_group;
 	}
 
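
The huge_memory.c hunk only rewords the error messages, but the surrounding function is the usual create-then-unwind ladder: each successful setup step has a matching cleanup label (delete_obj, remove_hp_group) reached by goto from later failures. A rough userspace sketch of that structure, with made-up names (setup_a, setup_b, teardown_a) rather than kernel APIs:

#include <stdio.h>

static int setup_a(void)     { return 0; }    /* pretend these can fail */
static int setup_b(void)     { return -1; }
static void teardown_a(void) { puts("teardown_a"); }

static int init_example(void)
{
        int err;

        err = setup_a();
        if (err) {
                fprintf(stderr, "example: failed to set up a\n");
                return err;
        }

        err = setup_b();
        if (err) {
                fprintf(stderr, "example: failed to set up b\n");
                goto undo_a;    /* unwind only what already succeeded */
        }

        return 0;

undo_a:
        teardown_a();
        return err;
}

int main(void)
{
        return init_example() ? 1 : 0;
}
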
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f3009b4bae5..09255ec8159 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6090,7 +6090,6 @@ mem_cgroup_css_alloc(struct cgroup *cont)
 					&per_cpu(memcg_stock, cpu);
 			INIT_WORK(&stock->work, drain_local_stock);
 		}
-		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 	} else {
 		parent = mem_cgroup_from_cont(cont->parent);
 		memcg->use_hierarchy = parent->use_hierarchy;
@@ -6756,6 +6755,19 @@ struct cgroup_subsys mem_cgroup_subsys = {
 	.use_id = 1,
 };
 
+/*
+ * The rest of init is performed during ->css_alloc() for root css which
+ * happens before initcalls.  hotcpu_notifier() can't be done together as
+ * it would introduce circular locking by adding cgroup_lock -> cpu hotplug
+ * dependency.  Do it from a subsys_initcall().
+ */
+static int __init mem_cgroup_init(void)
+{
+	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+	return 0;
+}
+subsys_initcall(mem_cgroup_init);
+
 #ifdef CONFIG_MEMCG_SWAP
 static int __init enable_swap_account(char *s)
 {
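
The new comment in memcontrol.c gives the rationale: registering the hotplug callback from ->css_alloc() would add a cgroup_lock -> cpu hotplug lock dependency, so registration is deferred to a subsys_initcall. As a loose, userspace-only sketch of that two-phase idea (alloc_state, register_cpu_callback and late_init are invented names, and nothing here models the actual kernel locking):

#include <stdio.h>

static int cpu_callback(int cpu)
{
        printf("cpu %d event\n", cpu);
        return 0;
}

static int (*registered_cb)(int);

static void register_cpu_callback(int (*cb)(int))
{
        registered_cb = cb;     /* stands in for hotcpu_notifier() */
}

static void alloc_state(void)
{
        /* allocation-time init only: no callback registration here,
         * mirroring what ->css_alloc() is left doing in the diff above */
}

static void late_init(void)
{
        /* deferred phase, analogous to subsys_initcall(mem_cgroup_init) */
        register_cpu_callback(cpu_callback);
}

int main(void)
{
        alloc_state();
        late_init();
        if (registered_cb)
                registered_cb(0);
        return 0;
}
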
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6f427122449..0713bfbf095 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 			zone_reclaimable_pages(z) - z->dirty_balance_reserve;
 	}
 	/*
+	 * Unreclaimable memory (kernel memory or anonymous memory
+	 * without swap) can bring down the dirtyable pages below
+	 * the zone's dirty balance reserve and the above calculation
+	 * will underflow.  However we still want to add in nodes
+	 * which are below threshold (negative values) to get a more
+	 * accurate calculation but make sure that the total never
+	 * underflows.
+	 */
+	if ((long)x < 0)
+		x = 0;
+
+	/*
 	 * Make sure that the number of highmem pages is never larger
 	 * than the number of the total dirtyable memory. This can only
 	 * occur in very strange VM situations but we want to make sure
@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
-	    dirty_balance_reserve;
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+	x -= min(x, dirty_balance_reserve);
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
 	 * highmem zone can hold its share of dirty pages, so we don't
 	 * care about vm_highmem_is_dirtyable here.
 	 */
-	return zone_page_state(zone, NR_FREE_PAGES) +
-	       zone_reclaimable_pages(zone) -
-	       zone->dirty_balance_reserve;
+	unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
+		zone_reclaimable_pages(zone);
+
+	/* don't allow this to underflow */
+	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+	return nr_pages;
 }
 
 /**
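
All three page-writeback.c hunks guard the same arithmetic hazard: the dirtyable-memory sums are unsigned, so subtracting a reserve larger than the sum wraps around to a huge value instead of going negative. A small standalone program showing the old versus new style (the numbers are made up; only the arithmetic pattern matches the diff):

#include <stdio.h>

int main(void)
{
        unsigned long free_pages = 100, reclaimable = 50, reserve = 200;

        /* old style: wraps around to a huge number when reserve > sum */
        unsigned long naive = free_pages + reclaimable - reserve;

        /* new style: clamp the subtraction so the result never underflows */
        unsigned long x = free_pages + reclaimable;
        x -= (x < reserve) ? x : reserve;       /* same effect as min(x, reserve) */

        printf("naive   = %lu (wrapped)\n", naive);
        printf("clamped = %lu\n", x);
        return 0;
}
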
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ad2ad168ef..4ba5e37127f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5978,8 +5978,15 @@ done:
 
 void free_contig_range(unsigned long pfn, unsigned nr_pages)
 {
-	for (; nr_pages--; ++pfn)
-		__free_page(pfn_to_page(pfn));
+	unsigned int count = 0;
+
+	for (; nr_pages--; pfn++) {
+		struct page *page = pfn_to_page(pfn);
+
+		count += page_count(page) != 1;
+		__free_page(page);
+	}
+	WARN(count != 0, "%d pages are still in use!\n", count);
 }
 #endif
 
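The free_contig_range() change tallies pages whose refcount is not 1 while freeing them and emits a single WARN at the end rather than one message per page. A standalone sketch of that count-and-warn-once idiom (the refcount array is invented, and plain fprintf stands in for page_count()/WARN()):

#include <stdio.h>

int main(void)
{
        int refcount[] = { 1, 1, 2, 1, 3 };     /* pretend per-page refcounts */
        unsigned int i, count = 0;

        for (i = 0; i < sizeof(refcount) / sizeof(refcount[0]); i++) {
                count += refcount[i] != 1;      /* booleans accumulate into a tally */
                /* __free_page(page) would go here */
        }

        if (count != 0)                         /* stands in for WARN() */
                fprintf(stderr, "%u pages are still in use!\n", count);
        return 0;
}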