author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-03-12 03:26:20 -0500
committer	Pekka Enberg <penberg@kernel.org>	2014-03-27 07:27:34 -0500
commit	80c3a9981a544b6e96debfbcca5190b727ecd09e (patch)
tree	90e7b058d1378dfe049ae103e4e43139eed3d719 /mm
parent	5087c8229986cc502c807a15f8ea416b0ef22346 (diff)
slub: fix high order page allocation problem with __GFP_NOFAIL
SLUB already tries to allocate high order pages with __GFP_NOFAIL cleared. But when allocating the shadow page for kmemcheck, it missed clearing the flag, which triggers the WARN_ON_ONCE() reported by Christian Casteyde.

https://bugzilla.kernel.org/show_bug.cgi?id=65991
https://lkml.org/lkml/2013/12/3/764

This patch fixes the problem by passing the same allocation flags that were used for the original allocation.

Reported-by: Christian Casteyde <casteyde.christian@free.fr>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
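For illustration, the flag-masking pattern at issue can be modeled in a few lines of userspace C. This is a minimal sketch, not kernel code: the MY_GFP_* constants and the high_order_gfp() helper are made-up stand-ins for the real masking done in allocate_slab(), where alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL.

#include <stdio.h>

/* Stand-in flag bits for illustration only; the real GFP flags are
 * defined in include/linux/gfp.h and have different values. */
typedef unsigned int gfp_t;
#define MY_GFP_KERNEL  0x1u
#define MY_GFP_NOWARN  0x2u
#define MY_GFP_NORETRY 0x4u
#define MY_GFP_NOFAIL  0x8u

/* Model of SLUB's masking: the opportunistic high order attempt must not
 * carry __GFP_NOFAIL, so it is cleared (with warnings/retries suppressed). */
static gfp_t high_order_gfp(gfp_t flags)
{
	return (flags | MY_GFP_NOWARN | MY_GFP_NORETRY) & ~MY_GFP_NOFAIL;
}

int main(void)
{
	gfp_t flags = MY_GFP_KERNEL | MY_GFP_NOFAIL; /* caller demands no-fail */
	gfp_t alloc_gfp = high_order_gfp(flags);     /* safe for order > 1 */

	/* The bug: kmemcheck's shadow page was allocated with the raw
	 * 'flags', reintroducing __GFP_NOFAIL at high order; the fix is to
	 * reuse 'alloc_gfp' for every allocation tied to this request. */
	printf("flags=%#x alloc_gfp=%#x nofail cleared=%d\n",
	       flags, alloc_gfp, !(alloc_gfp & MY_GFP_NOFAIL));
	return 0;
}

Note in the hunk below that the fallback path restores alloc_gfp = flags before retrying at the minimum order, since the warning only concerns __GFP_NOFAIL at higher orders.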
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7e3e0458bce4..591bf985aed0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1350,11 +1350,12 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
+		alloc_gfp = flags;
 		/*
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		page = alloc_slab_page(flags, node, oo);
+		page = alloc_slab_page(alloc_gfp, node, oo);
 
 		if (page)
 			stat(s, ORDER_FALLBACK);
@@ -1364,7 +1365,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
-		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
+		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
 
 		/*
 		 * Objects from caches that have a constructor don't get