Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	98
1 files changed, 51 insertions, 47 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 24a615d42d74..462938fc7cb9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1670,6 +1670,14 @@ static void kmem_rcu_free(struct rcu_head *head)
 }
 
 #if DEBUG
+static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
+{
+	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+		(cachep->size % PAGE_SIZE) == 0)
+		return true;
+
+	return false;
+}
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
@@ -1703,6 +1711,23 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
 	}
 	*addr++ = 0x87654321;
 }
+
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
+				int map, unsigned long caller)
+{
+	if (!is_debug_pagealloc_cache(cachep))
+		return;
+
+	if (caller)
+		store_stackinfo(cachep, objp, caller);
+
+	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
+}
+
+#else
+static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
+				int map, unsigned long caller) {}
+
 #endif
 
 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
@@ -1781,6 +1806,9 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 	int size, i;
 	int lines = 0;
 
+	if (is_debug_pagealloc_cache(cachep))
+		return;
+
 	realobj = (char *)objp + obj_offset(cachep);
 	size = cachep->object_size;
 
@@ -1846,16 +1874,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 		void *objp = index_to_obj(cachep, page, i);
 
 		if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-			if (cachep->size % PAGE_SIZE == 0 &&
-					OFF_SLAB(cachep))
-				kernel_map_pages(virt_to_page(objp),
-					cachep->size / PAGE_SIZE, 1);
-			else
-				check_poison_obj(cachep, objp);
-#else
 			check_poison_obj(cachep, objp);
-#endif
+			slab_kernel_map(cachep, objp, 1, 0);
 		}
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
@@ -2179,7 +2199,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		else
 			size += BYTES_PER_WORD;
 	}
-#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	/*
 	 * To activate debug pagealloc, off-slab management is necessary
 	 * requirement. In early phase of initialization, small sized slab
@@ -2187,14 +2206,14 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * to check size >= 256. It guarantees that all necessary small
 	 * sized slab is initialized in current slab initialization sequence.
 	 */
-	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
+		!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
 		size >= 256 && cachep->object_size > cache_line_size() &&
 		ALIGN(size, cachep->align) < PAGE_SIZE) {
 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}
 #endif
-#endif
 
 	/*
 	 * Determine if the slab management is 'on' or 'off' slab.
@@ -2237,15 +2256,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	if (flags & CFLGS_OFF_SLAB) {
 		/* really off slab. No need for manual alignment */
 		freelist_size = calculate_freelist_size(cachep->num, 0);
-
-#ifdef CONFIG_PAGE_POISONING
-		/* If we're going to use the generic kernel_map_pages()
-		 * poisoning, then it's going to smash the contents of
-		 * the redzone and userword anyhow, so switch them off.
-		 */
-		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
-#endif
 	}
 
 	cachep->colour_off = cache_line_size();
@@ -2261,7 +2271,19 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	cachep->size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
 
-	if (flags & CFLGS_OFF_SLAB) {
+#if DEBUG
+	/*
+	 * If we're going to use the generic kernel_map_pages()
+	 * poisoning, then it's going to smash the contents of
+	 * the redzone and userword anyhow, so switch them off.
+	 */
+	if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
+		(cachep->flags & SLAB_POISON) &&
+		is_debug_pagealloc_cache(cachep))
+		cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
+#endif
+
+	if (OFF_SLAB(cachep)) {
 		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
 		/*
 		 * This is a possibility for one of the kmalloc_{dma,}_caches.
@@ -2488,9 +2510,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
 	for (i = 0; i < cachep->num; i++) {
 		void *objp = index_to_obj(cachep, page, i);
 #if DEBUG
-		/* need to poison the objs? */
-		if (cachep->flags & SLAB_POISON)
-			poison_obj(cachep, objp, POISON_FREE);
 		if (cachep->flags & SLAB_STORE_USER)
 			*dbg_userword(cachep, objp) = NULL;
 
@@ -2514,10 +2533,11 @@ static void cache_init_objs(struct kmem_cache *cachep,
 			slab_error(cachep, "constructor overwrote the"
 				   " start of an object");
 		}
-		if ((cachep->size % PAGE_SIZE) == 0 &&
-				OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
-			kernel_map_pages(virt_to_page(objp),
-					 cachep->size / PAGE_SIZE, 0);
+		/* need to poison the objs? */
+		if (cachep->flags & SLAB_POISON) {
+			poison_obj(cachep, objp, POISON_FREE);
+			slab_kernel_map(cachep, objp, 0, 0);
+		}
 #else
 		if (cachep->ctor)
 			cachep->ctor(objp);
@@ -2736,17 +2756,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 
 	set_obj_status(page, objnr, OBJECT_FREE);
 	if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
-			store_stackinfo(cachep, objp, caller);
-			kernel_map_pages(virt_to_page(objp),
-					 cachep->size / PAGE_SIZE, 0);
-		} else {
-			poison_obj(cachep, objp, POISON_FREE);
-		}
-#else
 		poison_obj(cachep, objp, POISON_FREE);
-#endif
+		slab_kernel_map(cachep, objp, 0, caller);
 	}
 	return objp;
 }
@@ -2873,15 +2884,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 	if (!objp)
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
-			kernel_map_pages(virt_to_page(objp),
-					 cachep->size / PAGE_SIZE, 1);
-		else
-			check_poison_obj(cachep, objp);
-#else
 		check_poison_obj(cachep, objp);
-#endif
+		slab_kernel_map(cachep, objp, 1, 0);
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)
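
For readers skimming the hunks above: the whole cleanup reduces to one predicate plus one mapping helper that every poison/unpoison site now calls. The following is a condensed, standalone C sketch of that pattern, not part of the commit itself; the stub definitions of debug_pagealloc_enabled(), OFF_SLAB(), virt_to_page(), kernel_map_pages() and store_stackinfo() are placeholders added here only so the sketch compiles outside the kernel, and struct kmem_cache is reduced to the one field the helpers touch.

/*
 * Condensed sketch of the helpers introduced by this patch.  Only the
 * control flow mirrors mm/slab.c; all kernel facilities are stubbed.
 */
#include <stdbool.h>

#define PAGE_SIZE 4096UL

struct kmem_cache {
	unsigned long size;
};

/* Placeholder stand-ins for kernel facilities (not the real definitions). */
static bool debug_pagealloc_enabled(void) { return true; }
static bool OFF_SLAB(struct kmem_cache *cachep) { (void)cachep; return true; }
static void *virt_to_page(void *addr) { return addr; }
static void kernel_map_pages(void *page, unsigned long numpages, int map)
{
	(void)page; (void)numpages; (void)map;
}
static void store_stackinfo(struct kmem_cache *cachep, void *objp,
			    unsigned long caller)
{
	(void)cachep; (void)objp; (void)caller;
}

/* One predicate decides whether a cache gets debug pagealloc treatment. */
static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
{
	return debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
	       (cachep->size % PAGE_SIZE) == 0;
}

/* One helper (un)maps the object's pages, optionally recording the caller. */
static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
			    int map, unsigned long caller)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;

	if (caller)
		store_stackinfo(cachep, objp, caller);

	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}

int main(void)
{
	struct kmem_cache cache = { .size = 2 * PAGE_SIZE };
	static char obj[16];

	/* Free path (cf. cache_free_debugcheck): record caller, unmap pages. */
	slab_kernel_map(&cache, obj, 0, 0x12345678UL);
	/* Alloc path (cf. cache_alloc_debugcheck_after): map pages back in. */
	slab_kernel_map(&cache, obj, 1, 0);
	return 0;
}

With CONFIG_DEBUG_PAGEALLOC disabled, the patch instead supplies the empty inline slab_kernel_map() stub shown in the second hunk, so call sites such as cache_free_debugcheck() and cache_alloc_debugcheck_after() read the same either way.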