Diffstat (limited to 'mm')
 mm/Makefile     |   1 +
 mm/ashmem.c     | 748 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/page_alloc.c |  25 +++++++++-
 mm/shmem.c      |  15 ++++--
 4 files changed, 783 insertions(+), 6 deletions(-)
diff --git a/mm/Makefile b/mm/Makefile
index 836e4163c1b..2d00bf57ca4 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_HUGETLBFS) += hugetlb.o
 obj-$(CONFIG_NUMA) += mempolicy.o
 obj-$(CONFIG_SPARSEMEM) += sparse.o
 obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
+obj-$(CONFIG_ASHMEM) += ashmem.o
 obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_COMPACTION) += compaction.o
 obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
diff --git a/mm/ashmem.c b/mm/ashmem.c
new file mode 100644
index 00000000000..66e3f23ee33
--- /dev/null
+++ b/mm/ashmem.c
@@ -0,0 +1,748 @@
/* mm/ashmem.c
**
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/ashmem.h>

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
        char name[ASHMEM_FULL_NAME_LEN]; /* optional name for /proc/pid/maps */
        struct list_head unpinned_list;  /* list of this area's unpinned ranges */
        struct file *file;               /* the shmem-based backing file */
        size_t size;                     /* size of the mapping, in bytes */
        unsigned long prot_mask;         /* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
        struct list_head lru;      /* entry in LRU list */
        struct list_head unpinned; /* entry in its area's unpinned list */
        struct ashmem_area *asma;  /* associated area */
        size_t pgstart;            /* starting page, inclusive */
        size_t pgend;              /* ending page, inclusive */
        unsigned int purged;       /* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of, and each individual, ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
        ((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
        ((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
        (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
        (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
        (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
        (page_in_range(range, start) || page_in_range(range, end) || \
         page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
        ((range)->pgend < (page))

#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)

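The predicates above treat pgstart/pgend as inclusive page indices; note the third clause of page_range_in_range(), which catches a request that entirely swallows a range even though neither endpoint falls inside it. A stand-alone user-space sketch of the semantics (the struct and macro copies below are lifted for illustration only and are not part of the patch):

#include <assert.h>
#include <stddef.h>

struct demo_range { size_t pgstart, pgend; };

#define page_in_range(range, page) \
        (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
#define page_range_subsumes_range(range, start, end) \
        (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
#define page_range_in_range(range, start, end) \
        (page_in_range(range, start) || page_in_range(range, end) || \
         page_range_subsumes_range(range, start, end))

int main(void)
{
        struct demo_range r = { .pgstart = 4, .pgend = 9 };    /* pages 4..9 */

        assert(page_in_range(&r, 4) && page_in_range(&r, 9));  /* inclusive */
        assert(!page_in_range(&r, 10));
        assert(page_range_in_range(&r, 8, 20));   /* overlaps the tail */
        assert(page_range_in_range(&r, 0, 100));  /* request subsumes range */
        assert(!page_range_in_range(&r, 10, 20)); /* disjoint: no overlap */
        return 0;
}
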
static inline void lru_add(struct ashmem_range *range)
{
        list_add_tail(&range->lru, &ashmem_lru_list);
        lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
        list_del(&range->lru);
        lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
                       struct ashmem_range *prev_range, unsigned int purged,
                       size_t start, size_t end)
{
        struct ashmem_range *range;

        range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
        if (unlikely(!range))
                return -ENOMEM;

        range->asma = asma;
        range->pgstart = start;
        range->pgend = end;
        range->purged = purged;

        list_add_tail(&range->unpinned, &prev_range->unpinned);

        if (range_on_lru(range))
                lru_add(range);

        return 0;
}

static void range_del(struct ashmem_range *range)
{
        list_del(&range->unpinned);
        if (range_on_lru(range))
                lru_del(range);
        kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
                                size_t start, size_t end)
{
        size_t pre = range_size(range);

        range->pgstart = start;
        range->pgend = end;

        if (range_on_lru(range))
                lru_count -= pre - range_size(range);
}

static int ashmem_open(struct inode *inode, struct file *file)
{
        struct ashmem_area *asma;
        int ret;

        ret = generic_file_open(inode, file);
        if (unlikely(ret))
                return ret;

        asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
        if (unlikely(!asma))
                return -ENOMEM;

        INIT_LIST_HEAD(&asma->unpinned_list);
        memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
        asma->prot_mask = PROT_MASK;
        file->private_data = asma;

        return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
        struct ashmem_area *asma = file->private_data;
        struct ashmem_range *range, *next;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
                range_del(range);
        mutex_unlock(&ashmem_mutex);

        if (asma->file)
                fput(asma->file);
        kmem_cache_free(ashmem_area_cachep, asma);

        return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
                           size_t len, loff_t *pos)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* If size is not set, or set to 0, always return EOF. */
        if (asma->size == 0)
                goto out;

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = asma->file->f_op->read(asma->file, buf, len, pos);
        if (ret < 0)
                goto out;

        /* Update backing file pos, since f_op->read() doesn't */
        asma->file->f_pos = *pos;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
        struct ashmem_area *asma = file->private_data;
        int ret;

        mutex_lock(&ashmem_mutex);

        if (asma->size == 0) {
                ret = -EINVAL;
                goto out;
        }

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = asma->file->f_op->llseek(asma->file, offset, origin);
        if (ret < 0)
                goto out;

        /* Copy f_pos from backing file, since f_op->llseek() sets it */
        file->f_pos = asma->file->f_pos;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static inline unsigned long calc_vm_may_flags(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
               _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

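For reference, _calc_vm_trans() comes from <linux/mman.h>: it yields bit2 exactly when bit1 is set in x, via a branch-free multiply or divide. A user-space sketch with stand-in flag values (the DEMO_* constants are illustrative, not the real PROT_*/VM_* values):

#include <assert.h>

#define _calc_vm_trans(x, bit1, bit2) \
        ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
                          : ((x) & (bit1)) / ((bit1) / (bit2)))

#define DEMO_PROT_READ  0x1
#define DEMO_VM_MAYREAD 0x10

int main(void)
{
        /* flag present: translated to the destination bit */
        assert(_calc_vm_trans(DEMO_PROT_READ, DEMO_PROT_READ,
                              DEMO_VM_MAYREAD) == DEMO_VM_MAYREAD);
        /* flag absent: nothing set */
        assert(_calc_vm_trans(0, DEMO_PROT_READ, DEMO_VM_MAYREAD) == 0);
        return 0;
}
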
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* user needs to SET_SIZE before mapping */
        if (unlikely(!asma->size)) {
                ret = -EINVAL;
                goto out;
        }

        /* requested protection bits must match our allowed protection mask */
        if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
                     calc_vm_prot_bits(PROT_MASK))) {
                ret = -EPERM;
                goto out;
        }
        vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

        if (!asma->file) {
                char *name = ASHMEM_NAME_DEF;
                struct file *vmfile;

                if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                        name = asma->name;

                /* ... and allocate the backing shmem file */
                vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
                if (unlikely(IS_ERR(vmfile))) {
                        ret = PTR_ERR(vmfile);
                        goto out;
                }
                asma->file = vmfile;
        }
        get_file(asma->file);

        if (vma->vm_flags & VM_SHARED)
                shmem_set_file(vma, asma->file);
        else {
                if (vma->vm_file)
                        fput(vma->vm_file);
                vma->vm_file = asma->file;
        }
        vma->vm_flags |= VM_CAN_NONLINEAR;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

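For context, a minimal user-space sketch of the open/configure/mmap sequence this handler expects: the name and size must be set before the first mmap(), since set_name() and SET_SIZE fail once the backing file exists. ashmem_create() is a hypothetical helper and error handling on the ioctls is trimmed for brevity:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ashmem.h>

int ashmem_create(const char *name, size_t len, void **out)
{
        int fd = open("/dev/ashmem", O_RDWR);
        if (fd < 0)
                return -1;
        ioctl(fd, ASHMEM_SET_NAME, name); /* optional; pre-mmap only */
        ioctl(fd, ASHMEM_SET_SIZE, len);  /* mandatory; pre-mmap only */
        *out = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        return *out == MAP_FAILED ? -1 : fd;
}
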
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
        struct ashmem_range *range, *next;

        /* We might recurse into filesystem code, so bail out if necessary */
        if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
                return -1;
        if (!sc->nr_to_scan)
                return lru_count;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
                struct inode *inode = range->asma->file->f_dentry->d_inode;
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;

                vmtruncate_range(inode, start, end);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);

                sc->nr_to_scan -= range_size(range);
                if (sc->nr_to_scan <= 0)
                        break;
        }
        mutex_unlock(&ashmem_mutex);

        return lru_count;
}

static struct shrinker ashmem_shrinker = {
        .shrink = ashmem_shrink,
        .seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* the user can only remove, not add, protection bits */
        if (unlikely((asma->prot_mask & prot) != prot)) {
                ret = -EINVAL;
                goto out;
        }

        /* does the application expect PROT_READ to imply PROT_EXEC? */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        asma->prot_mask = prot;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* cannot change an existing mapping's name */
        if (unlikely(asma->file)) {
                ret = -EINVAL;
                goto out;
        }

        if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
                                    name, ASHMEM_NAME_LEN)))
                ret = -EFAULT;
        asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
        mutex_unlock(&ashmem_mutex);

        return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);
        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
                size_t len;

                /*
                 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
                 * prevents us from revealing one user's stack to another.
                 */
                len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
                if (unlikely(copy_to_user(name,
                                asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
                        ret = -EFAULT;
        } else {
                if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
                                          sizeof(ASHMEM_NAME_DEF))))
                        ret = -EFAULT;
        }
        mutex_unlock(&ashmem_mutex);

        return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        int ret = ASHMEM_NOT_PURGED;

        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* moved past last applicable page; we can short circuit */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to pin pages that span multiple ranges,
                 * or to pin pages that aren't even unpinned, so this is messy.
                 *
                 * Four cases:
                 * 1. The requested range subsumes an existing range, so we
                 *    just remove the entire matching range.
                 * 2. The requested range overlaps the start of an existing
                 *    range, so we just update that range.
                 * 3. The requested range overlaps the end of an existing
                 *    range, so we just update that range.
                 * 4. The requested range punches a hole in an existing range,
                 *    so we have to update one side of the range and then
                 *    create a new range for the other side.
                 */
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret |= range->purged;

                        /* Case #1: Easy. Just nuke the whole thing. */
                        if (page_range_subsumes_range(range, pgstart, pgend)) {
                                range_del(range);
                                continue;
                        }

                        /* Case #2: We overlap from the start, so adjust it */
                        if (range->pgstart >= pgstart) {
                                range_shrink(range, pgend + 1, range->pgend);
                                continue;
                        }

                        /* Case #3: We overlap from the rear, so adjust it */
                        if (range->pgend <= pgend) {
                                range_shrink(range, range->pgstart,
                                             pgstart - 1);
                                continue;
                        }

                        /*
                         * Case #4: We eat a chunk out of the middle. A bit
                         * more complicated, we allocate a new range for the
                         * second half and adjust the first chunk's endpoint.
                         */
                        range_alloc(asma, range, range->purged,
                                    pgend + 1, range->pgend);
                        range_shrink(range, range->pgstart, pgstart - 1);
                        break;
                }
        }

        return ret;
}

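To make the four cases concrete, suppose the area has a single unpinned range covering pages 10..19 (not yet purged). The outcomes below follow directly from the code above and are purely illustrative:

/*
 *   pin 5..25  -> case 1: the range is subsumed and deleted outright
 *   pin 10..14 -> case 2: the range shrinks to 15..19
 *   pin 15..19 -> case 3: the range shrinks to 10..14
 *   pin 13..16 -> case 4: the range splits into 10..12 and 17..19
 *
 * In every case the return value ORs in 'purged' from each overlapped
 * range, so the caller learns whether any of the pinned pages had
 * already been reclaimed.
 */
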
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        unsigned int purged = ASHMEM_NOT_PURGED;

restart:
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* short circuit: this is our insertion point */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to unpin pages that are already entirely
                 * or partially pinned. We handle those two cases here.
                 */
                if (page_range_subsumed_by_range(range, pgstart, pgend))
                        return 0;
                if (page_range_in_range(range, pgstart, pgend)) {
                        pgstart = min_t(size_t, range->pgstart, pgstart);
                        pgend = max_t(size_t, range->pgend, pgend);
                        purged |= range->purged;
                        range_del(range);
                        goto restart;
                }
        }

        return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
                                 size_t pgend)
{
        struct ashmem_range *range;
        int ret = ASHMEM_IS_PINNED;

        list_for_each_entry(range, &asma->unpinned_list, unpinned) {
                if (range_before_page(range, pgstart))
                        break;
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret = ASHMEM_IS_UNPINNED;
                        break;
                }
        }

        return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
                            void __user *p)
{
        struct ashmem_pin pin;
        size_t pgstart, pgend;
        int ret = -EINVAL;

        if (unlikely(!asma->file))
                return -EINVAL;

        if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
                return -EFAULT;

        /* per custom, you can pass zero for len to mean "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;

        if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
                return -EINVAL;

        if (unlikely(((__u32) -1) - pin.offset < pin.len))
                return -EINVAL;

        if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
                return -EINVAL;

        pgstart = pin.offset / PAGE_SIZE;
        pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

        mutex_lock(&ashmem_mutex);

        switch (cmd) {
        case ASHMEM_PIN:
                ret = ashmem_pin(asma, pgstart, pgend);
                break;
        case ASHMEM_UNPIN:
                ret = ashmem_unpin(asma, pgstart, pgend);
                break;
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_get_pin_status(asma, pgstart, pgend);
                break;
        }

        mutex_unlock(&ashmem_mutex);

        return ret;
}

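The third check above is an overflow guard written subtraction-first so that the comparison itself cannot wrap: offset + len must fit in 32 bits. A stand-alone sketch of just that arithmetic (pin_args_wrap() is a hypothetical stand-in for the in-kernel expression):

#include <assert.h>
#include <stdint.h>

static int pin_args_wrap(uint32_t offset, uint32_t len)
{
        /* true if offset + len would wrap a 32-bit value */
        return (uint32_t)-1 - offset < len;
}

int main(void)
{
        assert(!pin_args_wrap(0x1000, 0x1000));     /* fits comfortably */
        assert(pin_args_wrap(0xfffff000u, 0x2000)); /* would wrap past 2^32 */
        return 0;
}
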
static long ashmem_ioctl(struct file *file, unsigned int cmd,
                         unsigned long arg)
{
        struct ashmem_area *asma = file->private_data;
        long ret = -ENOTTY;

        switch (cmd) {
        case ASHMEM_SET_NAME:
                ret = set_name(asma, (void __user *) arg);
                break;
        case ASHMEM_GET_NAME:
                ret = get_name(asma, (void __user *) arg);
                break;
        case ASHMEM_SET_SIZE:
                ret = -EINVAL;
                if (!asma->file) {
                        ret = 0;
                        asma->size = (size_t) arg;
                }
                break;
        case ASHMEM_GET_SIZE:
                ret = asma->size;
                break;
        case ASHMEM_SET_PROT_MASK:
                ret = set_prot_mask(asma, arg);
                break;
        case ASHMEM_GET_PROT_MASK:
                ret = asma->prot_mask;
                break;
        case ASHMEM_PIN:
        case ASHMEM_UNPIN:
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
                break;
        case ASHMEM_PURGE_ALL_CACHES:
                ret = -EPERM;
                if (capable(CAP_SYS_ADMIN)) {
                        struct shrink_control sc = {
                                .gfp_mask = GFP_KERNEL,
                                .nr_to_scan = 0,
                        };
                        ret = ashmem_shrink(&ashmem_shrinker, &sc);
                        sc.nr_to_scan = ret;
                        ashmem_shrink(&ashmem_shrinker, &sc);
                }
                break;
        }

        return ret;
}

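Putting the pin ioctls together, a minimal user-space sketch: unpin a page range once its contents are discardable, re-pin before reuse, and treat ASHMEM_WAS_PURGED from the pin as "the kernel reclaimed it, regenerate the data". The two wrappers are hypothetical helpers, not part of this patch:

#include <sys/ioctl.h>
#include <linux/ashmem.h>

int ashmem_unpin_region(int fd, __u32 offset, __u32 len)
{
        struct ashmem_pin pin = { .offset = offset, .len = len };
        return ioctl(fd, ASHMEM_UNPIN, &pin);  /* 0 on success */
}

int ashmem_pin_region(int fd, __u32 offset, __u32 len)
{
        struct ashmem_pin pin = { .offset = offset, .len = len };
        /* returns ASHMEM_WAS_PURGED or ASHMEM_NOT_PURGED */
        return ioctl(fd, ASHMEM_PIN, &pin);
}
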
static const struct file_operations ashmem_fops = {
        .owner = THIS_MODULE,
        .open = ashmem_open,
        .release = ashmem_release,
        .read = ashmem_read,
        .llseek = ashmem_llseek,
        .mmap = ashmem_mmap,
        .unlocked_ioctl = ashmem_ioctl,
        .compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "ashmem",
        .fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
        int ret;

        ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                               sizeof(struct ashmem_area),
                                               0, 0, NULL);
        if (unlikely(!ashmem_area_cachep)) {
                printk(KERN_ERR "ashmem: failed to create slab cache\n");
                return -ENOMEM;
        }

        ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                                sizeof(struct ashmem_range),
                                                0, 0, NULL);
        if (unlikely(!ashmem_range_cachep)) {
                printk(KERN_ERR "ashmem: failed to create slab cache\n");
                return -ENOMEM;
        }

        ret = misc_register(&ashmem_misc);
        if (unlikely(ret)) {
                printk(KERN_ERR "ashmem: failed to register misc device!\n");
                return ret;
        }

        register_shrinker(&ashmem_shrinker);

        printk(KERN_INFO "ashmem: initialized\n");

        return 0;
}

static void __exit ashmem_exit(void)
{
        int ret;

        unregister_shrinker(&ashmem_shrinker);

        ret = misc_deregister(&ashmem_misc);
        if (unlikely(ret))
                printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

        kmem_cache_destroy(ashmem_range_cachep);
        kmem_cache_destroy(ashmem_area_cachep);

        printk(KERN_INFO "ashmem: unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1b94f0868c2..aed2f5598d3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -127,6 +127,20 @@ void pm_restrict_gfp_mask(void)
 	saved_gfp_mask = gfp_allowed_mask;
 	gfp_allowed_mask &= ~GFP_IOFS;
 }
+
+static bool pm_suspending(void)
+{
+	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
+		return false;
+	return true;
+}
+
+#else
+
+static bool pm_suspending(void)
+{
+	return false;
+}
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -176,6 +190,7 @@ static char * const zone_names[MAX_NR_ZONES] = {
 };
 
 int min_free_kbytes = 1024;
+int min_free_order_shift = 1;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -1487,7 +1502,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		free_pages -= z->free_area[o].nr_free << o;
 
 		/* Require fewer higher order pages to be free */
-		min >>= 1;
+		min >>= min_free_order_shift;
 
 		if (free_pages <= min)
 			return false;
@@ -2249,6 +2264,14 @@ rebalance:
 
 			goto restart;
 		}
+
+		/*
+		 * Suspend converts GFP_KERNEL to __GFP_WAIT which can
+		 * prevent reclaim making forward progress without
+		 * invoking OOM. Bail if we are suspending.
+		 */
+		if (pm_suspending())
+			goto nopage;
 	}
 
 	/* Check if we should retry the allocation */
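The min_free_order_shift knob generalizes the old fixed halving: each step up in allocation order now divides the carried watermark by 2^min_free_order_shift instead of always by 2, so a larger shift relaxes the free-page requirement for high-order allocations. A stand-alone sketch of the arithmetic, with purely illustrative numbers:

#include <stdio.h>

int main(void)
{
        unsigned long min = 1024; /* base watermark in pages, illustrative */
        int shift;

        for (shift = 1; shift <= 4; shift++) {
                unsigned long m = min;
                int order;

                /* mirror the per-order decay in __zone_watermark_ok() */
                for (order = 1; order <= 3; order++)
                        m >>= shift;
                printf("shift=%d: order-3 requirement %lu pages\n", shift, m);
        }
        return 0; /* shift=1 -> 128 pages; shift=4 -> 0 pages */
}
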
diff --git a/mm/shmem.c b/mm/shmem.c
index 8b384776214..bcfa97dcc0a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3027,6 +3027,15 @@ put_memory:
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+	if (vma->vm_file)
+		fput(vma->vm_file);
+	vma->vm_file = file;
+	vma->vm_ops = &shmem_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -3040,11 +3049,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	vma->vm_file = file;
-	vma->vm_ops = &shmem_vm_ops;
-	vma->vm_flags |= VM_CAN_NONLINEAR;
+	shmem_set_file(vma, file);
 	return 0;
 }
 
3050 3055