author		David S. Miller <davem@davemloft.net>	2013-02-13 14:21:06 -0600
committer	David S. Miller <davem@davemloft.net>	2013-02-13 14:22:14 -0600
commit		89a77915e0f56dc7b9f9082ba787895b6a83f809 (patch)
tree		ce147ee36f1117dddeac5e1ba53eab9986b1f47b /arch
parent		b9156ebb7beef015745f917f373abc137efc3400 (diff)
download	kernel-common-89a77915e0f56dc7b9f9082ba787895b6a83f809.tar.gz
		kernel-common-89a77915e0f56dc7b9f9082ba787895b6a83f809.tar.xz
		kernel-common-89a77915e0f56dc7b9f9082ba787895b6a83f809.zip
sparc64: Fix get_user_pages_fast() wrt. THP.
Mostly mirrors the s390 logic, as unlike x86 we don't need the
SetPageReferenced() bits.

On sparc64 we also lack a user/privileged bit in the huge PMDs.

In order to make this work for THP and non-THP builds, some header
file adjustments were necessary.  Namely, provide the PMD_HUGE_* bit
defines and the pmd_large() inline unconditionally rather than
protected by TRANSPARENT_HUGEPAGE.

Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
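For context, get_user_pages_fast() is the lockless page-table walk that this
commit teaches about huge PMDs. Below is a minimal sketch of a typical
in-kernel caller; the helper name and buffer handling are illustrative only
and not part of this commit, but the (start, nr_pages, write, pages) calling
convention matches the 3.8-era API that the sparc64 implementation backs.

#include <linux/mm.h>

/*
 * Hypothetical driver helper (illustration only): pin the pages backing a
 * page-aligned user buffer before doing I/O to them.  When the buffer is
 * backed by a transparent huge page on sparc64, the walk now recognizes the
 * huge PMD via gup_huge_pmd() instead of misparsing it as a PTE table.
 */
static int example_pin_user_buffer(unsigned long uaddr, unsigned long len,
				   struct page **pages)
{
	int nr_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* 1 == write access requested; returns the number of pages pinned. */
	return get_user_pages_fast(uaddr, nr_pages, 1, pages);
}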
Diffstat (limited to 'arch')
-rw-r--r--   arch/sparc/include/asm/pgtable_64.h   14
-rw-r--r--   arch/sparc/mm/gup.c                   59
2 files changed, 63 insertions(+), 10 deletions(-)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 7870be0f5ad..08fcce90316 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -71,7 +71,6 @@
 #define PMD_PADDR		_AC(0xfffffffe,UL)
 #define PMD_PADDR_SHIFT		_AC(11,UL)
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define PMD_ISHUGE		_AC(0x00000001,UL)
 
 /* This is the PMD layout when PMD_ISHUGE is set.  With 4MB huge
@@ -86,7 +85,6 @@
 #define PMD_HUGE_ACCESSED	_AC(0x00000080,UL)
 #define PMD_HUGE_EXEC		_AC(0x00000040,UL)
 #define PMD_HUGE_SPLITTING	_AC(0x00000020,UL)
-#endif
 
 /* PGDs point to PMD tables which are 8K aligned.  */
 #define PGD_PADDR		_AC(0xfffffffc,UL)
@@ -628,6 +626,12 @@ static inline unsigned long pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+static inline int pmd_large(pmd_t pmd)
+{
+	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
+		(PMD_ISHUGE | PMD_HUGE_PRESENT);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_young(pmd_t pmd)
 {
@@ -646,12 +650,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
 	return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
 }
 
-static inline int pmd_large(pmd_t pmd)
-{
-	return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
-		(PMD_ISHUGE | PMD_HUGE_PRESENT);
-}
-
 static inline int pmd_trans_splitting(pmd_t pmd)
 {
 	return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 42c55df3aec..01ee23dd724 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 	return 1;
 }
 
+static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+			unsigned long end, int write, struct page **pages,
+			int *nr)
+{
+	struct page *head, *page, *tail;
+	u32 mask;
+	int refs;
+
+	mask = PMD_HUGE_PRESENT;
+	if (write)
+		mask |= PMD_HUGE_WRITE;
+	if ((pmd_val(pmd) & mask) != mask)
+		return 0;
+
+	refs = 0;
+	head = pmd_page(pmd);
+	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	tail = page;
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs)) {
+		*nr -= refs;
+		return 0;
+	}
+
+	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+		*nr -= refs;
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+
+	/* Any tail page need their mapcount reference taken before we
+	 * return.
+	 */
+	while (refs--) {
+		if (PageTail(tail))
+			get_huge_page_tail(tail);
+		tail++;
+	}
+
+	return 1;
+}
+
 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 			int write, struct page **pages, int *nr)
 {
@@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		pmd_t pmd = *pmdp;
 
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
+		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
-		if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+		if (unlikely(pmd_large(pmd))) {
+			if (!gup_huge_pmd(pmdp, pmd, addr, next,
+					  write, pages, nr))
+				return 0;
+		} else if (!gup_pte_range(pmd, addr, next, write,
+					  pages, nr))
 			return 0;
 	} while (pmdp++, addr = next, addr != end);
 
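As a usage note (an assumption, not part of this commit): a common way the
fixed path gets exercised from userspace is direct I/O into an anonymous
buffer, since the direct-I/O code pins the destination pages with
get_user_pages_fast(). A rough sketch of such an exercise follows, with the
4MB size chosen to match sparc64's huge page size; file name, alignment, and
madvise() usage are illustrative, not a reproducer shipped with this fix.

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define BUF_SIZE (4UL << 20)	/* one 4MB huge page on sparc64 */

int main(int argc, char **argv)
{
	void *buf;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	/* Huge-page-aligned anonymous buffer; ask THP to back it. */
	if (posix_memalign(&buf, BUF_SIZE, BUF_SIZE))
		return 1;
	madvise(buf, BUF_SIZE, MADV_HUGEPAGE);
	memset(buf, 0, BUF_SIZE);	/* fault the mapping in */

	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Direct I/O pins the destination pages via the GUP fast path. */
	if (read(fd, buf, BUF_SIZE) < 0)
		perror("read");

	close(fd);
	free(buf);
	return 0;
}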