/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  These helpers
 * are reached (rarely, and only on corrupted entries) through the
 * p?d_none_or_clear_bad macros; see the sketch below.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
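
/*
 * Illustrative sketch (hypothetical walker, not part of this file):
 * the helpers above are normally reached through the
 * p?d_none_or_clear_bad() macros while walking a range, e.g.:
 *
 *	pgd = pgd_offset(mm, addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		if (pgd_none_or_clear_bad(pgd))
 *			continue;
 *		walk_pud_range(pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 *
 * walk_pud_range() is a made-up name standing in for the next
 * level of the walk.
 */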

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache (see the
 * illustrative caller below the function).  This used to be done in
 * the caller, but sparc needs minor faults to force that call on
 * sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
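
/*
 * Illustrative sketch (hypothetical fault-handler caller, not part
 * of this file): the returned "changed" flag is what tells the
 * caller to do the update_mmu_cache() step mentioned above, e.g.:
 *
 *	entry = pte_mkyoung(*ptep);
 *	if (write_fault)
 *		entry = pte_mkdirty(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_fault))
 *		update_mmu_cache(vma, address, ptep);
 *
 * write_fault is a made-up flag for whether this was a write fault.
 */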

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif
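
/*
 * Illustrative sketch (hypothetical aging caller, not part of this
 * file): reclaim-style code uses this to ask "was the page
 * referenced since we last checked?".  The TLB flush matters
 * because a cached translation would let the CPU keep using the
 * page without setting the accessed bit again:
 *
 *	if (ptep_clear_flush_young(vma, address, ptep))
 *		referenced++;
 *
 * referenced is a made-up counter for whatever the caller tracks.
 */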

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;

	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
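
/*
 * Illustrative sketch (hypothetical unmap path, not part of this
 * file): ptep_clear_flush() is the pattern for tearing down a
 * mapping that other CPUs may still hold in their TLBs, e.g.:
 *
 *	pte = ptep_clear_flush(vma, address, ptep);
 *	if (pte_dirty(pte))
 *		set_page_dirty(page);
 *
 * page is assumed to be the struct page that the pte mapped.
 */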

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/*
	 * The tlb flush below is only needed to serialize against
	 * gup-fast: where flush_tlb_range() sends IPIs, it waits for
	 * any concurrent gup-fast walker (which runs with irqs
	 * disabled) to finish before the split proceeds (see the
	 * sketch below this function).
	 */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
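
/*
 * Illustrative sketch (gup-fast style check, not part of this
 * file): lockless walkers are expected to back off once the
 * splitting bit set above becomes visible, e.g.:
 *
 *	pmd_t pmd = ACCESS_ONCE(*pmdp);
 *	if (pmd_none(pmd) || unlikely(pmd_trans_splitting(pmd)))
 *		return 0;
 *
 * where returning 0 means "fall back to the slow, lock-taking
 * path".
 */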

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* no "address" argument, so this destroys page coloring on some architectures */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	if (list_empty(&pgtable->lru))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
						    struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
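
/*
 * Illustrative sketch (hypothetical THP lifecycle, not part of this
 * file): deposit and withdraw pair up around a huge pmd's lifetime.
 * A preallocated pte page is deposited when the huge pmd is
 * established and withdrawn again when it is split or zapped:
 *
 *	ptl = pmd_lock(mm, pmdp);
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	set_pmd_at(mm, haddr, pmdp, entry);
 *	spin_unlock(ptl);
 *
 *	... later, under the same lock ...
 *
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 *
 * so a split can repopulate the pte level without allocating, and
 * therefore without any chance of failure.  haddr and entry are
 * made-up names for the huge-aligned address and the huge pmd
 * value.
 */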

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
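
/*
 * Illustrative sketch (not part of this file): pmdp_invalidate() is
 * the tool for modifying a live huge pmd in place.  The entry is
 * made not-present and the TLB flushed before the new value is
 * written, so no CPU can race with the update through a stale TLB
 * entry:
 *
 *	pmdp_invalidate(vma, haddr, pmdp);
 *	set_pmd_at(vma->vm_mm, haddr, pmdp, new_entry);
 *
 * haddr and new_entry are made-up names for the huge-aligned
 * address and the replacement pmd value.
 */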