Diffstat (limited to 'arch/powerpc/mm/tlb_nohash.c')
 arch/powerpc/mm/tlb_nohash.c | 68 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 63 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index d32ec643c231..4e13d6f9023e 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -28,6 +28,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
@@ -36,14 +37,49 @@
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
+#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
+#include <asm/hugetlb.h>
#include "mmu_decl.h"
-#ifdef CONFIG_PPC_BOOK3E
+/*
+ * This struct lists the sw-supported page sizes. The hardware MMU may support
+ * other sizes not listed here. The .ind field is only used on MMUs that have
+ * indirect page table entries.
+ */
+#ifdef CONFIG_PPC_BOOK3E_MMU
+#ifdef CONFIG_FSL_BOOKE
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+ [MMU_PAGE_4K] = {
+ .shift = 12,
+ .enc = BOOK3E_PAGESZ_4K,
+ },
+ [MMU_PAGE_4M] = {
+ .shift = 22,
+ .enc = BOOK3E_PAGESZ_4M,
+ },
+ [MMU_PAGE_16M] = {
+ .shift = 24,
+ .enc = BOOK3E_PAGESZ_16M,
+ },
+ [MMU_PAGE_64M] = {
+ .shift = 26,
+ .enc = BOOK3E_PAGESZ_64M,
+ },
+ [MMU_PAGE_256M] = {
+ .shift = 28,
+ .enc = BOOK3E_PAGESZ_256M,
+ },
+ [MMU_PAGE_1G] = {
+ .shift = 30,
+ .enc = BOOK3E_PAGESZ_1GB,
+ },
+};
+#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
.shift = 12,
@@ -77,6 +113,8 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.enc = BOOK3E_PAGESZ_1GB,
},
};
+#endif /* CONFIG_FSL_BOOKE */
+
static inline int mmu_get_tsize(int psize)
{
return mmu_psize_defs[psize].enc;
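For context on how the table added above is consumed: each mmu_psize_defs entry maps a Linux page-size index to its log2 size (.shift) and the hardware page-size encoding (.enc) that mmu_get_tsize() hands back when TLB entries are programmed. Below is a minimal standalone sketch of that lookup pattern; the names (psize_def, PAGE_IDX_*, get_tsize) and the encoding values are simplified stand-ins, not copied from the kernel headers.

#include <stdio.h>

/* Stand-ins for the kernel's MMU_PAGE_* indices and BOOK3E_PAGESZ_*
 * encodings; the values here are illustrative only. */
enum { PAGE_IDX_4K, PAGE_IDX_4M, PAGE_IDX_COUNT };

struct psize_def {
	unsigned int shift;	/* log2 of the page size in bytes */
	unsigned int enc;	/* hardware page-size encoding for the TLB entry */
};

static const struct psize_def psize_defs[PAGE_IDX_COUNT] = {
	[PAGE_IDX_4K] = { .shift = 12, .enc = 2 },
	[PAGE_IDX_4M] = { .shift = 22, .enc = 12 },
};

/* Same shape as mmu_get_tsize(): index by the software page-size id,
 * hand back the value the hardware expects. */
static unsigned int get_tsize(int psize)
{
	return psize_defs[psize].enc;
}

int main(void)
{
	int psize = PAGE_IDX_4M;

	printf("page size = %lu bytes, hw encoding = %u\n",
	       1UL << psize_defs[psize].shift, get_tsize(psize));
	return 0;
}
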
@@ -87,7 +125,7 @@ static inline int mmu_get_tsize(int psize)
/* This isn't used on !Book3E for now */
return 0;
}
-#endif
+#endif /* CONFIG_PPC_BOOK3E_MMU */
/* The variables below are currently only used on 64-bit Book3E
* though this will probably be made common with other nohash
@@ -266,6 +304,11 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
+#ifdef CONFIG_HUGETLB_PAGE
+ if (is_vm_hugetlb_page(vma))
+ flush_hugetlb_page(vma, vmaddr);
+#endif
+
__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
mmu_get_tsize(mmu_virtual_psize), 0);
}
@@ -600,13 +643,28 @@ void __cpuinit early_init_mmu_secondary(void)
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
- /* On Embedded 64-bit, we adjust the RMA size to match
+ /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
* the bolted TLB entry. We know for now that only 1G
* entries are supported though that may eventually
- * change. We crop it to the size of the first MEMBLOCK to
+ * change.
+ *
+ * On FSL Embedded 64-bit, we adjust the RMA size to match the
+ * first bolted TLB entry size. We still limit max to 1G even if
+ * the TLB could cover more. This is due to what the early init
+ * code is set up to do.
+ *
+ * We crop it to the size of the first MEMBLOCK to
* avoid going over total available memory just in case...
*/
- ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+ unsigned long linear_sz;
+ linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
+ first_memblock_base);
+ ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
+ } else
+#endif
+ ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
/* Finally limit subsequent allocations */
memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
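
The change in the last hunk boils down to: work out how much of the first memory block the boot-time bolted mapping can actually cover (via calc_cam_sz() on FSL parts), cap the result at 1 GiB, and use that as ppc64_rma_size to limit early allocations. Below is a minimal standalone sketch of that clamping logic; cam_covered_size() is a hypothetical stand-in for calc_cam_sz(), and the fsl_booke flag stands in for the MMU_FTR_TYPE_FSL_E feature check.

#include <stdint.h>
#include <stdio.h>

#define RMA_CAP (1ULL << 30)	/* the 0x40000000 (1 GiB) cap from the patch */

/* Hypothetical stand-in for the kernel's calc_cam_sz(): pretend the bolted
 * TLB entry can only cover a power-of-two chunk and round down to that. */
static uint64_t cam_covered_size(uint64_t ram)
{
	uint64_t sz = 1;

	while (sz * 2 <= ram)
		sz *= 2;
	return sz;
}

static uint64_t compute_rma_size(uint64_t first_memblock_size, int fsl_booke)
{
	uint64_t limit = first_memblock_size;

	if (fsl_booke)
		limit = cam_covered_size(first_memblock_size);

	/* equivalent of min_t(u64, limit, 0x40000000) */
	return limit < RMA_CAP ? limit : RMA_CAP;
}

int main(void)
{
	/* e.g. a 768 MiB first memblock on an FSL part rounds down to 512 MiB */
	printf("rma = %llu MiB\n",
	       (unsigned long long)(compute_rma_size(768ULL << 20, 1) >> 20));
	return 0;
}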