author    Linus Torvalds <torvalds@linux-foundation.org>    2008-05-15 18:28:46 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-05-15 18:28:46 -0700
commit    f26a3988917913b3d11b2bd741601a2c64ab9204 (patch)
tree      69b3da12c23ebe30a4ed0563648eb1bca5e83185 /arch/powerpc/mm
parent    ac0e9c30b1cb22c01f3edbb94857de2bae7611ca (diff)
parent    faa5b9daa8bd8a18b5b1f3a8dd79261503f7cdd3 (diff)
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] macintosh: Replace deprecated __initcall with device_initcall
  [POWERPC] cell: Fix section mismatches in io-workarounds code
  [POWERPC] spufs: Fix compile error
  [POWERPC] Fix uninitialized variable bug in copy_{to|from}_user
  [POWERPC] Add null pointer check to of_find_property
  [POWERPC] vmemmap fixes to use smaller pages
  [POWERPC] spufs: Fix pointer reference in find_victim
  [POWERPC] 85xx: SBC8548 - Add flash support and HW Rev reporting
  [POWERPC] 85xx: Fix some sparse warnings for 85xx MDS
  [POWERPC] 83xx: Enable DMA engine on the MPC8377 MDS board.
  [POWERPC] 86xx: mpc8610_hpcd: fix second serial port
  [POWERPC] 86xx: mpc8610_hpcd: add support for NOR and NAND flashes
  [POWERPC] 85xx: Add 8568 PHY workarounds to board code
  [POWERPC] 86xx: mpc8610_hpcd: use ULI526X driver for on-board ethernet
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  28
-rw-r--r--  arch/powerpc/mm/init_64.c        10
-rw-r--r--  arch/powerpc/mm/slb.c            16
-rw-r--r--  arch/powerpc/mm/slb_low.S        16
4 files changed, 59 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2b5a399f6fa6..0f2d239d94c4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
@@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
}
#endif /* CONFIG_PPC_64K_PAGES */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* We try to use 16M pages for vmemmap if that is supported
+  * and we have at least 1G of RAM at boot
+  */
+ if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+ lmb_phys_mem_size() >= 0x40000000)
+ mmu_vmemmap_psize = MMU_PAGE_16M;
+ else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+ mmu_vmemmap_psize = MMU_PAGE_64K;
+ else
+ mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
printk(KERN_DEBUG "Page orders: linear mapping = %d, "
- "virtual = %d, io = %d\n",
+ "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ ", vmemmap = %d"
+#endif
+ "\n",
mmu_psize_defs[mmu_linear_psize].shift,
mmu_psize_defs[mmu_virtual_psize].shift,
- mmu_psize_defs[mmu_io_psize].shift);
+ mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ ,mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+ );
#ifdef CONFIG_HUGETLB_PAGE
/* Init large page size. Currently, we pick 16M or 1M depending
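
The hunk above adds the vmemmap page-size heuristic: prefer 16M pages when the CPU supports them and at least 1G of RAM is present at boot (lmb_phys_mem_size() >= 0x40000000), otherwise fall back to 64K, then 4K. A minimal userspace sketch of that precedence check, with stand-in values for the kernel's mmu_psize_defs[].shift array (everything below is illustrative, not kernel code; a zero shift models an unsupported page size):

    #include <stdio.h>

    enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };

    static int pick_vmemmap_psize(const unsigned int shift[],
                                  unsigned long long phys_mem)
    {
        /* 16M pages if supported and >= 1G of RAM at boot, per the diff */
        if (shift[MMU_PAGE_16M] && phys_mem >= 0x40000000ULL)
            return MMU_PAGE_16M;
        if (shift[MMU_PAGE_64K])
            return MMU_PAGE_64K;
        return MMU_PAGE_4K;
    }

    int main(void)
    {
        /* e.g. a CPU with 4K and 16M pages but no 64K (shift 0 = absent) */
        unsigned int shift[MMU_PAGE_COUNT] = { 12, 0, 24 };
        printf("2G box   -> psize %d\n", pick_vmemmap_psize(shift, 2ULL << 30));
        printf("512M box -> psize %d\n", pick_vmemmap_psize(shift, 512ULL << 20));
        return 0;
    }

Gating the 16M choice on total RAM presumably avoids spending a full 16M bolted page on the tiny vmemmap of a small machine.
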
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c5ac532a0161..6aa65375abf5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,6 +19,8 @@
*
*/
+#undef DEBUG
+
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -208,12 +210,12 @@ int __meminit vmemmap_populated(unsigned long start, int page_size)
}
int __meminit vmemmap_populate(struct page *start_page,
- unsigned long nr_pages, int node)
+ unsigned long nr_pages, int node)
{
unsigned long mode_rw;
unsigned long start = (unsigned long)start_page;
unsigned long end = (unsigned long)(start_page + nr_pages);
- unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+ unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
@@ -235,11 +237,11 @@ int __meminit vmemmap_populate(struct page *start_page,
start, p, __pa(p));
mapped = htab_bolt_mapping(start, start + page_size,
- __pa(p), mode_rw, mmu_linear_psize,
+ __pa(p), mode_rw, mmu_vmemmap_psize,
mmu_kernel_ssize);
BUG_ON(mapped < 0);
}
return 0;
}
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
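
Switching vmemmap_populate() from mmu_linear_psize to mmu_vmemmap_psize changes how many bolted hash mappings back the struct page array. Rough numbers for an assumed 1TB machine with 64K base pages and a ~64-byte struct page (both figures are assumptions for illustration only):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long ram       = 1ULL << 40; /* assume 1TB of RAM */
        unsigned long long base_page = 1ULL << 16; /* assume 64K base pages */
        unsigned long long desc      = 64;         /* ~sizeof(struct page) */
        unsigned long long vmemmap   = ram / base_page * desc;

        printf("vmemmap size: %llu MB\n", vmemmap >> 20);
        printf("bolted mappings at 16M: %llu\n", vmemmap >> 24);
        printf("bolted mappings at 4K:  %llu\n", vmemmap >> 12);
        return 0;
    }

Under those assumptions a 1GB vmemmap needs 64 bolted 16M mappings but over 260,000 at 4K, which is why the heuristic reaches for the largest supported size first.
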
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cf8705e32d60..89497fb04280 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -28,7 +28,7 @@
#include <asm/udbg.h>
#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
#else
#define DBG pr_debug
#endif
@@ -263,13 +263,19 @@ void slb_initialize(void)
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ extern unsigned int *slb_miss_kernel_load_vmemmap;
+ unsigned long vmemmap_llp;
+#endif
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
io_llp = mmu_psize_defs[mmu_io_psize].sllp;
vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
@@ -281,6 +287,12 @@ void slb_initialize(void)
DBG("SLB: linear LLP = %04lx\n", linear_llp);
DBG("SLB: io LLP = %04lx\n", io_llp);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+ SLB_VSID_KERNEL | vmemmap_llp);
+ DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
}
get_paca()->stab_rr = SLB_NUM_BOLTED;
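
slb_miss_kernel_load_vmemmap labels an "li r11,0" placeholder in slb_low.S (next file), and patch_slb_encoding() rewrites its immediate at boot with SLB_VSID_KERNEL | vmemmap_llp. A minimal model of that style of fixup, assuming the standard 16-bit D field of the PowerPC addi/li encoding (the real kernel helper would also flush the icache, and the patched value below is illustrative, not the actual SLB flag bits):

    #include <stdio.h>
    #include <stdint.h>

    static void patch_li_immediate(uint32_t *insn, uint16_t imm)
    {
        /* keep the opcode and register fields, replace the immediate */
        *insn = (*insn & 0xffff0000u) | imm;
        /* the kernel would flush_icache_range() over this word here */
    }

    int main(void)
    {
        uint32_t insn = 0x39600000u;       /* "li r11,0" == addi r11,0,0 */
        patch_li_immediate(&insn, 0x0490); /* illustrative SLB flag value */
        printf("patched: 0x%08x\n", insn); /* 0x39600490 */
        return 0;
    }

Patching the instruction once at boot keeps the SLB miss fast path free of a runtime load of the page-size encoding.
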
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 657f6b37e9df..bc44dc4b5c67 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -47,8 +47,7 @@ _GLOBAL(slb_allocate_realmode)
* it to VSID 0, which is reserved as a bad VSID - one which
* will never have any pages in it. */
- /* Check if hitting the linear mapping of the vmalloc/ioremap
-  * kernel space
+ /* Check if hitting the linear mapping or some other kernel space
   */
bne cr7,1f
@@ -62,7 +61,18 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
b slb_finish_load_1T
-1: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
+1:
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* Check virtual memmap region. To be patched at kernel boot */
+ cmpldi cr0,r9,0xf
+ bne 1f
+_GLOBAL(slb_miss_kernel_load_vmemmap)
+ li r11,0
+ b 6f
+1:
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+ /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
* will be patched by the kernel at boot
*/
BEGIN_FTR_SECTION
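
The cmpldi cr0,r9,0xf above keys off the top four bits of the faulting effective address, which at this point in slb_allocate_realmode hold the region id; on the ppc64 layout of this era 0xc is the linear map, 0xd vmalloc/ioremap, and 0xf the vmemmap. A small C model of the dispatch the assembly performs (region values per that layout; the strings are just labels for the branch targets):

    #include <stdio.h>

    static const char *classify(unsigned long long ea)
    {
        switch (ea >> 60) {              /* region id = top nibble of EA */
        case 0xc: return "linear mapping";
        case 0xd: return "vmalloc/ioremap";
        case 0xf: return "vmemmap";      /* the new check in this patch */
        default:  return "user or bad address";
        }
    }

    int main(void)
    {
        printf("%s\n", classify(0xf000000000100000ULL)); /* vmemmap */
        printf("%s\n", classify(0xd000000000100000ULL)); /* vmalloc/ioremap */
        return 0;
    }
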