Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/dma-noncoherent.c | 20 ++++++++++++++++++++
 arch/powerpc/mm/hash_low_64.S     | 24 ++++++++++++------------
 arch/powerpc/mm/hash_utils_64.c   |  2 +-
 arch/powerpc/mm/mem.c             |  2 +-
 arch/powerpc/mm/numa.c            | 10 +++++-----
 arch/powerpc/mm/tlb_low_64e.S     |  6 +++---
 6 files changed, 42 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 757c0bed9a91..b42f76c4948d 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset,
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+ /* This should always be populated, so we don't test every
+ * level. If that fails, we'll have a nice crash which
+ * will be as good as a BUG_ON()
+ */
+ pgd_t *pgd = pgd_offset_k(cpu_addr);
+ pud_t *pud = pud_offset(pgd, cpu_addr);
+ pmd_t *pmd = pmd_offset(pud, cpu_addr);
+ pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+ if (pte_none(*ptep) || !pte_present(*ptep))
+ return 0;
+ return pte_pfn(*ptep);
+}
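
The new helper walks the kernel page tables for the mapping set up by __dma_alloc_coherent() and hands the PFN back to the mmap path. As a hedged illustration of the consumer side, a dma_mmap_coherent() backend on a non-cache-coherent platform could look roughly like the sketch below; the function name dma_noncoherent_mmap() and the exact pgprot handling are assumptions, only __dma_get_coherent_pfn() comes from this patch.

    /*
     * Hedged sketch of a dma_mmap_coherent() backend built on the new
     * helper. dma_noncoherent_mmap() is a hypothetical name; pgprot
     * choice and offset handling may differ in the real implementation.
     */
    static int dma_noncoherent_mmap(struct vm_area_struct *vma, void *cpu_addr)
    {
            unsigned long pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);

            if (!pfn)               /* no PTE, or PTE not present */
                    return -ENXIO;

            /* non-coherent memory stays uncached for userspace too */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

            return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }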
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 3079f6b44cf5..5b7dd4ea02b5 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -192,8 +192,8 @@ htab_insert_pte:
rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -215,8 +215,8 @@ _GLOBAL(htab_call_hpte_insert1)
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -495,8 +495,8 @@ htab_special_pfn:
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -522,8 +522,8 @@ _GLOBAL(htab_call_hpte_insert1)
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -813,8 +813,8 @@ ht64_insert_pte:
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -836,8 +836,8 @@ _GLOBAL(ht64_call_hpte_insert1)
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARM(r9)(r1) /* segment size */
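
All six hunks above correct the same comment in the register setup ahead of the indirect call through ppc_md.hpte_insert. For orientation, that setup maps onto the hook's C prototype roughly as sketched below (a reading of the struct machdep_calls member of this era; parameter names here are illustrative, the register annotations follow the comments in the assembly):

    /*
     * Sketch of the hpte_insert hook the stubs above branch to; the
     * r3..r9 setup corresponds to these seven arguments in order.
     */
    long (*hpte_insert)(unsigned long hpte_group, /* r3: (hash & mask) << 3 */
                        unsigned long va,         /* r4: virtual address */
                        unsigned long pa,         /* r5: physical address */
                        unsigned long rflags,     /* r6: new pp bits */
                        unsigned long vflags,     /* r7: 0 or HPTE_V_SECONDARY */
                        int psize,                /* r8: MMU_PAGE_4K or MMU_PAGE_64K */
                        int ssize);               /* r9: segment size */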
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a5991facddce..58a022d0f463 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -753,7 +753,7 @@ void __cpuinit early_init_mmu_secondary(void)
mtspr(SPRN_SDR1, _SDR1);
/* Initialize STAB/SLB. We use a virtual address as it works
- * in real mode on pSeries and we want a virutal address on
+ * in real mode on pSeries and we want a virtual address on
* iSeries anyway
*/
if (cpu_has_feature(CPU_FTR_SLB))
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index a66499650909..57e545b84bf1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -424,7 +424,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
clear_page(page);
/*
- * We shouldnt have to do this, but some versions of glibc
+ * We shouldn't have to do this, but some versions of glibc
* require it (ld.so assumes zero filled pages are icache clean)
* - Anton
*/
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0dc95c0aa3be..5ec1dad2a19d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -440,11 +440,11 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
}
/*
- * Retreive and validate the ibm,dynamic-memory property of the device tree.
+ * Retrieve and validate the ibm,dynamic-memory property of the device tree.
*
* The layout of the ibm,dynamic-memory property is a number N of memblock
* list entries followed by N memblock list entries. Each memblock list entry
- * contains information as layed out in the of_drconf_cell struct above.
+ * contains information as laid out in the of_drconf_cell struct above.
*/
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
@@ -468,7 +468,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
}
/*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retrieve and validate the ibm,lmb-size property for drconf memory
* from the device tree.
*/
static u64 of_get_lmb_size(struct device_node *memory)
@@ -490,7 +490,7 @@ struct assoc_arrays {
};
/*
- * Retreive and validate the list of associativity arrays for drconf
+ * Retrieve and validate the list of associativity arrays for drconf
* memory from the ibm,associativity-lookup-arrays property of the
* device tree..
*
@@ -604,7 +604,7 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
* Returns the size the region should have to enforce the memory limit.
* This will either be the original value of size, a truncated value,
* or zero. If the returned value of size is 0 the region should be
- * discarded as it lies wholy above the memory limit.
+ * discarded as it lies wholly above the memory limit.
*/
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
unsigned long size)
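
The corrected comments above describe the on-tree layout of ibm,dynamic-memory: a leading cell holding a count N, followed by N entries shaped like struct of_drconf_cell. A hedged sketch of a consumer loop over that property, using the two helpers named in the hunks (walk_drconf_memory() itself is a hypothetical wrapper; the validation and the fields touched per entry are illustrative):

    /*
     * Hypothetical walk over ibm,dynamic-memory as laid out above:
     * of_get_drconf_memory() validates the property and returns N,
     * read_drconf_cell() decodes one entry and advances the cursor.
     */
    static void walk_drconf_memory(struct device_node *memory)
    {
            const u32 *dm;
            int n, i;

            n = of_get_drconf_memory(memory, &dm);
            if (n <= 0)
                    return;

            for (i = 0; i < n; i++) {
                    struct of_drconf_cell drmem;

                    read_drconf_cell(&drmem, &dm);
                    /* act on drmem.base_addr, drmem.drc_index, drmem.flags */
            }
    }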
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 8526bd9d2aa3..af0892209417 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -192,7 +192,7 @@ normal_tlb_miss:
or r10,r15,r14
BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and seach for existing entry. Then load
+ /* Set the TLB reservation and search for existing entry. Then load
* the entry.
*/
PPC_TLBSRX_DOT(0,r16)
@@ -425,13 +425,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
virt_page_table_tlb_miss_fault:
/* If we fault here, things are a little bit tricky. We need to call
- * either data or instruction store fault, and we need to retreive
+ * either data or instruction store fault, and we need to retrieve
* the original fault address and ESR (for data).
*
* The thing is, we know that in normal circumstances, this is
* always called as a second level tlb miss for SW load or as a first
* level TLB miss for HW load, so we should be able to peek at the
- * relevant informations in the first exception frame in the PACA.
+ * relevant information in the first exception frame in the PACA.
*
* However, we do need to double check that, because we may just hit
* a stray kernel pointer or a userland attack trying to hit those