Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--    arch/sh/mm/pmb.c    417
1 file changed, 288 insertions(+), 129 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 198bcff5e96f..e43ec600afcf 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -15,7 +15,6 @@
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
@@ -23,7 +22,8 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
-#include <linux/rwlock.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -52,12 +52,24 @@ struct pmb_entry {
struct pmb_entry *link;
};
+static struct {
+ unsigned long size;
+ int flag;
+} pmb_sizes[] = {
+ { .size = SZ_512M, .flag = PMB_SZ_512M, },
+ { .size = SZ_128M, .flag = PMB_SZ_128M, },
+ { .size = SZ_64M, .flag = PMB_SZ_64M, },
+ { .size = SZ_16M, .flag = PMB_SZ_16M, },
+};
+
static void pmb_unmap_entry(struct pmb_entry *, int depth);
static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
+static unsigned int pmb_iomapping_enabled;
+
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
return (entry & PMB_E_MASK) << PMB_E_SHIFT;
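
Note (not part of the patch): the pmb_sizes[] table above is pure code motion -- it is deleted from its old location further down -- so that the new pmb_size_valid()/pmb_size_to_flags() helpers can reach it, and pmb_iomapping_enabled is a new flag gating the dynamic iomapping path added below (see early_pmb() at the end of the file). The entry pool itself is unchanged: NR_PMB_ENTRIES software descriptors in pmb_entry_list[] plus a bitmap tracking which hardware slots are in use. A rough sketch of the allocation that bitmap implies; the real pmb_alloc_entry() is not shown in full here and may differ:

    /*
     * Sketch only: claim the first free hardware slot recorded in
     * pmb_map, mirroring what the declarations above suggest.
     */
    static int example_alloc_entry(void)
    {
            int pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);

            if (pos >= NR_PMB_ENTRIES)
                    return -ENOSPC;

            __set_bit(pos, pmb_map);        /* slot now owned by the caller */
            return pos;
    }
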
@@ -73,6 +85,142 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry)
return mk_pmb_entry(entry) | PMB_DATA;
}
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+ return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
+}
+
+/*
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
+ */
+static __always_inline unsigned long pmb_cache_flags(void)
+{
+ unsigned long flags = 0;
+
+#if defined(CONFIG_CACHE_OFF)
+ flags |= PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITETHROUGH)
+ flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ flags |= PMB_C;
+#endif
+
+ return flags;
+}
+
+/*
+ * Convert typical pgprot value to the PMB equivalent
+ */
+static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
+{
+ unsigned long pmb_flags = 0;
+ u64 flags = pgprot_val(prot);
+
+ if (flags & _PAGE_CACHABLE)
+ pmb_flags |= PMB_C;
+ if (flags & _PAGE_WT)
+ pmb_flags |= PMB_WT | PMB_UB;
+
+ return pmb_flags;
+}
+
+static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+{
+ return (b->vpn == (a->vpn + a->size)) &&
+ (b->ppn == (a->ppn + a->size)) &&
+ (b->flags == a->flags);
+}
+
+static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
+ unsigned long size)
+{
+ int i;
+
+ read_lock(&pmb_rwlock);
+
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ struct pmb_entry *pmbe, *iter;
+ unsigned long span;
+
+ if (!test_bit(i, pmb_map))
+ continue;
+
+ pmbe = &pmb_entry_list[i];
+
+ /*
+ * See if VPN and PPN are bounded by an existing mapping.
+ */
+ if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
+ continue;
+ if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
+ continue;
+
+ /*
+ * Now see if we're in range of a simple mapping.
+ */
+ if (size <= pmbe->size) {
+ read_unlock(&pmb_rwlock);
+ return true;
+ }
+
+ span = pmbe->size;
+
+ /*
+ * Finally for sizes that involve compound mappings, walk
+ * the chain.
+ */
+ for (iter = pmbe->link; iter; iter = iter->link)
+ span += iter->size;
+
+ /*
+ * Nothing else to do if the range requirements are met.
+ */
+ if (size <= span) {
+ read_unlock(&pmb_rwlock);
+ return true;
+ }
+ }
+
+ read_unlock(&pmb_rwlock);
+ return false;
+}
+
+static bool pmb_size_valid(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return true;
+
+ return false;
+}
+
+static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
+{
+ return (addr >= P1SEG && (addr + size - 1) < P3SEG);
+}
+
+static inline bool pmb_prot_valid(pgprot_t prot)
+{
+ return (pgprot_val(prot) & _PAGE_USER) == 0;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return pmb_sizes[i].flag;
+
+ return 0;
+}
+
static int pmb_alloc_entry(void)
{
int pos;
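
Note (not part of the patch): pmb_size_valid() and pmb_size_to_flags() only recognise the four hardware sizes; a request that is larger, or not itself a single hardware size, is carved into several entries by the greedy loop in pmb_bolt_mapping() further down, which always retries the same granule before dropping to a smaller one. A rough standalone sketch of that decomposition (the ex_* names are hypothetical); a 208MB request, for example, works out to 128MB + 64MB + 16MB, i.e. three PMB entries:

    /* Sketch only: count how many PMB entries a request would consume. */
    static const unsigned long ex_sizes[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };

    static int ex_entries_needed(unsigned long size)
    {
            int i = 0, n = 0;

            while (i < ARRAY_SIZE(ex_sizes) && size >= SZ_16M) {
                    if (size < ex_sizes[i]) {
                            i++;                    /* granule too big, step down */
                            continue;
                    }
                    size -= ex_sizes[i];            /* consume one entry of this size */
                    n++;                            /* and retry the same granule */
            }
            return n;
    }
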
@@ -140,33 +288,22 @@ static void pmb_free(struct pmb_entry *pmbe)
}
/*
- * Ensure that the PMB entries match our cache configuration.
- *
- * When we are in 32-bit address extended mode, CCR.CB becomes
- * invalid, so care must be taken to manually adjust cacheable
- * translations.
+ * Must be run uncached.
*/
-static __always_inline unsigned long pmb_cache_flags(void)
+static void __set_pmb_entry(struct pmb_entry *pmbe)
{
- unsigned long flags = 0;
+ unsigned long addr, data;
-#if defined(CONFIG_CACHE_WRITETHROUGH)
- flags |= PMB_C | PMB_WT | PMB_UB;
-#elif defined(CONFIG_CACHE_WRITEBACK)
- flags |= PMB_C;
-#endif
+ addr = mk_pmb_addr(pmbe->entry);
+ data = mk_pmb_data(pmbe->entry);
- return flags;
-}
+ jump_to_uncached();
-/*
- * Must be run uncached.
- */
-static void __set_pmb_entry(struct pmb_entry *pmbe)
-{
- writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
- writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
- mk_pmb_data(pmbe->entry));
+ /* Set V-bit */
+ __raw_writel(pmbe->vpn | PMB_V, addr);
+ __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);
+
+ back_to_cached();
}
static void __clear_pmb_entry(struct pmb_entry *pmbe)
@@ -185,6 +322,7 @@ static void __clear_pmb_entry(struct pmb_entry *pmbe)
writel_uncached(data_val & ~PMB_V, data);
}
+#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
unsigned long flags;
@@ -193,145 +331,157 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
__set_pmb_entry(pmbe);
spin_unlock_irqrestore(&pmbe->lock, flags);
}
+#endif /* CONFIG_PM */
-static struct {
- unsigned long size;
- int flag;
-} pmb_sizes[] = {
- { .size = SZ_512M, .flag = PMB_SZ_512M, },
- { .size = SZ_128M, .flag = PMB_SZ_128M, },
- { .size = SZ_64M, .flag = PMB_SZ_64M, },
- { .size = SZ_16M, .flag = PMB_SZ_16M, },
-};
-
-long pmb_remap(unsigned long vaddr, unsigned long phys,
- unsigned long size, pgprot_t prot)
+int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
+ unsigned long size, pgprot_t prot)
{
struct pmb_entry *pmbp, *pmbe;
- unsigned long wanted;
- int pmb_flags, i;
- long err;
- u64 flags;
-
- flags = pgprot_val(prot);
+ unsigned long orig_addr, orig_size;
+ unsigned long flags, pmb_flags;
+ int i, mapped;
- pmb_flags = PMB_WT | PMB_UB;
+ if (!pmb_addr_valid(vaddr, size))
+ return -EFAULT;
+ if (pmb_mapping_exists(vaddr, phys, size))
+ return 0;
- /* Convert typical pgprot value to the PMB equivalent */
- if (flags & _PAGE_CACHABLE) {
- pmb_flags |= PMB_C;
+ orig_addr = vaddr;
+ orig_size = size;
- if ((flags & _PAGE_WT) == 0)
- pmb_flags &= ~(PMB_WT | PMB_UB);
- }
+ flush_tlb_kernel_range(vaddr, vaddr + size);
+ pmb_flags = pgprot_to_pmb_flags(prot);
pmbp = NULL;
- wanted = size;
-again:
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
- unsigned long flags;
+ do {
+ for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+ if (size < pmb_sizes[i].size)
+ continue;
+
+ pmbe = pmb_alloc(vaddr, phys, pmb_flags |
+ pmb_sizes[i].flag, PMB_NO_ENTRY);
+ if (IS_ERR(pmbe)) {
+ pmb_unmap_entry(pmbp, mapped);
+ return PTR_ERR(pmbe);
+ }
- if (size < pmb_sizes[i].size)
- continue;
+ spin_lock_irqsave(&pmbe->lock, flags);
- pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
- PMB_NO_ENTRY);
- if (IS_ERR(pmbe)) {
- err = PTR_ERR(pmbe);
- goto out;
- }
+ pmbe->size = pmb_sizes[i].size;
- spin_lock_irqsave(&pmbe->lock, flags);
+ __set_pmb_entry(pmbe);
- __set_pmb_entry(pmbe);
+ phys += pmbe->size;
+ vaddr += pmbe->size;
+ size -= pmbe->size;
- phys += pmb_sizes[i].size;
- vaddr += pmb_sizes[i].size;
- size -= pmb_sizes[i].size;
+ /*
+ * Link adjacent entries that span multiple PMB
+ * entries for easier tear-down.
+ */
+ if (likely(pmbp)) {
+ spin_lock(&pmbp->lock);
+ pmbp->link = pmbe;
+ spin_unlock(&pmbp->lock);
+ }
- pmbe->size = pmb_sizes[i].size;
+ pmbp = pmbe;
- /*
- * Link adjacent entries that span multiple PMB entries
- * for easier tear-down.
- */
- if (likely(pmbp)) {
- spin_lock(&pmbp->lock);
- pmbp->link = pmbe;
- spin_unlock(&pmbp->lock);
+ /*
+ * Instead of trying smaller sizes on every
+ * iteration (even if we succeed in allocating
+ * space), try using pmb_sizes[i].size again.
+ */
+ i--;
+ mapped++;
+
+ spin_unlock_irqrestore(&pmbe->lock, flags);
}
+ } while (size >= SZ_16M);
- pmbp = pmbe;
+ flush_cache_vmap(orig_addr, orig_addr + orig_size);
- /*
- * Instead of trying smaller sizes on every iteration
- * (even if we succeed in allocating space), try using
- * pmb_sizes[i].size again.
- */
- i--;
+ return 0;
+}
- spin_unlock_irqrestore(&pmbe->lock, flags);
- }
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+ pgprot_t prot, void *caller)
+{
+ unsigned long vaddr;
+ phys_addr_t offset, last_addr;
+ phys_addr_t align_mask;
+ unsigned long aligned;
+ struct vm_struct *area;
+ int i, ret;
- if (size >= SZ_16M)
- goto again;
+ if (!pmb_iomapping_enabled)
+ return NULL;
- return wanted - size;
+ /*
+ * Small mappings need to go through the TLB.
+ */
+ if (size < SZ_16M)
+ return ERR_PTR(-EINVAL);
+ if (!pmb_prot_valid(prot))
+ return ERR_PTR(-EINVAL);
-out:
- pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (size >= pmb_sizes[i].size)
+ break;
+
+ last_addr = phys + size;
+ align_mask = ~(pmb_sizes[i].size - 1);
+ offset = phys & ~align_mask;
+ phys &= align_mask;
+ aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
+
+ /*
+ * XXX: This should really start from uncached_end, but this
+ * causes the MMU to reset, so for now we restrict it to the
+ * 0xb000...0xc000 range.
+ */
+ area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
+ P3SEG, caller);
+ if (!area)
+ return NULL;
+
+ area->phys_addr = phys;
+ vaddr = (unsigned long)area->addr;
- return err;
+ ret = pmb_bolt_mapping(vaddr, phys, size, prot);
+ if (unlikely(ret != 0))
+ return ERR_PTR(ret);
+
+ return (void __iomem *)(offset + (char *)vaddr);
}
-void pmb_unmap(unsigned long addr)
+int pmb_unmap(void __iomem *addr)
{
struct pmb_entry *pmbe = NULL;
- int i;
+ unsigned long vaddr = (unsigned long __force)addr;
+ int i, found = 0;
read_lock(&pmb_rwlock);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
if (test_bit(i, pmb_map)) {
pmbe = &pmb_entry_list[i];
- if (pmbe->vpn == addr)
+ if (pmbe->vpn == vaddr) {
+ found = 1;
break;
+ }
}
}
read_unlock(&pmb_rwlock);
- pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
-}
-
-static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
-{
- return (b->vpn == (a->vpn + a->size)) &&
- (b->ppn == (a->ppn + a->size)) &&
- (b->flags == a->flags);
-}
-
-static bool pmb_size_valid(unsigned long size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
- if (pmb_sizes[i].size == size)
- return true;
-
- return false;
-}
-
-static int pmb_size_to_flags(unsigned long size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
- if (pmb_sizes[i].size == size)
- return pmb_sizes[i].flag;
+ if (found) {
+ pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
+ return 0;
+ }
- return 0;
+ return -EINVAL;
}
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
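
Note (not part of the patch): pmb_remap_caller() is the dynamic path that replaces the old pmb_remap() body. It rejects anything under 16MB (those belong in the TLB) and anything with user protections, picks the largest hardware size that fits as the alignment granule, reserves virtual space in the 0xb0000000..P3SEG window, bolts the mapping with the physical base rounded down to that granule, and hands back the virtual address plus the original offset. A worked example of the arithmetic, with purely hypothetical addresses:

    /*
     * Sketch only: a hypothetical 28MB request at phys 0x10400000.
     * The largest hardware size not exceeding 28MB is 16MB, so 16MB
     * becomes the alignment granule.
     */
    phys_addr_t   phys       = 0x10400000;
    unsigned long size       = 28 << 20;                    /* 0x01c00000 */
    phys_addr_t   last_addr  = phys + size;                 /* 0x12000000 */
    phys_addr_t   align_mask = ~((phys_addr_t)SZ_16M - 1);
    phys_addr_t   offset     = phys & ~align_mask;          /* 0x00400000 */
    unsigned long aligned;

    phys &= align_mask;                                     /* 0x10000000 */
    aligned = ALIGN(last_addr, SZ_16M) - phys;              /* 0x02000000 */

    /*
     * 'aligned' (32MB here) sizes the vm area from __get_vm_area_caller();
     * pmb_bolt_mapping() then gets the rounded-down phys and the original
     * size, and the cookie returned to the caller is area->addr + offset.
     */
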
@@ -351,6 +501,8 @@ static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
*/
__clear_pmb_entry(pmbe);
+ flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);
+
pmbe = pmblink->link;
pmb_free(pmblink);
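
Note (not part of the patch): with flush_cache_vunmap() added here and flush_cache_vmap() added in pmb_bolt_mapping() above, the cache maintenance now brackets a bolted window's lifetime the same way vmap/vunmap do. A minimal sketch of that pairing, using hypothetical addresses inside the valid P1..P3 range:

    /* Sketch only: bolt a 64MB window, then tear it down again. */
    static int __init example_bolt_and_unmap(void)
    {
            unsigned long vaddr = 0xb0000000;       /* hypothetical virtual base  */
            phys_addr_t phys    = 0x10000000;       /* hypothetical physical base */
            int ret;

            /* ends with flush_cache_vmap(vaddr, vaddr + SZ_64M) */
            ret = pmb_bolt_mapping(vaddr, phys, SZ_64M, PAGE_KERNEL);
            if (unlikely(ret))
                    return ret;

            /*
             * __pmb_unmap_entry() now flushes each entry's window with
             * flush_cache_vunmap() before clearing its PMB_V bit.
             */
            return pmb_unmap((void __iomem *)vaddr);
    }
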
@@ -369,11 +521,6 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
write_unlock_irqrestore(&pmb_rwlock, flags);
}
-static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
-{
- return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
-}
-
static void __init pmb_notify(void)
{
int i;
@@ -625,6 +772,18 @@ static void __init pmb_resize(void)
}
#endif
+static int __init early_pmb(char *p)
+{
+ if (!p)
+ return 0;
+
+ if (strstr(p, "iomap"))
+ pmb_iomapping_enabled = 1;
+
+ return 0;
+}
+early_param("pmb", early_pmb);
+
void __init pmb_init(void)
{
/* Synchronize software state */
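
Note (not part of the patch): the dynamic iomapping behaviour is strictly opt-in. pmb_iomapping_enabled stays zero, and pmb_remap_caller() returns NULL immediately, unless the kernel is booted with the new "pmb=iomap" parameter, e.g. a command line along the lines of (the other arguments are placeholders):

    console=ttySC0,115200 root=/dev/sda1 pmb=iomap
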
@@ -644,7 +803,7 @@ void __init pmb_init(void)
writel_uncached(0, PMB_IRMCR);
/* Flush out the TLB */
- __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+ local_flush_tlb_all();
ctrl_barrier();
}
@@ -713,7 +872,7 @@ static int __init pmb_debugfs_init(void)
return 0;
}
-postcore_initcall(pmb_debugfs_init);
+subsys_initcall(pmb_debugfs_init);
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)