From a0ab36689a36e583b6e736f1c99ac8c9aebdad59 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 13 Jan 2010 18:31:48 +0900 Subject: sh: fixed PMB mode refactoring. This introduces some much overdue chainsawing of the fixed PMB support. fixed PMB was introduced initially to work around the fact that dynamic PMB mode was relatively broken, though they were never intended to converge. The main areas where there are differences are whether the system is booted in 29-bit mode or 32-bit mode, and whether legacy mappings are to be preserved. Any system booting in true 32-bit mode will not care about legacy mappings, so these are roughly decoupled. Regardless of the entry point, PMB and 32BIT are directly related as far as the kernel is concerned, so we also switch back to having one select the other. With legacy mappings iterated through and applied in the initialization path it's now possible to finally merge the two implementations and permit dynamic remapping overtop of remaining entries regardless of whether boot mappings are crafted by hand or inherited from the boot loader. Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 106 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 61 insertions(+), 45 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 280f6a166035..8f7dbf183fb0 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -3,7 +3,7 @@ * * Privileged Space Mapping Buffer (PMB) Support. * - * Copyright (C) 2005, 2006, 2007 Paul Mundt + * Copyright (C) 2005 - 2010 Paul Mundt * * P1/P2 Section mapping definitions from map32.h, which was: * @@ -279,51 +279,12 @@ static void __pmb_unmap(struct pmb_entry *pmbe) } while (pmbe); } -#ifdef CONFIG_PMB -int __uses_jump_to_uncached pmb_init(void) -{ - unsigned int i; - long size, ret; - - jump_to_uncached(); - - /* - * Insert PMB entries for the P1 and P2 areas so that, after - * we've switched the MMU to 32-bit mode, the semantics of P1 - * and P2 are the same as in 29-bit mode, e.g. - * - * P1 - provides a cached window onto physical memory - * P2 - provides an uncached window onto physical memory - */ - size = __MEMORY_START + __MEMORY_SIZE; - - ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); - BUG_ON(ret != size); - - ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); - BUG_ON(ret != size); - - ctrl_outl(0, PMB_IRMCR); - - /* PMB.SE and UB[7] */ - ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); - - /* Flush out the TLB */ - i = ctrl_inl(MMUCR); - i |= MMUCR_TI; - ctrl_outl(i, MMUCR); - - back_to_cached(); - - return 0; -} -#else -int __uses_jump_to_uncached pmb_init(void) +#ifdef CONFIG_PMB_LEGACY +static int pmb_apply_legacy_mappings(void) { int i; unsigned long addr, data; - - jump_to_uncached(); + unsigned int applied = 0; for (i = 0; i < PMB_ENTRY_MAX; i++) { struct pmb_entry *pmbe; @@ -357,13 +318,69 @@ int __uses_jump_to_uncached pmb_init(void) pmbe = pmb_alloc(vpn, ppn, flags, i); WARN_ON(IS_ERR(pmbe)); + + applied++; + } + + return (applied == 0); +} +#else +static inline int pmb_apply_legacy_mappings(void) +{ + return 1; +} +#endif + +int __uses_jump_to_uncached pmb_init(void) +{ + unsigned int i; + unsigned long size, ret; + + jump_to_uncached(); + + /* + * Attempt to apply the legacy boot mappings if configured. If + * this is successful then we simply carry on with those and + * don't bother establishing additional memory mappings. Dynamic + * device mappings through pmb_remap() can still be bolted on + * after this. 
+ */ + ret = pmb_apply_legacy_mappings(); + if (ret == 0) { + back_to_cached(); + return 0; } + /* + * Insert PMB entries for the P1 and P2 areas so that, after + * we've switched the MMU to 32-bit mode, the semantics of P1 + * and P2 are the same as in 29-bit mode, e.g. + * + * P1 - provides a cached window onto physical memory + * P2 - provides an uncached window onto physical memory + */ + size = (unsigned long)__MEMORY_START + __MEMORY_SIZE; + + ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); + BUG_ON(ret != size); + + ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); + BUG_ON(ret != size); + + ctrl_outl(0, PMB_IRMCR); + + /* PMB.SE and UB[7] */ + ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); + + /* Flush out the TLB */ + i = ctrl_inl(MMUCR); + i |= MMUCR_TI; + ctrl_outl(i, MMUCR); + back_to_cached(); return 0; } -#endif /* CONFIG_PMB */ static int pmb_seq_show(struct seq_file *file, void *iter) { @@ -462,6 +479,5 @@ static int __init pmb_sysdev_init(void) { return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver); } - subsys_initcall(pmb_sysdev_init); #endif -- cgit v1.2.3 From 3d467676abf5f01f5ee99056273a58486968e252 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Mon, 18 Jan 2010 19:33:10 +0900 Subject: sh: Setup early PMB mappings. More and more boards are going to start shipping that boot with the MMU in 32BIT mode by default. Previously we relied on the bootloader to setup PMB mappings for use by the kernel but we also need to cater for boards whose bootloaders don't set them up. If CONFIG_PMB_LEGACY is not enabled we have full control over our PMB mappings and can compress our address space. Usually, the distance between the the cached and uncached mappings of RAM is always 512MB, however we can compress the distance to be the amount of RAM on the board. pmb_init() now becomes much simpler. It no longer has to calculate any mappings, it just has to synchronise the software PMB table with the hardware. Tested on SDK7786 and SH7785LCR. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 156 +++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 105 insertions(+), 51 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 8f7dbf183fb0..b796b6c021b4 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -3,11 +3,8 @@ * * Privileged Space Mapping Buffer (PMB) Support. * - * Copyright (C) 2005 - 2010 Paul Mundt - * - * P1/P2 Section mapping definitions from map32.h, which was: - * - * Copyright 2003 (c) Lineo Solutions,Inc. + * Copyright (C) 2005 - 2010 Paul Mundt + * Copyright (C) 2010 Matt Fleming * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive @@ -280,46 +277,82 @@ static void __pmb_unmap(struct pmb_entry *pmbe) } #ifdef CONFIG_PMB_LEGACY +static inline unsigned int pmb_ppn_in_range(unsigned long ppn) +{ + return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE; +} + static int pmb_apply_legacy_mappings(void) { - int i; - unsigned long addr, data; unsigned int applied = 0; + int i; - for (i = 0; i < PMB_ENTRY_MAX; i++) { - struct pmb_entry *pmbe; - unsigned long vpn, ppn, flags; - - addr = PMB_DATA + (i << PMB_E_SHIFT); - data = ctrl_inl(addr); - if (!(data & PMB_V)) - continue; + pr_info("PMB: Preserving legacy mappings:\n"); - if (data & PMB_C) { -#if defined(CONFIG_CACHE_WRITETHROUGH) - data |= PMB_WT; -#elif defined(CONFIG_CACHE_WRITEBACK) - data &= ~PMB_WT; -#else - data &= ~(PMB_C | PMB_WT); -#endif - } - ctrl_outl(data, addr); - - ppn = data & PMB_PFN_MASK; + /* + * The following entries are setup by the bootloader. + * + * Entry VPN PPN V SZ C UB + * -------------------------------------------------------- + * 0 0xA0000000 0x00000000 1 64MB 0 0 + * 1 0xA4000000 0x04000000 1 16MB 0 0 + * 2 0xA6000000 0x08000000 1 16MB 0 0 + * 9 0x88000000 0x48000000 1 128MB 1 1 + * 10 0x90000000 0x50000000 1 128MB 1 1 + * 11 0x98000000 0x58000000 1 128MB 1 1 + * 13 0xA8000000 0x48000000 1 128MB 0 0 + * 14 0xB0000000 0x50000000 1 128MB 0 0 + * 15 0xB8000000 0x58000000 1 128MB 0 0 + * + * The only entries the we need are the ones that map the kernel + * at the cached and uncached addresses. + */ + for (i = 0; i < PMB_ENTRY_MAX; i++) { + unsigned long addr, data; + unsigned long addr_val, data_val; + unsigned long ppn, vpn; - flags = data & (PMB_C | PMB_WT | PMB_UB); - flags |= data & PMB_SZ_MASK; + addr = mk_pmb_addr(i); + data = mk_pmb_data(i); - addr = PMB_ADDR + (i << PMB_E_SHIFT); - data = ctrl_inl(addr); + addr_val = __raw_readl(addr); + data_val = __raw_readl(data); - vpn = data & PMB_PFN_MASK; + /* + * Skip over any bogus entries + */ + if (!(data_val & PMB_V) || !(addr_val & PMB_V)) + continue; - pmbe = pmb_alloc(vpn, ppn, flags, i); - WARN_ON(IS_ERR(pmbe)); + ppn = data_val & PMB_PFN_MASK; + vpn = addr_val & PMB_PFN_MASK; - applied++; + /* + * Only preserve in-range mappings. + */ + if (pmb_ppn_in_range(ppn)) { + unsigned int size; + char *sz_str = NULL; + + size = data_val & PMB_SZ_MASK; + + sz_str = (size == PMB_SZ_16M) ? " 16MB": + (size == PMB_SZ_64M) ? " 64MB": + (size == PMB_SZ_128M) ? "128MB": + "512MB"; + + pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n", + vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str, + (data_val & PMB_C) ? "" : "un"); + + applied++; + } else { + /* + * Invalidate anything out of bounds. + */ + __raw_writel(addr_val & ~PMB_V, addr); + __raw_writel(data_val & ~PMB_V, data); + } } return (applied == 0); @@ -333,8 +366,9 @@ static inline int pmb_apply_legacy_mappings(void) int __uses_jump_to_uncached pmb_init(void) { - unsigned int i; - unsigned long size, ret; + int i; + unsigned long addr, data; + unsigned long ret; jump_to_uncached(); @@ -352,25 +386,45 @@ int __uses_jump_to_uncached pmb_init(void) } /* - * Insert PMB entries for the P1 and P2 areas so that, after - * we've switched the MMU to 32-bit mode, the semantics of P1 - * and P2 are the same as in 29-bit mode, e.g. - * - * P1 - provides a cached window onto physical memory - * P2 - provides an uncached window onto physical memory + * Sync our software copy of the PMB mappings with those in + * hardware. 
The mappings in the hardware PMB were either set up + * by the bootloader or very early on by the kernel. */ - size = (unsigned long)__MEMORY_START + __MEMORY_SIZE; + for (i = 0; i < PMB_ENTRY_MAX; i++) { + struct pmb_entry *pmbe; + unsigned long vpn, ppn, flags; - ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); - BUG_ON(ret != size); + addr = PMB_DATA + (i << PMB_E_SHIFT); + data = ctrl_inl(addr); + if (!(data & PMB_V)) + continue; - ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); - BUG_ON(ret != size); + if (data & PMB_C) { +#if defined(CONFIG_CACHE_WRITETHROUGH) + data |= PMB_WT; +#elif defined(CONFIG_CACHE_WRITEBACK) + data &= ~PMB_WT; +#else + data &= ~(PMB_C | PMB_WT); +#endif + } + ctrl_outl(data, addr); - ctrl_outl(0, PMB_IRMCR); + ppn = data & PMB_PFN_MASK; + + flags = data & (PMB_C | PMB_WT | PMB_UB); + flags |= data & PMB_SZ_MASK; - /* PMB.SE and UB[7] */ - ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); + addr = PMB_ADDR + (i << PMB_E_SHIFT); + data = ctrl_inl(addr); + + vpn = data & PMB_PFN_MASK; + + pmbe = pmb_alloc(vpn, ppn, flags, i); + WARN_ON(IS_ERR(pmbe)); + } + + ctrl_outl(0, PMB_IRMCR); /* Flush out the TLB */ i = ctrl_inl(MMUCR); -- cgit v1.2.3 From 2efa53b269ec1e9289a108e1506f53f6f1de440b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 20 Jan 2010 16:40:48 +0900 Subject: sh: Make 29/32-bit mode check helper generally available. Presently __in_29bit_mode() is only defined for the PMB case, but it's also easily derived from the CONFIG_29BIT and CONFIG_32BIT && CONFIG_PMB=n cases. Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index b796b6c021b4..d318fa6caffe 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -436,6 +436,11 @@ int __uses_jump_to_uncached pmb_init(void) return 0; } +bool __in_29bit_mode(void) +{ + return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0; +} + static int pmb_seq_show(struct seq_file *file, void *iter) { int i; -- cgit v1.2.3 From 2dc2f8e0c46864e2a3722c84eaa96513d4cf8b2f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 21 Jan 2010 16:05:25 +0900 Subject: sh: Kill off the special uncached section and fixmap. Now that cached_to_uncached works as advertized in 32-bit mode and we're never going to be able to map < 16MB anyways, there's no need for the special uncached section. Kill it off. 
Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index d318fa6caffe..3d5eece7e6d0 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -127,14 +127,14 @@ static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); } -static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) +static void set_pmb_entry(struct pmb_entry *pmbe) { jump_to_uncached(); __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); back_to_cached(); } -static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) +static void clear_pmb_entry(struct pmb_entry *pmbe) { unsigned int entry = pmbe->entry; unsigned long addr; @@ -364,7 +364,7 @@ static inline int pmb_apply_legacy_mappings(void) } #endif -int __uses_jump_to_uncached pmb_init(void) +int pmb_init(void) { int i; unsigned long addr, data; -- cgit v1.2.3 From 9d56dd3b083a3bec56e9da35ce07baca81030b03 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 26 Jan 2010 12:58:40 +0900 Subject: sh: Mass ctrl_in/outX to __raw_read/writeX conversion. The old ctrl in/out routines are non-portable and unsuitable for cross-platform use. While drivers/sh has already been sanitized, there is still quite a lot of code that is not. This converts the arch/sh/ bits over, which permits us to flag the routines as deprecated whilst still building with -Werror for the architecture code, and to ensure that future users are not added. Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 3d5eece7e6d0..3c9bf5b5c36f 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -112,7 +112,7 @@ static void pmb_free(struct pmb_entry *pmbe) static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, unsigned long flags, int pos) { - ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); + __raw_writel(vpn | PMB_V, mk_pmb_addr(pos)); #ifdef CONFIG_CACHE_WRITETHROUGH /* @@ -124,7 +124,7 @@ static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, flags |= PMB_WT; #endif - ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); + __raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos)); } static void set_pmb_entry(struct pmb_entry *pmbe) @@ -146,10 +146,10 @@ static void clear_pmb_entry(struct pmb_entry *pmbe) /* Clear V-bit */ addr = mk_pmb_addr(entry); - ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); + __raw_writel(__raw_readl(addr) & ~PMB_V, addr); addr = mk_pmb_data(entry); - ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); + __raw_writel(__raw_readl(addr) & ~PMB_V, addr); back_to_cached(); } @@ -395,7 +395,7 @@ int pmb_init(void) unsigned long vpn, ppn, flags; addr = PMB_DATA + (i << PMB_E_SHIFT); - data = ctrl_inl(addr); + data = __raw_readl(addr); if (!(data & PMB_V)) continue; @@ -408,7 +408,7 @@ int pmb_init(void) data &= ~(PMB_C | PMB_WT); #endif } - ctrl_outl(data, addr); + __raw_writel(data, addr); ppn = data & PMB_PFN_MASK; @@ -416,7 +416,7 @@ int pmb_init(void) flags |= data & PMB_SZ_MASK; addr = PMB_ADDR + (i << PMB_E_SHIFT); - data = ctrl_inl(addr); + data = __raw_readl(addr); vpn = data & PMB_PFN_MASK; @@ -424,12 +424,12 @@ int pmb_init(void) WARN_ON(IS_ERR(pmbe)); } - ctrl_outl(0, PMB_IRMCR); + __raw_writel(0, PMB_IRMCR); /* Flush out the TLB */ - i = ctrl_inl(MMUCR); + i = __raw_readl(MMUCR); i |= MMUCR_TI; - 
ctrl_outl(i, MMUCR); + __raw_writel(i, MMUCR); back_to_cached(); @@ -454,8 +454,8 @@ static int pmb_seq_show(struct seq_file *file, void *iter) unsigned int size; char *sz_str = NULL; - addr = ctrl_inl(mk_pmb_addr(i)); - data = ctrl_inl(mk_pmb_data(i)); + addr = __raw_readl(mk_pmb_addr(i)); + data = __raw_readl(mk_pmb_data(i)); size = data & PMB_SZ_MASK; sz_str = (size == PMB_SZ_16M) ? " 16MB": -- cgit v1.2.3 From 55cef91a5d553265f03fe159f9fcdfac36902248 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 16 Feb 2010 17:14:04 +0900 Subject: sh: Prevent fixed slot PMB remapping from clobbering boot entries. The PMB initialization code walks the entries and synchronizes the software PMB state with the hardware mappings, preserving the slot index. Unfortunately pmb_alloc() only tested the bit position in the entry map and failed to set it, resulting in subsequent remaps being able to be dynamically assigned a slot that trampled an existing boot mapping with general badness ensuing. Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 3c9bf5b5c36f..a06483076a41 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -77,7 +77,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, if (pos < 0) return ERR_PTR(pos); } else { - if (test_bit(entry, &pmb_map)) + if (test_and_set_bit(entry, &pmb_map)) return ERR_PTR(-ENOSPC); pos = entry; } -- cgit v1.2.3 From efd54ea315f645ef318708aab5714a5f1f432d03 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 16 Feb 2010 18:39:30 +0900 Subject: sh: Merge the legacy PMB mapping and entry synchronization code. This merges the code for iterating over the legacy PMB mappings and the code for synchronizing software state with the hardware mappings. There's really no reason to do the same iteration twice, and this also buys us the legacy entry logging facility for the dynamic PMB case. Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 162 ++++++++++++++++++++++++------------------------------- 1 file changed, 69 insertions(+), 93 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index a06483076a41..f822f83418e4 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -276,41 +276,57 @@ static void __pmb_unmap(struct pmb_entry *pmbe) } while (pmbe); } -#ifdef CONFIG_PMB_LEGACY +static inline void +pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn) +{ + unsigned int size; + const char *sz_str; + + size = data_val & PMB_SZ_MASK; + + sz_str = (size == PMB_SZ_16M) ? " 16MB": + (size == PMB_SZ_64M) ? " 64MB": + (size == PMB_SZ_128M) ? "128MB": + "512MB"; + + pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n", + vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str, + (data_val & PMB_C) ? "" : "un"); +} + static inline unsigned int pmb_ppn_in_range(unsigned long ppn) { - return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE; + return ppn >= __pa(memory_start) && ppn < __pa(memory_end); } -static int pmb_apply_legacy_mappings(void) +static int pmb_synchronize_mappings(void) { unsigned int applied = 0; int i; - pr_info("PMB: Preserving legacy mappings:\n"); + pr_info("PMB: boot mappings:\n"); /* - * The following entries are setup by the bootloader. + * Run through the initial boot mappings, log the established + * ones, and blow away anything that falls outside of the valid + * PPN range. 
Specifically, we only care about existing mappings + * that impact the cached/uncached sections. * - * Entry VPN PPN V SZ C UB - * -------------------------------------------------------- - * 0 0xA0000000 0x00000000 1 64MB 0 0 - * 1 0xA4000000 0x04000000 1 16MB 0 0 - * 2 0xA6000000 0x08000000 1 16MB 0 0 - * 9 0x88000000 0x48000000 1 128MB 1 1 - * 10 0x90000000 0x50000000 1 128MB 1 1 - * 11 0x98000000 0x58000000 1 128MB 1 1 - * 13 0xA8000000 0x48000000 1 128MB 0 0 - * 14 0xB0000000 0x50000000 1 128MB 0 0 - * 15 0xB8000000 0x58000000 1 128MB 0 0 + * Note that touching these can be a bit of a minefield; the boot + * loader can establish multi-page mappings with the same caching + * attributes, so we need to ensure that we aren't modifying a + * mapping that we're presently executing from, or may execute + * from in the case of straddling page boundaries. * - * The only entries the we need are the ones that map the kernel - * at the cached and uncached addresses. + * In the future we will have to tidy up after the boot loader by + * jumping between the cached and uncached mappings and tearing + * down alternating mappings while executing from the other. */ for (i = 0; i < PMB_ENTRY_MAX; i++) { unsigned long addr, data; unsigned long addr_val, data_val; - unsigned long ppn, vpn; + unsigned long ppn, vpn, flags; + struct pmb_entry *pmbe; addr = mk_pmb_addr(i); data = mk_pmb_data(i); @@ -330,106 +346,66 @@ static int pmb_apply_legacy_mappings(void) /* * Only preserve in-range mappings. */ - if (pmb_ppn_in_range(ppn)) { - unsigned int size; - char *sz_str = NULL; - - size = data_val & PMB_SZ_MASK; - - sz_str = (size == PMB_SZ_16M) ? " 16MB": - (size == PMB_SZ_64M) ? " 64MB": - (size == PMB_SZ_128M) ? "128MB": - "512MB"; - - pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n", - vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str, - (data_val & PMB_C) ? "" : "un"); - - applied++; - } else { + if (!pmb_ppn_in_range(ppn)) { /* * Invalidate anything out of bounds. */ __raw_writel(addr_val & ~PMB_V, addr); __raw_writel(data_val & ~PMB_V, data); + continue; } + + /* + * Update the caching attributes if necessary + */ + if (data_val & PMB_C) { +#if defined(CONFIG_CACHE_WRITETHROUGH) + data_val |= PMB_WT; +#elif defined(CONFIG_CACHE_WRITEBACK) + data_val &= ~PMB_WT; +#else + data_val &= ~(PMB_C | PMB_WT); +#endif + __raw_writel(data_val, data); + } + + flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK); + + pmbe = pmb_alloc(vpn, ppn, flags, i); + if (IS_ERR(pmbe)) { + WARN_ON_ONCE(1); + continue; + } + + pmb_log_mapping(data_val, vpn, ppn); + + applied++; } return (applied == 0); } -#else -static inline int pmb_apply_legacy_mappings(void) -{ - return 1; -} -#endif int pmb_init(void) { - int i; - unsigned long addr, data; - unsigned long ret; + int ret; jump_to_uncached(); - /* - * Attempt to apply the legacy boot mappings if configured. If - * this is successful then we simply carry on with those and - * don't bother establishing additional memory mappings. Dynamic - * device mappings through pmb_remap() can still be bolted on - * after this. - */ - ret = pmb_apply_legacy_mappings(); - if (ret == 0) { - back_to_cached(); - return 0; - } - /* * Sync our software copy of the PMB mappings with those in * hardware. The mappings in the hardware PMB were either set up * by the bootloader or very early on by the kernel. 
*/ - for (i = 0; i < PMB_ENTRY_MAX; i++) { - struct pmb_entry *pmbe; - unsigned long vpn, ppn, flags; - - addr = PMB_DATA + (i << PMB_E_SHIFT); - data = __raw_readl(addr); - if (!(data & PMB_V)) - continue; - - if (data & PMB_C) { -#if defined(CONFIG_CACHE_WRITETHROUGH) - data |= PMB_WT; -#elif defined(CONFIG_CACHE_WRITEBACK) - data &= ~PMB_WT; -#else - data &= ~(PMB_C | PMB_WT); -#endif - } - __raw_writel(data, addr); - - ppn = data & PMB_PFN_MASK; - - flags = data & (PMB_C | PMB_WT | PMB_UB); - flags |= data & PMB_SZ_MASK; - - addr = PMB_ADDR + (i << PMB_E_SHIFT); - data = __raw_readl(addr); - - vpn = data & PMB_PFN_MASK; - - pmbe = pmb_alloc(vpn, ppn, flags, i); - WARN_ON(IS_ERR(pmbe)); + ret = pmb_synchronize_mappings(); + if (unlikely(ret == 0)) { + back_to_cached(); + return 0; } __raw_writel(0, PMB_IRMCR); /* Flush out the TLB */ - i = __raw_readl(MMUCR); - i |= MMUCR_TI; - __raw_writel(i, MMUCR); + __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); back_to_cached(); -- cgit v1.2.3 From 7bdda6209f224aa784a036df54b22cb338d2e859 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Feb 2010 13:23:00 +0900 Subject: sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB. Both the store queue API and the PMB remapping take unsigned long for their pgprot flags, which cuts off the extended protection bits. In the case of the PMB this isn't really a problem since the cache attribute bits that we care about are all in the lower 32-bits, but we do it just to be safe. The store queue remapping on the other hand depends on the extended prot bits for enabling userspace access to the mappings. Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index f822f83418e4..509a444a30ab 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -166,12 +167,15 @@ static struct { }; long pmb_remap(unsigned long vaddr, unsigned long phys, - unsigned long size, unsigned long flags) + unsigned long size, pgprot_t prot) { struct pmb_entry *pmbp, *pmbe; unsigned long wanted; int pmb_flags, i; long err; + u64 flags; + + flags = pgprot_val(prot); /* Convert typical pgprot value to the PMB equivalent */ if (flags & _PAGE_CACHABLE) { -- cgit v1.2.3 From 51becfd96287b3913b13075699433730984e2f4f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Feb 2010 15:33:30 +0900 Subject: sh: PMB tidying. Some overdue cleanup of the PMB code, killing off unused functionality and duplication sprinkled about the tree. 
Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 83 ++++++++++++++++++++++++++------------------------------ 1 file changed, 38 insertions(+), 45 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 509a444a30ab..924f3e4b3a82 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -21,32 +21,31 @@ #include #include #include +#include +#include #include #include #include #include #include -#include #include -#define NR_PMB_ENTRIES 16 - -static void __pmb_unmap(struct pmb_entry *); +static void pmb_unmap_entry(struct pmb_entry *); static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; -static unsigned long pmb_map; +static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); -static inline unsigned long mk_pmb_entry(unsigned int entry) +static __always_inline unsigned long mk_pmb_entry(unsigned int entry) { return (entry & PMB_E_MASK) << PMB_E_SHIFT; } -static inline unsigned long mk_pmb_addr(unsigned int entry) +static __always_inline unsigned long mk_pmb_addr(unsigned int entry) { return mk_pmb_entry(entry) | PMB_ADDR; } -static inline unsigned long mk_pmb_data(unsigned int entry) +static __always_inline unsigned long mk_pmb_data(unsigned int entry) { return mk_pmb_entry(entry) | PMB_DATA; } @@ -56,12 +55,12 @@ static int pmb_alloc_entry(void) unsigned int pos; repeat: - pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); + pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES); if (unlikely(pos > NR_PMB_ENTRIES)) return -ENOSPC; - if (test_and_set_bit(pos, &pmb_map)) + if (test_and_set_bit(pos, pmb_map)) goto repeat; return pos; @@ -78,7 +77,7 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, if (pos < 0) return ERR_PTR(pos); } else { - if (test_and_set_bit(entry, &pmb_map)) + if (test_and_set_bit(entry, pmb_map)) return ERR_PTR(-ENOSPC); pos = entry; } @@ -104,16 +103,17 @@ static void pmb_free(struct pmb_entry *pmbe) pmbe->flags = 0; pmbe->entry = 0; - clear_bit(pos, &pmb_map); + clear_bit(pos, pmb_map); } /* - * Must be in P2 for __set_pmb_entry() + * Must be run uncached. */ -static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, - unsigned long flags, int pos) +static void set_pmb_entry(struct pmb_entry *pmbe) { - __raw_writel(vpn | PMB_V, mk_pmb_addr(pos)); + jump_to_uncached(); + + __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); #ifdef CONFIG_CACHE_WRITETHROUGH /* @@ -121,17 +121,12 @@ static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, * invalid, so care must be taken to manually adjust cacheable * translations. 
*/ - if (likely(flags & PMB_C)) - flags |= PMB_WT; + if (likely(pmbe->flags & PMB_C)) + pmbe->flags |= PMB_WT; #endif - __raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos)); -} + __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry)); -static void set_pmb_entry(struct pmb_entry *pmbe) -{ - jump_to_uncached(); - __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); back_to_cached(); } @@ -140,9 +135,6 @@ static void clear_pmb_entry(struct pmb_entry *pmbe) unsigned int entry = pmbe->entry; unsigned long addr; - if (unlikely(entry >= NR_PMB_ENTRIES)) - return; - jump_to_uncached(); /* Clear V-bit */ @@ -155,15 +147,14 @@ static void clear_pmb_entry(struct pmb_entry *pmbe) back_to_cached(); } - static struct { unsigned long size; int flag; } pmb_sizes[] = { - { .size = 0x20000000, .flag = PMB_SZ_512M, }, - { .size = 0x08000000, .flag = PMB_SZ_128M, }, - { .size = 0x04000000, .flag = PMB_SZ_64M, }, - { .size = 0x01000000, .flag = PMB_SZ_16M, }, + { .size = SZ_512M, .flag = PMB_SZ_512M, }, + { .size = SZ_128M, .flag = PMB_SZ_128M, }, + { .size = SZ_64M, .flag = PMB_SZ_64M, }, + { .size = SZ_16M, .flag = PMB_SZ_16M, }, }; long pmb_remap(unsigned long vaddr, unsigned long phys, @@ -230,34 +221,36 @@ again: return wanted - size; out: - if (pmbp) - __pmb_unmap(pmbp); + pmb_unmap_entry(pmbp); return err; } void pmb_unmap(unsigned long addr) { - struct pmb_entry *pmbe = NULL; + struct pmb_entry *pmbe; int i; for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { - if (test_bit(i, &pmb_map)) { + if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; - if (pmbe->vpn == addr) + if (pmbe->vpn == addr) { + pmb_unmap_entry(pmbe); break; + } } } +} +static void pmb_unmap_entry(struct pmb_entry *pmbe) +{ if (unlikely(!pmbe)) return; - __pmb_unmap(pmbe); -} - -static void __pmb_unmap(struct pmb_entry *pmbe) -{ - BUG_ON(!test_bit(pmbe->entry, &pmb_map)); + if (!test_bit(pmbe->entry, pmb_map)) { + WARN_ON(1); + return; + } do { struct pmb_entry *pmblink = pmbe; @@ -326,7 +319,7 @@ static int pmb_synchronize_mappings(void) * jumping between the cached and uncached mappings and tearing * down alternating mappings while executing from the other. */ - for (i = 0; i < PMB_ENTRY_MAX; i++) { + for (i = 0; i < NR_PMB_ENTRIES; i++) { unsigned long addr, data; unsigned long addr_val, data_val; unsigned long ppn, vpn, flags; @@ -494,7 +487,7 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) prev_state.event == PM_EVENT_FREEZE) { struct pmb_entry *pmbe; for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { - if (test_bit(i, &pmb_map)) { + if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; set_pmb_entry(pmbe); } -- cgit v1.2.3 From d7813bc9e8e384f5a293b05c095c799d41af3668 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Feb 2010 17:56:38 +0900 Subject: sh: Build PMB entry links for existing contiguous multi-page mappings. This plugs in entry sizing support for existing mappings and then builds on top of that for linking together entries that are mapping contiguous areas. This will ultimately permit us to coalesce mappings and promote head pages while reclaiming PMB slots for dynamic remapping. 
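Reduced to its essentials, the linking rule is just an adjacency test on the software copies of two slots. A minimal sketch of that test, using a trimmed-down stand-in for pmb_entry (hypothetical names, illustration only):

struct pmb_span {			/* reduced stand-in for pmb_entry */
	unsigned long vpn;		/* virtual base of the mapping    */
	unsigned long ppn;		/* physical base of the mapping   */
	unsigned long size;		/* decoded mapping size in bytes  */
};

/*
 * Two slots describe one contiguous region when both the virtual and
 * the physical ranges butt up against each other.
 */
static int pmb_spans_adjacent(const struct pmb_span *prev,
			      const struct pmb_span *next)
{
	return next->vpn == prev->vpn + prev->size &&
	       next->ppn == prev->ppn + prev->size;
}

The synchronization loop in the diff below performs this comparison between the previously decoded slot and the current one, and records the result through the entry's link pointer rather than merging anything on the spot.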
Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 59 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 29 insertions(+), 30 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 924f3e4b3a82..f2ad6e374b64 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -90,20 +90,15 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, pmbe->ppn = ppn; pmbe->flags = flags; pmbe->entry = pos; + pmbe->size = 0; return pmbe; } static void pmb_free(struct pmb_entry *pmbe) { - int pos = pmbe->entry; - - pmbe->vpn = 0; - pmbe->ppn = 0; - pmbe->flags = 0; - pmbe->entry = 0; - - clear_bit(pos, pmb_map); + clear_bit(pmbe->entry, pmb_map); + pmbe->entry = PMB_NO_ENTRY; } /* @@ -198,6 +193,8 @@ again: vaddr += pmb_sizes[i].size; size -= pmb_sizes[i].size; + pmbe->size = pmb_sizes[i].size; + /* * Link adjacent entries that span multiple PMB entries * for easier tear-down. @@ -273,25 +270,7 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe) } while (pmbe); } -static inline void -pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn) -{ - unsigned int size; - const char *sz_str; - - size = data_val & PMB_SZ_MASK; - - sz_str = (size == PMB_SZ_16M) ? " 16MB": - (size == PMB_SZ_64M) ? " 64MB": - (size == PMB_SZ_128M) ? "128MB": - "512MB"; - - pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n", - vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str, - (data_val & PMB_C) ? "" : "un"); -} - -static inline unsigned int pmb_ppn_in_range(unsigned long ppn) +static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) { return ppn >= __pa(memory_start) && ppn < __pa(memory_end); } @@ -299,7 +278,8 @@ static inline unsigned int pmb_ppn_in_range(unsigned long ppn) static int pmb_synchronize_mappings(void) { unsigned int applied = 0; - int i; + struct pmb_entry *pmbp = NULL; + int i, j; pr_info("PMB: boot mappings:\n"); @@ -323,6 +303,7 @@ static int pmb_synchronize_mappings(void) unsigned long addr, data; unsigned long addr_val, data_val; unsigned long ppn, vpn, flags; + unsigned int size; struct pmb_entry *pmbe; addr = mk_pmb_addr(i); @@ -366,7 +347,8 @@ static int pmb_synchronize_mappings(void) __raw_writel(data_val, data); } - flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK); + size = data_val & PMB_SZ_MASK; + flags = size | (data_val & PMB_CACHE_MASK); pmbe = pmb_alloc(vpn, ppn, flags, i); if (IS_ERR(pmbe)) { @@ -374,7 +356,24 @@ static int pmb_synchronize_mappings(void) continue; } - pmb_log_mapping(data_val, vpn, ppn); + for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) + if (pmb_sizes[j].flag == size) + pmbe->size = pmb_sizes[j].size; + + /* + * Compare the previous entry against the current one to + * see if the entries span a contiguous mapping. If so, + * setup the entry links accordingly. + */ + if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) && + (pmbe->ppn == (pmbp->ppn + pmbp->size)))) + pmbp->link = pmbe; + + pmbp = pmbe; + + pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n", + vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20, + (data_val & PMB_C) ? "" : "un"); applied++; } -- cgit v1.2.3 From 0065b96775f1eff167a2c3343a41582e8fab4c6c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Feb 2010 18:05:23 +0900 Subject: sh: Fix up dynamically created write-through PMB mappings. Write-through PMB mappings still require the cache bit to be set, even if they're to be flagged with a different cache policy and bufferability bit. 
To reduce some of the confusion surrounding the flag encoding we centralize the cache mask based on the system cache policy while we're at it. Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 56 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 32 insertions(+), 24 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index f2ad6e374b64..cb808a8aaffc 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -101,6 +101,26 @@ static void pmb_free(struct pmb_entry *pmbe) pmbe->entry = PMB_NO_ENTRY; } +/* + * Ensure that the PMB entries match our cache configuration. + * + * When we are in 32-bit address extended mode, CCR.CB becomes + * invalid, so care must be taken to manually adjust cacheable + * translations. + */ +static __always_inline unsigned long pmb_cache_flags(void) +{ + unsigned long flags = 0; + +#if defined(CONFIG_CACHE_WRITETHROUGH) + flags |= PMB_C | PMB_WT | PMB_UB; +#elif defined(CONFIG_CACHE_WRITEBACK) + flags |= PMB_C; +#endif + + return flags; +} + /* * Must be run uncached. */ @@ -108,18 +128,10 @@ static void set_pmb_entry(struct pmb_entry *pmbe) { jump_to_uncached(); - __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); - -#ifdef CONFIG_CACHE_WRITETHROUGH - /* - * When we are in 32-bit address extended mode, CCR.CB becomes - * invalid, so care must be taken to manually adjust cacheable - * translations. - */ - if (likely(pmbe->flags & PMB_C)) - pmbe->flags |= PMB_WT; -#endif + pmbe->flags &= ~PMB_CACHE_MASK; + pmbe->flags |= pmb_cache_flags(); + __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry)); back_to_cached(); @@ -163,14 +175,15 @@ long pmb_remap(unsigned long vaddr, unsigned long phys, flags = pgprot_val(prot); + pmb_flags = PMB_WT | PMB_UB; + /* Convert typical pgprot value to the PMB equivalent */ if (flags & _PAGE_CACHABLE) { - if (flags & _PAGE_WT) - pmb_flags = PMB_WT; - else - pmb_flags = PMB_C; - } else - pmb_flags = PMB_WT | PMB_UB; + pmb_flags |= PMB_C; + + if ((flags & _PAGE_WT) == 0) + pmb_flags &= ~(PMB_WT | PMB_UB); + } pmbp = NULL; wanted = size; @@ -337,13 +350,8 @@ static int pmb_synchronize_mappings(void) * Update the caching attributes if necessary */ if (data_val & PMB_C) { -#if defined(CONFIG_CACHE_WRITETHROUGH) - data_val |= PMB_WT; -#elif defined(CONFIG_CACHE_WRITEBACK) - data_val &= ~PMB_WT; -#else - data_val &= ~(PMB_C | PMB_WT); -#endif + data_val &= ~PMB_CACHE_MASK; + data_val |= pmb_cache_flags(); __raw_writel(data_val, data); } -- cgit v1.2.3 From d53a0d33bc3a50ea0e8dd1680a2e8435770b162a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Feb 2010 21:17:02 +0900 Subject: sh: PMB locking overhaul. This implements some locking for the PMB code. A high level rwlock is added for dealing with rw accesses on the entry map while a per-entry data structure spinlock is added to deal with the PMB entry changing out from underneath us. 
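In outline that is one global rwlock around the slot bitmap plus a lock embedded in every entry. A rough sketch of the shape, with made-up names and a trimmed entry (the real structure, lock ordering and error handling are in the diff below):

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RWLOCK(map_rwlock);	/* guards the slot bitmap        */
static DECLARE_BITMAP(slot_map, 16);	/* one bit per hardware PMB slot */

struct slot {
	spinlock_t lock;		/* guards this slot's fields, assumed
					 * initialised with spin_lock_init() */
	unsigned long vpn, ppn, flags;
};

static struct slot slots[16];

/* Claiming a slot (and, symmetrically, releasing one) modifies the
 * bitmap, so it takes the rwlock for writing... */
static struct slot *claim_slot(void)
{
	unsigned long flags;
	struct slot *s = NULL;
	int pos;

	write_lock_irqsave(&map_rwlock, flags);
	pos = find_first_zero_bit(slot_map, 16);
	if (pos < 16) {
		__set_bit(pos, slot_map);
		s = &slots[pos];
	}
	write_unlock_irqrestore(&map_rwlock, flags);

	return s;	/* NULL when every slot is taken */
}

/* ...while touching one mapping only needs that entry's own lock. */
static void update_slot(struct slot *s, unsigned long newflags)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->flags = newflags;
	spin_unlock_irqrestore(&s->lock, flags);
}

Paths that only walk the table, such as the sysdev resume handler in the diff, take the rwlock for reading, so they exclude allocation and teardown without serialising against each other.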
Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 152 +++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 114 insertions(+), 38 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index cb808a8aaffc..e65e8b8e2a5e 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include #include #include @@ -30,8 +32,29 @@ #include #include +struct pmb_entry; + +struct pmb_entry { + unsigned long vpn; + unsigned long ppn; + unsigned long flags; + unsigned long size; + + spinlock_t lock; + + /* + * 0 .. NR_PMB_ENTRIES for specific entry selection, or + * PMB_NO_ENTRY to search for a free one + */ + int entry; + + /* Adjacent entry link for contiguous multi-entry mappings */ + struct pmb_entry *link; +}; + static void pmb_unmap_entry(struct pmb_entry *); +static DEFINE_RWLOCK(pmb_rwlock); static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); @@ -52,16 +75,13 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry) static int pmb_alloc_entry(void) { - unsigned int pos; + int pos; -repeat: pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES); - - if (unlikely(pos > NR_PMB_ENTRIES)) - return -ENOSPC; - - if (test_and_set_bit(pos, pmb_map)) - goto repeat; + if (pos >= 0 && pos < NR_PMB_ENTRIES) + __set_bit(pos, pmb_map); + else + pos = -ENOSPC; return pos; } @@ -70,21 +90,32 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, unsigned long flags, int entry) { struct pmb_entry *pmbe; + unsigned long irqflags; + void *ret = NULL; int pos; + write_lock_irqsave(&pmb_rwlock, irqflags); + if (entry == PMB_NO_ENTRY) { pos = pmb_alloc_entry(); - if (pos < 0) - return ERR_PTR(pos); + if (unlikely(pos < 0)) { + ret = ERR_PTR(pos); + goto out; + } } else { - if (test_and_set_bit(entry, pmb_map)) - return ERR_PTR(-ENOSPC); + if (__test_and_set_bit(entry, pmb_map)) { + ret = ERR_PTR(-ENOSPC); + goto out; + } + pos = entry; } + write_unlock_irqrestore(&pmb_rwlock, irqflags); + pmbe = &pmb_entry_list[pos]; - if (!pmbe) - return ERR_PTR(-ENOMEM); + + spin_lock_init(&pmbe->lock); pmbe->vpn = vpn; pmbe->ppn = ppn; @@ -93,11 +124,15 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, pmbe->size = 0; return pmbe; + +out: + write_unlock_irqrestore(&pmb_rwlock, irqflags); + return ret; } static void pmb_free(struct pmb_entry *pmbe) { - clear_bit(pmbe->entry, pmb_map); + __clear_bit(pmbe->entry, pmb_map); pmbe->entry = PMB_NO_ENTRY; } @@ -124,7 +159,7 @@ static __always_inline unsigned long pmb_cache_flags(void) /* * Must be run uncached. 
*/ -static void set_pmb_entry(struct pmb_entry *pmbe) +static void __set_pmb_entry(struct pmb_entry *pmbe) { jump_to_uncached(); @@ -137,7 +172,7 @@ static void set_pmb_entry(struct pmb_entry *pmbe) back_to_cached(); } -static void clear_pmb_entry(struct pmb_entry *pmbe) +static void __clear_pmb_entry(struct pmb_entry *pmbe) { unsigned int entry = pmbe->entry; unsigned long addr; @@ -154,6 +189,15 @@ static void clear_pmb_entry(struct pmb_entry *pmbe) back_to_cached(); } +static void set_pmb_entry(struct pmb_entry *pmbe) +{ + unsigned long flags; + + spin_lock_irqsave(&pmbe->lock, flags); + __set_pmb_entry(pmbe); + spin_unlock_irqrestore(&pmbe->lock, flags); +} + static struct { unsigned long size; int flag; @@ -190,6 +234,8 @@ long pmb_remap(unsigned long vaddr, unsigned long phys, again: for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { + unsigned long flags; + if (size < pmb_sizes[i].size) continue; @@ -200,7 +246,9 @@ again: goto out; } - set_pmb_entry(pmbe); + spin_lock_irqsave(&pmbe->lock, flags); + + __set_pmb_entry(pmbe); phys += pmb_sizes[i].size; vaddr += pmb_sizes[i].size; @@ -212,8 +260,11 @@ again: * Link adjacent entries that span multiple PMB entries * for easier tear-down. */ - if (likely(pmbp)) + if (likely(pmbp)) { + spin_lock(&pmbp->lock); pmbp->link = pmbe; + spin_unlock(&pmbp->lock); + } pmbp = pmbe; @@ -223,9 +274,11 @@ again: * pmb_sizes[i].size again. */ i--; + + spin_unlock_irqrestore(&pmbe->lock, flags); } - if (size >= 0x1000000) + if (size >= SZ_16M) goto again; return wanted - size; @@ -238,29 +291,32 @@ out: void pmb_unmap(unsigned long addr) { - struct pmb_entry *pmbe; + struct pmb_entry *pmbe = NULL; int i; + read_lock(&pmb_rwlock); + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; - if (pmbe->vpn == addr) { - pmb_unmap_entry(pmbe); + if (pmbe->vpn == addr) break; - } } } + + read_unlock(&pmb_rwlock); + + pmb_unmap_entry(pmbe); } static void pmb_unmap_entry(struct pmb_entry *pmbe) { + unsigned long flags; + if (unlikely(!pmbe)) return; - if (!test_bit(pmbe->entry, pmb_map)) { - WARN_ON(1); - return; - } + write_lock_irqsave(&pmb_rwlock, flags); do { struct pmb_entry *pmblink = pmbe; @@ -272,15 +328,17 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe) * this entry in pmb_alloc() (even if we haven't filled * it yet). * - * Therefore, calling clear_pmb_entry() is safe as no + * Therefore, calling __clear_pmb_entry() is safe as no * other mapping can be using that slot. */ - clear_pmb_entry(pmbe); + __clear_pmb_entry(pmbe); pmbe = pmblink->link; pmb_free(pmblink); } while (pmbe); + + write_unlock_irqrestore(&pmb_rwlock, flags); } static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) @@ -316,6 +374,7 @@ static int pmb_synchronize_mappings(void) unsigned long addr, data; unsigned long addr_val, data_val; unsigned long ppn, vpn, flags; + unsigned long irqflags; unsigned int size; struct pmb_entry *pmbe; @@ -364,21 +423,31 @@ static int pmb_synchronize_mappings(void) continue; } + spin_lock_irqsave(&pmbe->lock, irqflags); + for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) if (pmb_sizes[j].flag == size) pmbe->size = pmb_sizes[j].size; - /* - * Compare the previous entry against the current one to - * see if the entries span a contiguous mapping. If so, - * setup the entry links accordingly. 
- */ - if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) && - (pmbe->ppn == (pmbp->ppn + pmbp->size)))) - pmbp->link = pmbe; + if (pmbp) { + spin_lock(&pmbp->lock); + + /* + * Compare the previous entry against the current one to + * see if the entries span a contiguous mapping. If so, + * setup the entry links accordingly. + */ + if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) && + (pmbe->ppn == (pmbp->ppn + pmbp->size))) + pmbp->link = pmbe; + + spin_unlock(&pmbp->lock); + } pmbp = pmbe; + spin_unlock_irqrestore(&pmbe->lock, irqflags); + pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n", vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20, (data_val & PMB_C) ? "" : "un"); @@ -493,14 +562,21 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) if (state.event == PM_EVENT_ON && prev_state.event == PM_EVENT_FREEZE) { struct pmb_entry *pmbe; + + read_lock(&pmb_rwlock); + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; set_pmb_entry(pmbe); } } + + read_unlock(&pmb_rwlock); } + prev_state = state; + return 0; } -- cgit v1.2.3 From 2e450643d70b62e0192577681b227d7d5d2efa45 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 18 Feb 2010 13:26:05 +0900 Subject: sh: Use uncached I/O helpers in PMB setup. The PMB code is an example of something that spends an absurd amount of time running uncached when only a couple of operations really need to be. This switches over to the shiny new uncached helpers, permitting us to spend far more time running cached. Additionally, MMUCR twiddling is perfectly safe from cached space given that it's paired with a control register barrier, so fix that up, too. Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 46 +++++++++++++++++++--------------------------- 1 file changed, 19 insertions(+), 27 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index e65e8b8e2a5e..b9d5476e1284 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -161,32 +161,28 @@ static __always_inline unsigned long pmb_cache_flags(void) */ static void __set_pmb_entry(struct pmb_entry *pmbe) { - jump_to_uncached(); - pmbe->flags &= ~PMB_CACHE_MASK; pmbe->flags |= pmb_cache_flags(); - __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); - __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry)); - - back_to_cached(); + writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); + writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, + mk_pmb_data(pmbe->entry)); } static void __clear_pmb_entry(struct pmb_entry *pmbe) { - unsigned int entry = pmbe->entry; - unsigned long addr; + unsigned long addr, data; + unsigned long addr_val, data_val; - jump_to_uncached(); + addr = mk_pmb_addr(pmbe->entry); + data = mk_pmb_data(pmbe->entry); - /* Clear V-bit */ - addr = mk_pmb_addr(entry); - __raw_writel(__raw_readl(addr) & ~PMB_V, addr); + addr_val = __raw_readl(addr); + data_val = __raw_readl(data); - addr = mk_pmb_data(entry); - __raw_writel(__raw_readl(addr) & ~PMB_V, addr); - - back_to_cached(); + /* Clear V-bit */ + writel_uncached(addr_val & ~PMB_V, addr); + writel_uncached(data_val & ~PMB_V, data); } static void set_pmb_entry(struct pmb_entry *pmbe) @@ -400,8 +396,8 @@ static int pmb_synchronize_mappings(void) /* * Invalidate anything out of bounds. 
*/ - __raw_writel(addr_val & ~PMB_V, addr); - __raw_writel(data_val & ~PMB_V, data); + writel_uncached(addr_val & ~PMB_V, addr); + writel_uncached(data_val & ~PMB_V, data); continue; } @@ -411,7 +407,8 @@ static int pmb_synchronize_mappings(void) if (data_val & PMB_C) { data_val &= ~PMB_CACHE_MASK; data_val |= pmb_cache_flags(); - __raw_writel(data_val, data); + + writel_uncached(data_val, data); } size = data_val & PMB_SZ_MASK; @@ -462,25 +459,20 @@ int pmb_init(void) { int ret; - jump_to_uncached(); - /* * Sync our software copy of the PMB mappings with those in * hardware. The mappings in the hardware PMB were either set up * by the bootloader or very early on by the kernel. */ ret = pmb_synchronize_mappings(); - if (unlikely(ret == 0)) { - back_to_cached(); + if (unlikely(ret == 0)) return 0; - } - __raw_writel(0, PMB_IRMCR); + writel_uncached(0, PMB_IRMCR); /* Flush out the TLB */ __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); - - back_to_cached(); + ctrl_barrier(); return 0; } -- cgit v1.2.3 From d01447b3197c2c470a14666be2c640407bbbfec7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 18 Feb 2010 18:13:51 +0900 Subject: sh: Merge legacy and dynamic PMB modes. This implements a bit of rework for the PMB code, which permits us to kill off the legacy PMB mode completely. Rather than trusting the boot loader to do the right thing, we do a quick verification of the PMB contents to determine whether to have the kernel setup the initial mappings or whether it needs to mangle them later on instead. If we're booting from legacy mappings, the kernel will now take control of them and make them match the kernel's initial mapping configuration. This is accomplished by breaking the initialization phase out in to multiple steps: synchronization, merging, and resizing. With the recent rework, the synchronization code establishes page links for compound mappings already, so we build on top of this for promoting mappings and reclaiming unused slots. At the same time, the changes introduced for the uncached helpers also permit us to dynamically resize the uncached mapping without any particular headaches. The smallest page size is more than sufficient for mapping all of kernel text, and as we're careful not to jump to any far off locations in the setup code the mapping can safely be resized regardless of whether we are executing from it or not. 
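One constraint worth making explicit is that a chain of linked entries can only be folded into a single slot when the combined span lands exactly on a hardware page size (16MB, 64MB, 128MB or 512MB). A toy check for that, kept separate from the kernel code (hypothetical helper name):

static const unsigned long pmb_page_sizes[] = {
	16UL << 20, 64UL << 20, 128UL << 20, 512UL << 20,
};

static int span_fits_one_slot(unsigned long span)
{
	unsigned int i;

	for (i = 0; i < sizeof(pmb_page_sizes) / sizeof(pmb_page_sizes[0]); i++)
		if (span == pmb_page_sizes[i])
			return 1;

	return 0;
}

Three contiguous 128MB boot mappings therefore stay as three entries (384MB is not a valid size), while a chain of four can be promoted to a single 512MB entry with three slots handed back for dynamic remapping; the pmb_merge()/pmb_coalesce() pair in the diff below walks each chain looking for the largest prefix that satisfies this check.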
Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 243 ++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 207 insertions(+), 36 deletions(-) (limited to 'arch/sh/mm/pmb.c') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index b9d5476e1284..198bcff5e96f 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -52,7 +52,7 @@ struct pmb_entry { struct pmb_entry *link; }; -static void pmb_unmap_entry(struct pmb_entry *); +static void pmb_unmap_entry(struct pmb_entry *, int depth); static DEFINE_RWLOCK(pmb_rwlock); static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; @@ -115,13 +115,14 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, pmbe = &pmb_entry_list[pos]; + memset(pmbe, 0, sizeof(struct pmb_entry)); + spin_lock_init(&pmbe->lock); pmbe->vpn = vpn; pmbe->ppn = ppn; pmbe->flags = flags; pmbe->entry = pos; - pmbe->size = 0; return pmbe; @@ -133,7 +134,9 @@ out: static void pmb_free(struct pmb_entry *pmbe) { __clear_bit(pmbe->entry, pmb_map); - pmbe->entry = PMB_NO_ENTRY; + + pmbe->entry = PMB_NO_ENTRY; + pmbe->link = NULL; } /* @@ -161,9 +164,6 @@ static __always_inline unsigned long pmb_cache_flags(void) */ static void __set_pmb_entry(struct pmb_entry *pmbe) { - pmbe->flags &= ~PMB_CACHE_MASK; - pmbe->flags |= pmb_cache_flags(); - writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry)); @@ -280,7 +280,7 @@ again: return wanted - size; out: - pmb_unmap_entry(pmbp); + pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); return err; } @@ -302,18 +302,40 @@ void pmb_unmap(unsigned long addr) read_unlock(&pmb_rwlock); - pmb_unmap_entry(pmbe); + pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); } -static void pmb_unmap_entry(struct pmb_entry *pmbe) +static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) { - unsigned long flags; + return (b->vpn == (a->vpn + a->size)) && + (b->ppn == (a->ppn + a->size)) && + (b->flags == a->flags); +} - if (unlikely(!pmbe)) - return; +static bool pmb_size_valid(unsigned long size) +{ + int i; - write_lock_irqsave(&pmb_rwlock, flags); + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) + if (pmb_sizes[i].size == size) + return true; + + return false; +} + +static int pmb_size_to_flags(unsigned long size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) + if (pmb_sizes[i].size == size) + return pmb_sizes[i].flag; + return 0; +} + +static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) +{ do { struct pmb_entry *pmblink = pmbe; @@ -332,8 +354,18 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe) pmbe = pmblink->link; pmb_free(pmblink); - } while (pmbe); + } while (pmbe && --depth); +} + +static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) +{ + unsigned long flags; + if (unlikely(!pmbe)) + return; + + write_lock_irqsave(&pmb_rwlock, flags); + __pmb_unmap_entry(pmbe, depth); write_unlock_irqrestore(&pmb_rwlock, flags); } @@ -342,14 +374,40 @@ static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) return ppn >= __pa(memory_start) && ppn < __pa(memory_end); } -static int pmb_synchronize_mappings(void) +static void __init pmb_notify(void) { - unsigned int applied = 0; - struct pmb_entry *pmbp = NULL; - int i, j; + int i; pr_info("PMB: boot mappings:\n"); + read_lock(&pmb_rwlock); + + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + struct pmb_entry *pmbe; + + if (!test_bit(i, pmb_map)) + continue; + + pmbe = &pmb_entry_list[i]; + + pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n", + pmbe->vpn >> 
PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT, + pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un"); + } + + read_unlock(&pmb_rwlock); +} + +/* + * Sync our software copy of the PMB mappings with those in hardware. The + * mappings in the hardware PMB were either set up by the bootloader or + * very early on by the kernel. + */ +static void __init pmb_synchronize(void) +{ + struct pmb_entry *pmbp = NULL; + int i, j; + /* * Run through the initial boot mappings, log the established * ones, and blow away anything that falls outside of the valid @@ -432,10 +490,10 @@ static int pmb_synchronize_mappings(void) /* * Compare the previous entry against the current one to * see if the entries span a contiguous mapping. If so, - * setup the entry links accordingly. + * setup the entry links accordingly. Compound mappings + * are later coalesced. */ - if ((pmbe->vpn == (pmbp->vpn + pmbp->size)) && - (pmbe->ppn == (pmbp->ppn + pmbp->size))) + if (pmb_can_merge(pmbp, pmbe)) pmbp->link = pmbe; spin_unlock(&pmbp->lock); @@ -444,37 +502,150 @@ static int pmb_synchronize_mappings(void) pmbp = pmbe; spin_unlock_irqrestore(&pmbe->lock, irqflags); + } +} - pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n", - vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20, - (data_val & PMB_C) ? "" : "un"); +static void __init pmb_merge(struct pmb_entry *head) +{ + unsigned long span, newsize; + struct pmb_entry *tail; + int i = 1, depth = 0; + + span = newsize = head->size; - applied++; + tail = head->link; + while (tail) { + span += tail->size; + + if (pmb_size_valid(span)) { + newsize = span; + depth = i; + } + + /* This is the end of the line.. */ + if (!tail->link) + break; + + tail = tail->link; + i++; } - return (applied == 0); + /* + * The merged page size must be valid. + */ + if (!pmb_size_valid(newsize)) + return; + + head->flags &= ~PMB_SZ_MASK; + head->flags |= pmb_size_to_flags(newsize); + + head->size = newsize; + + __pmb_unmap_entry(head->link, depth); + __set_pmb_entry(head); } -int pmb_init(void) +static void __init pmb_coalesce(void) { - int ret; + unsigned long flags; + int i; + + write_lock_irqsave(&pmb_rwlock, flags); + + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + struct pmb_entry *pmbe; + + if (!test_bit(i, pmb_map)) + continue; + + pmbe = &pmb_entry_list[i]; + + /* + * We're only interested in compound mappings + */ + if (!pmbe->link) + continue; + + /* + * Nothing to do if it already uses the largest possible + * page size. + */ + if (pmbe->size == SZ_512M) + continue; + + pmb_merge(pmbe); + } + + write_unlock_irqrestore(&pmb_rwlock, flags); +} + +#ifdef CONFIG_UNCACHED_MAPPING +static void __init pmb_resize(void) +{ + int i; /* - * Sync our software copy of the PMB mappings with those in - * hardware. The mappings in the hardware PMB were either set up - * by the bootloader or very early on by the kernel. + * If the uncached mapping was constructed by the kernel, it will + * already be a reasonable size. */ - ret = pmb_synchronize_mappings(); - if (unlikely(ret == 0)) - return 0; + if (uncached_size == SZ_16M) + return; + + read_lock(&pmb_rwlock); + + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { + struct pmb_entry *pmbe; + unsigned long flags; + + if (!test_bit(i, pmb_map)) + continue; + + pmbe = &pmb_entry_list[i]; + + if (pmbe->vpn != uncached_start) + continue; + + /* + * Found it, now resize it. 
+ */ + spin_lock_irqsave(&pmbe->lock, flags); + + pmbe->size = SZ_16M; + pmbe->flags &= ~PMB_SZ_MASK; + pmbe->flags |= pmb_size_to_flags(pmbe->size); + + uncached_resize(pmbe->size); + + __set_pmb_entry(pmbe); + + spin_unlock_irqrestore(&pmbe->lock, flags); + } + + read_lock(&pmb_rwlock); +} +#endif + +void __init pmb_init(void) +{ + /* Synchronize software state */ + pmb_synchronize(); + + /* Attempt to combine compound mappings */ + pmb_coalesce(); + +#ifdef CONFIG_UNCACHED_MAPPING + /* Resize initial mappings, if necessary */ + pmb_resize(); +#endif + + /* Log them */ + pmb_notify(); writel_uncached(0, PMB_IRMCR); /* Flush out the TLB */ __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR); ctrl_barrier(); - - return 0; } bool __in_29bit_mode(void) -- cgit v1.2.3
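Across the series the dynamic side keeps the same size-selection policy: pmb_remap() carves a request into the largest hardware pages that still fit, working down from 512MB to 16MB. A standalone sketch of that greedy split, with a hypothetical helper name and none of the actual PMB register programming:

static unsigned long split_into_pmb_chunks(unsigned long size,
					   unsigned long *chunks,
					   unsigned int max_chunks)
{
	static const unsigned long sizes[] = {
		512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
	};
	unsigned long mapped = 0;
	unsigned int i = 0, n = 0;

	while (i < sizeof(sizes) / sizeof(sizes[0]) && n < max_chunks) {
		if (size < sizes[i]) {
			i++;		/* too big, try the next size down */
			continue;
		}
		chunks[n++] = sizes[i];	/* one PMB entry of this size */
		mapped += sizes[i];
		size -= sizes[i];
	}

	return mapped;
}

A 192MB request, for example, comes back as one 128MB chunk plus one 64MB chunk, and anything that is not a multiple of 16MB leaves an unmapped tail, which is why pmb_remap() in the patches above returns the length actually mapped rather than assuming the whole request succeeded.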