author		Linus Torvalds <torvalds@linux-foundation.org>	2015-11-09 16:32:13 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-09 16:32:13 -0800
commit		3510ca19a82ba4c6a17af79c1f0448622a406efa (patch)
tree		79b9e734100e40f379e9b4c3c50d261c3cdc2fa8 /arch/xtensa/include
parent		e4da7e9a54649d6877ac23828ff93ce7191eae2c (diff)
parent		afaa7c542cc9c4d8a99ba252a8ea5e8bc7c897e2 (diff)
Merge tag 'xtensa-20151108' of git://github.com/czankel/xtensa-linux
Pull xtensa updates from Chris Zankel:

 - fix remaining issues with noMMU cores
 - fix build for cores w/o cache or zero overhead loop options
 - fix boot of secondary cores in SMP configuration
 - add support for DMA to high memory pages
 - add dma_to_phys and phys_to_dma functions

* tag 'xtensa-20151108' of git://github.com/czankel/xtensa-linux:
  xtensa: implement dma_to_phys and phys_to_dma
  xtensa: support DMA to high memory
  Revert "xtensa: cache inquiry and unaligned cache handling functions"
  xtensa: drop unused sections and remapped reset handlers
  xtensa: fix secondary core boot in SMP
  xtensa: add FORCE_MAX_ZONEORDER to Kconfig
  xtensa: nommu: provide defconfig for de212 on kc705
  xtensa: nommu: xtfpga: add kc705 DTS
  xtensa: add de212 core variant
  xtensa: nommu: select HAVE_FUTEX_CMPXCHG
  xtensa: nommu: fix default memory start address
  xtensa: nommu: provide correct KIO addresses
  xtensa: nommu: fix USER_RING definition
  xtensa: xtfpga: fix integer overflow in TASK_SIZE
  xtensa: fix build for configs without cache options
  xtensa: fixes for configs without loop option
Diffstat (limited to 'arch/xtensa/include')
-rw-r--r--	arch/xtensa/include/asm/asmmacro.h		  7
-rw-r--r--	arch/xtensa/include/asm/cacheasm.h		 26
-rw-r--r--	arch/xtensa/include/asm/cacheflush.h		106
-rw-r--r--	arch/xtensa/include/asm/dma-mapping.h		 10
-rw-r--r--	arch/xtensa/include/asm/initialize_mmu.h	 13
-rw-r--r--	arch/xtensa/include/asm/io.h			  9
-rw-r--r--	arch/xtensa/include/asm/pgtable.h		  4
-rw-r--r--	arch/xtensa/include/asm/vectors.h		 28
8 files changed, 80 insertions, 123 deletions
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
index 755320f6e0bc..746dcc8b5abc 100644
--- a/arch/xtensa/include/asm/asmmacro.h
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -35,9 +35,10 @@
* __loop as
* restart loop. 'as' register must not have been modified!
*
- * __endla ar, at, incr
+ * __endla ar, as, incr
* ar start address (modified)
- * as scratch register used by macro
+ * as scratch register used by __loops/__loopi macros or
+ * end address used by __loopt macro
* inc increment
*/
@@ -97,7 +98,7 @@
.endm
/*
- * loop from ar to ax
+ * loop from ar to as
*/
.macro __loopt ar, as, at, incr_log2
diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
index 60e18773ecb8..e0f9e1109c83 100644
--- a/arch/xtensa/include/asm/cacheasm.h
+++ b/arch/xtensa/include/asm/cacheasm.h
@@ -73,7 +73,9 @@
.macro ___unlock_dcache_all ar at
+#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
@@ -90,30 +92,38 @@
.macro ___flush_invalidate_dcache_all ar at
+#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___flush_dcache_all ar at
+#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_dcache_all ar at
+#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_icache_all ar at
+#if XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
XCHAL_ICACHE_LINEWIDTH
+#endif
.endm
@@ -121,28 +131,36 @@
.macro ___flush_invalidate_dcache_range ar as at
+#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___flush_dcache_range ar as at
+#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_dcache_range ar as at
+#if XCHAL_DCACHE_SIZE
__loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_icache_range ar as at
+#if XCHAL_ICACHE_SIZE
__loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
+#endif
.endm
@@ -150,27 +168,35 @@
.macro ___flush_invalidate_dcache_page ar as
+#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___flush_dcache_page ar as
+#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_dcache_page ar as
+#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
+#endif
.endm
.macro ___invalidate_icache_page ar as
+#if XCHAL_ICACHE_SIZE
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
+#endif
.endm
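
Every cache maintenance macro body above is wrapped in #if XCHAL_DCACHE_SIZE (or #if XCHAL_ICACHE_SIZE), so on core variants configured without the corresponding cache the macro expands to nothing and the build succeeds. A minimal C-level sketch of the same compile-time guard, using a hypothetical wrapper name that is not part of the patch:

#include <variant/core.h>	/* provides XCHAL_DCACHE_SIZE for this core variant */

/* Hypothetical illustration: on a cache-less core XCHAL_DCACHE_SIZE is 0,
 * the body disappears during preprocessing, and no D-cache instructions
 * are ever emitted for this configuration. */
static inline void maybe_flush_dcache_all(void)
{
#if XCHAL_DCACHE_SIZE
	__flush_dcache_all();	/* stand-in for the port's real flush helper */
#endif
}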
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 5f67ace97b32..397d6a1a4224 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -55,9 +55,14 @@ extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
-# define __flush_dcache_range(p,s) do { } while(0)
-# define __flush_dcache_page(p) do { } while(0)
-# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
+static inline void __flush_dcache_page(unsigned long va)
+{
+}
+static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
+{
+}
+# define __flush_invalidate_dcache_all() __invalidate_dcache_all()
+# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
#endif
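
Replacing the empty do { } while (0) macros with empty static inline functions keeps argument type-checking and single evaluation even when the body compiles away. A short sketch of the difference, with hypothetical names:

#define flush_range_macro(p, s)		do { } while (0)	/* (p) and (s) never examined */

static inline void flush_range_inline(unsigned long va, unsigned long sz)
{
	/* intentionally empty: this configuration has no D-cache */
}

static void example(void *p)
{
	flush_range_macro(p, 1);			/* accepted although p is a pointer */
	flush_range_inline((unsigned long)p, 1);	/* wrong argument types are caught */
}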
@@ -174,99 +179,4 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
#endif
-#define XTENSA_CACHEBLK_LOG2 29
-#define XTENSA_CACHEBLK_SIZE (1 << XTENSA_CACHEBLK_LOG2)
-#define XTENSA_CACHEBLK_MASK (7 << XTENSA_CACHEBLK_LOG2)
-
-#if XCHAL_HAVE_CACHEATTR
-static inline u32 xtensa_get_cacheattr(void)
-{
- u32 r;
- asm volatile(" rsr %0, cacheattr" : "=a"(r));
- return r;
-}
-
-static inline u32 xtensa_get_dtlb1(u32 addr)
-{
- u32 r = addr & XTENSA_CACHEBLK_MASK;
- return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
- & 0xF);
-}
-#else
-static inline u32 xtensa_get_dtlb1(u32 addr)
-{
- u32 r;
- asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
- asm volatile(" dsync");
- return r;
-}
-
-static inline u32 xtensa_get_cacheattr(void)
-{
- u32 r = 0;
- u32 a = 0;
- do {
- a -= XTENSA_CACHEBLK_SIZE;
- r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
- } while (a);
- return r;
-}
-#endif
-
-static inline int xtensa_need_flush_dma_source(u32 addr)
-{
- return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
-}
-
-static inline int xtensa_need_invalidate_dma_destination(u32 addr)
-{
- return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
-}
-
-static inline void flush_dcache_unaligned(u32 addr, u32 size)
-{
- u32 cnt;
- if (size) {
- cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
- while (cnt--) {
- asm volatile(" dhwb %0, 0" : : "a"(addr));
- addr += XCHAL_DCACHE_LINESIZE;
- }
- asm volatile(" dsync");
- }
-}
-
-static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
-{
- int cnt;
- if (size) {
- asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
- cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- - XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
- while (cnt-- > 0) {
- asm volatile(" dhi %0, %1" : : "a"(addr),
- "n"(XCHAL_DCACHE_LINESIZE));
- addr += XCHAL_DCACHE_LINESIZE;
- }
- asm volatile(" dhwbi %0, %1" : : "a"(addr),
- "n"(XCHAL_DCACHE_LINESIZE));
- asm volatile(" dsync");
- }
-}
-
-static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
-{
- u32 cnt;
- if (size) {
- cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
- while (cnt--) {
- asm volatile(" dhwbi %0, 0" : : "a"(addr));
- addr += XCHAL_DCACHE_LINESIZE;
- }
- asm volatile(" dsync");
- }
-}
-
#endif /* _XTENSA_CACHEFLUSH_H */
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 4427f38b634e..66c9ba261e30 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -35,4 +35,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return (dma_addr_t)paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return (phys_addr_t)daddr;
+}
+
#endif /* _XTENSA_DMA_MAPPING_H */
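
On xtensa a DMA (bus) address is identical to the CPU physical address, so both helpers are plain casts. A hedged round-trip sketch using the conversions above (helper name hypothetical):

#include <linux/dma-mapping.h>

/* Hypothetical check: with the identity conversions above, translating a
 * physical address to a bus address and back is lossless. */
static bool dma_addr_round_trips(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t daddr = phys_to_dma(dev, paddr);

	return dma_to_phys(dev, daddr) == paddr;	/* always true here */
}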
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index e256f2270ec9..7a1e075969a3 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -161,7 +161,8 @@
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
XCHAL_HAVE_SPANNING_WAY */
-#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS && \
+ (XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE)
/* Enable data and instruction cache in the DEFAULT_MEMORY region
* if the processor has DTLB and ITLB.
*/
@@ -175,14 +176,18 @@
1:
sub a9, a9, a8
2:
+#if XCHAL_DCACHE_SIZE
rdtlb1 a3, a5
- ritlb1 a4, a5
and a3, a3, a6
- and a4, a4, a6
or a3, a3, a7
- or a4, a4, a7
wdtlb a3, a5
+#endif
+#if XCHAL_ICACHE_SIZE
+ ritlb1 a4, a5
+ and a4, a4, a6
+ or a4, a4, a7
witlb a4, a5
+#endif
add a5, a5, a8
bltu a8, a9, 1b
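
The reworked loop updates the DTLB entry only when a data cache exists and the ITLB entry only when an instruction cache exists, rather than rewriting both unconditionally. A C-level rendering of the loop's intent, with all helper names hypothetical (the authoritative code is the assembly above):

/* Hypothetical wrappers around the rdtlb1/wdtlb and ritlb1/witlb instructions */
extern unsigned long read_dtlb1(unsigned long vaddr);
extern void write_dtlb(unsigned long vaddr, unsigned long entry);
extern unsigned long read_itlb1(unsigned long vaddr);
extern void write_itlb(unsigned long vaddr, unsigned long entry);

static void enable_default_memory_caches(unsigned long start, unsigned long end,
					 unsigned long step, unsigned long mask,
					 unsigned long cached_attr)
{
	unsigned long v;

	for (v = start; v < end; v += step) {
#if XCHAL_DCACHE_SIZE
		write_dtlb(v, (read_dtlb1(v) & mask) | cached_attr);
#endif
#if XCHAL_ICACHE_SIZE
		write_itlb(v, (read_itlb1(v) & mask) | cached_attr);
#endif
	}
}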
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 867840f5400f..74fed0b4e2c2 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -25,15 +25,6 @@
#ifdef CONFIG_MMU
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
-extern unsigned long xtensa_kio_paddr;
-
-static inline unsigned long xtensa_get_kio_paddr(void)
-{
- return xtensa_kio_paddr;
-}
-#endif
-
/*
* Return the virtual address for the specified bus memory.
* Note that we currently don't support any address outside the KIO segment.
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index a5e929a10c20..fb02fdc5ecee 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -18,7 +18,11 @@
* We only use two ring levels, user and kernel space.
*/
+#ifdef CONFIG_MMU
#define USER_RING 1 /* user ring level */
+#else
+#define USER_RING 0
+#endif
#define KERNEL_RING 0 /* kernel ring level */
/*
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index a46c53f36113..288c776736d3 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -21,13 +21,26 @@
#include <variant/core.h>
#include <platform/hardware.h>
+#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR 0xe0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
+#endif
#define XCHAL_KIO_SIZE 0x10000000
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+ return xtensa_kio_paddr;
+}
+#endif
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
#endif
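
With this hunk, XCHAL_KIO_PADDR is read at run time from the DT-populated xtensa_kio_paddr whenever the device tree can actually place the KIO region (no page-table MMU, or an MMU with a spanning way), and falls back to the compile-time default otherwise. A hedged sketch of how an MMIO virtual address follows from these constants (helper name hypothetical):

#include <linux/compiler.h>
#include <asm/vectors.h>

/* Hypothetical helper: map a physical address inside the KIO region to
 * its uncached (bypass) virtual address, mirroring the constants above. */
static inline void __iomem *kio_bypass_vaddr(unsigned long paddr)
{
	return (void __iomem *)(paddr - XCHAL_KIO_PADDR + XCHAL_KIO_BYPASS_VADDR);
}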
@@ -48,6 +61,9 @@
#define LOAD_MEMORY_ADDRESS 0xD0003000
#endif
+#define RESET_VECTOR1_VADDR (VIRTUAL_MEMORY_ADDRESS + \
+ XCHAL_RESET_VECTOR1_PADDR)
+
#else /* !defined(CONFIG_MMU) */
/* MMU Not being used - Virtual == Physical */
@@ -60,6 +76,8 @@
/* Loaded just above possibly live vectors */
#define LOAD_MEMORY_ADDRESS (PLATFORM_DEFAULT_MEM_START + 0x3000)
+#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
+
#endif /* CONFIG_MMU */
#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset)
@@ -67,14 +85,6 @@
/* Used to set VECBASE register */
#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
-#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \
- VECBASE_RESET_VADDR)
-#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS)
-
-#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \
- VECBASE_RESET_VADDR)
-#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS)
-
#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS)
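
The reset vector addresses are no longer derived from VECBASE offsets: on MMU kernels RESET_VECTOR1_VADDR is the core's physical reset vector relocated by the virtual memory base, and on noMMU kernels it is XCHAL_RESET_VECTOR1_VADDR unchanged. A tiny arithmetic sketch of the MMU case (values illustrative only, not taken from any real core):

#include <stdio.h>

int main(void)
{
	/* hypothetical example values */
	unsigned long virtual_memory_address = 0xd0000000UL;	/* VIRTUAL_MEMORY_ADDRESS */
	unsigned long reset_vector1_paddr    = 0x00000400UL;	/* XCHAL_RESET_VECTOR1_PADDR */

	printf("RESET_VECTOR1_VADDR = 0x%08lx\n",
	       virtual_memory_address + reset_vector1_paddr);
	return 0;
}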