path: root/include/linux/io-mapping.h
author		Keith Packard <keithp@keithp.com>	2008-11-03 18:21:45 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-11-03 18:21:45 +0100
commit		e5beae16901795223d677f15aa2fe192976278ee (patch)
tree		da0879ee11a79beda5e95aa3c541ea6ff5322a53 /include/linux/io-mapping.h
parent		8d5c6603c408d91ecf543f244f10ccb8b500ad95 (diff)
io mapping: clean up #ifdefs
Impact: cleanup

Clean up ifdefs: change #ifdef CONFIG_X86_32/64 to CONFIG_HAVE_ATOMIC_IOMAP,
and flip the #ifdef sections around to clean up the structure.

Signed-off-by: Keith Packard <keithp@keithp.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/io-mapping.h')
-rw-r--r--	include/linux/io-mapping.h	43
1 file changed, 25 insertions(+), 18 deletions(-)
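
Both branches of the #ifdef implement the same six-function interface, so callers are unaffected by the cleanup. For context, here is a minimal, hypothetical driver-side sketch of how the API declared in this header is typically used; the function names, the aperture base/size, the offset, and the value written are illustrative assumptions, not part of this patch.

/*
 * Hypothetical usage sketch of the io_mapping API from
 * include/linux/io-mapping.h.  The aperture base/size and the offsets
 * are made-up values; error handling is abbreviated.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/io-mapping.h>

static struct io_mapping *aperture;

static int example_setup(unsigned long aperture_base, unsigned long aperture_size)
{
	/* One-time setup: describe the write-combined aperture. */
	aperture = io_mapping_create_wc(aperture_base, aperture_size);
	if (!aperture)
		return -ENOMEM;
	return 0;
}

static void example_write_dword(unsigned long offset, u32 val)
{
	/*
	 * Short-lived, per-page mapping.  With CONFIG_HAVE_ATOMIC_IOMAP
	 * (32-bit x86) this uses a fixmap slot and must not sleep before
	 * the unmap; elsewhere it resolves to pointer arithmetic on the
	 * ioremap_wc() mapping.
	 */
	void *vaddr = io_mapping_map_atomic_wc(aperture, offset & PAGE_MASK);

	writel(val, vaddr + (offset & ~PAGE_MASK));
	io_mapping_unmap_atomic(vaddr);
}

static void example_teardown(void)
{
	io_mapping_free(aperture);
}
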
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 1b566993db6e..82df31726a54 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -33,86 +33,93 @@
/* this struct isn't actually defined anywhere */
struct io_mapping;
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+
+/*
+ * For small address space machines, mapping large objects
+ * into the kernel virtual space isn't practical. Where
+ * available, use fixmap support to dynamically map pages
+ * of the object at run time.
+ */
-/* Create the io_mapping object*/
static inline struct io_mapping *
io_mapping_create_wc(unsigned long base, unsigned long size)
{
- return (struct io_mapping *) ioremap_wc(base, size);
+ return (struct io_mapping *) base;
}
static inline void
io_mapping_free(struct io_mapping *mapping)
{
- iounmap(mapping);
}
/* Atomic map/unmap */
static inline void *
io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
{
- return ((char *) mapping) + offset;
+ offset += (unsigned long) mapping;
+ return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0,
+ __pgprot(__PAGE_KERNEL_WC));
}
static inline void
io_mapping_unmap_atomic(void *vaddr)
{
+ iounmap_atomic(vaddr, KM_USER0);
}
-/* Non-atomic map/unmap */
static inline void *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
- return ((char *) mapping) + offset;
+ offset += (unsigned long) mapping;
+ return ioremap_wc(offset, PAGE_SIZE);
}
static inline void
io_mapping_unmap(void *vaddr)
{
+ iounmap(vaddr);
}
-#endif /* CONFIG_X86_64 */
+#else
-#ifdef CONFIG_X86_32
+/* Create the io_mapping object*/
static inline struct io_mapping *
io_mapping_create_wc(unsigned long base, unsigned long size)
{
- return (struct io_mapping *) base;
+ return (struct io_mapping *) ioremap_wc(base, size);
}
static inline void
io_mapping_free(struct io_mapping *mapping)
{
+ iounmap(mapping);
}
/* Atomic map/unmap */
static inline void *
io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
{
- offset += (unsigned long) mapping;
- return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0,
- __pgprot(__PAGE_KERNEL_WC));
+ return ((char *) mapping) + offset;
}
static inline void
io_mapping_unmap_atomic(void *vaddr)
{
- iounmap_atomic(vaddr, KM_USER0);
}
+/* Non-atomic map/unmap */
static inline void *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
- offset += (unsigned long) mapping;
- return ioremap_wc(offset, PAGE_SIZE);
+ return ((char *) mapping) + offset;
}
static inline void
io_mapping_unmap(void *vaddr)
{
- iounmap(vaddr);
}
-#endif /* CONFIG_X86_32 */
+
+#endif /* HAVE_ATOMIC_IOMAP */
#endif /* _LINUX_IO_MAPPING_H */
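
The non-atomic pair, io_mapping_map_wc()/io_mapping_unmap(), is meant for contexts that may sleep or need a longer-lived per-page mapping. A hedged sketch follows; the helper name, the buffer, and the page-aligned offset are assumptions for illustration only.

/*
 * Sketch of the non-atomic path: usable from sleepable context, e.g.
 * copying one page of data into the aperture.  "offset" is assumed to
 * be page aligned.
 */
#include <linux/io.h>
#include <linux/io-mapping.h>

static void example_copy_page(struct io_mapping *aperture,
			      unsigned long offset, const void *buf)
{
	/*
	 * With CONFIG_HAVE_ATOMIC_IOMAP this ioremaps a single page of
	 * the aperture; otherwise it is pointer arithmetic on the
	 * existing write-combined mapping.
	 */
	void *vaddr = io_mapping_map_wc(aperture, offset);

	if (!vaddr)
		return;

	memcpy_toio(vaddr, buf, PAGE_SIZE);
	io_mapping_unmap(vaddr);
}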