author    Tony Luck <tony.luck@intel.com>    2008-08-01 10:13:32 -0700
committer Tony Luck <tony.luck@intel.com>    2008-08-01 10:21:21 -0700
commit    7f30491ccd28627742e37899453ae20e3da8e18f
tree      7291c0a26ed3a31acf9542857af3981d278f5de8 /arch
parent    94ad374a0751f40d25e22e036c37f7263569d24c
[IA64] Move include/asm-ia64 to arch/ia64/include/asm
After moving the include files there were a few clean-ups:

1) Some files used #include <asm-ia64/xyz.h>, changed to <asm/xyz.h>

2) Some comments alerted maintainers to look at various header files to
   make matching updates if certain code were to be changed. Updated these
   comments to use the new include paths.

3) Some header files mentioned their own names in initial comments. Just
   deleted these self references.

Signed-off-by: Tony Luck <tony.luck@intel.com>
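As an illustration of clean-up 1), the fix amounts to rewriting include
directives of this form (a hypothetical example; the headers actually touched
are listed in the diffstat below):

    /* before: spelled against the old include/asm-ia64 location */
    #include <asm-ia64/intrinsics.h>

    /* after: the conventional <asm/...> form, now resolved via
       arch/ia64/include/asm */
    #include <asm/intrinsics.h>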
Diffstat (limited to 'arch')
-rw-r--r--arch/ia64/include/asm/Kbuild16
-rw-r--r--arch/ia64/include/asm/a.out.h32
-rw-r--r--arch/ia64/include/asm/acpi-ext.h21
-rw-r--r--arch/ia64/include/asm/acpi.h165
-rw-r--r--arch/ia64/include/asm/agp.h30
-rw-r--r--arch/ia64/include/asm/asmmacro.h135
-rw-r--r--arch/ia64/include/asm/atomic.h226
-rw-r--r--arch/ia64/include/asm/auxvec.h11
-rw-r--r--arch/ia64/include/asm/bitops.h468
-rw-r--r--arch/ia64/include/asm/break.h23
-rw-r--r--arch/ia64/include/asm/bug.h14
-rw-r--r--arch/ia64/include/asm/bugs.h19
-rw-r--r--arch/ia64/include/asm/byteorder.h42
-rw-r--r--arch/ia64/include/asm/cache.h29
-rw-r--r--arch/ia64/include/asm/cacheflush.h51
-rw-r--r--arch/ia64/include/asm/checksum.h79
-rw-r--r--arch/ia64/include/asm/compat.h207
-rw-r--r--arch/ia64/include/asm/cpu.h22
-rw-r--r--arch/ia64/include/asm/cputime.h109
-rw-r--r--arch/ia64/include/asm/current.h17
-rw-r--r--arch/ia64/include/asm/cyclone.h15
-rw-r--r--arch/ia64/include/asm/delay.h88
-rw-r--r--arch/ia64/include/asm/device.h15
-rw-r--r--arch/ia64/include/asm/div64.h1
-rw-r--r--arch/ia64/include/asm/dma-mapping.h97
-rw-r--r--arch/ia64/include/asm/dma.h24
-rw-r--r--arch/ia64/include/asm/dmi.h11
-rw-r--r--arch/ia64/include/asm/elf.h269
-rw-r--r--arch/ia64/include/asm/emergency-restart.h6
-rw-r--r--arch/ia64/include/asm/errno.h1
-rw-r--r--arch/ia64/include/asm/esi.h29
-rw-r--r--arch/ia64/include/asm/fb.h23
-rw-r--r--arch/ia64/include/asm/fcntl.h13
-rw-r--r--arch/ia64/include/asm/fpswa.h73
-rw-r--r--arch/ia64/include/asm/fpu.h66
-rw-r--r--arch/ia64/include/asm/futex.h124
-rw-r--r--arch/ia64/include/asm/gcc_intrin.h620
-rw-r--r--arch/ia64/include/asm/hardirq.h37
-rw-r--r--arch/ia64/include/asm/hpsim.h16
-rw-r--r--arch/ia64/include/asm/hugetlb.h80
-rw-r--r--arch/ia64/include/asm/hw_irq.h192
-rw-r--r--arch/ia64/include/asm/ia32.h40
-rw-r--r--arch/ia64/include/asm/ia64regs.h100
-rw-r--r--arch/ia64/include/asm/intel_intrin.h161
-rw-r--r--arch/ia64/include/asm/intrinsics.h241
-rw-r--r--arch/ia64/include/asm/io.h459
-rw-r--r--arch/ia64/include/asm/ioctl.h1
-rw-r--r--arch/ia64/include/asm/ioctls.h93
-rw-r--r--arch/ia64/include/asm/iosapic.h126
-rw-r--r--arch/ia64/include/asm/ipcbuf.h28
-rw-r--r--arch/ia64/include/asm/irq.h34
-rw-r--r--arch/ia64/include/asm/irq_regs.h1
-rw-r--r--arch/ia64/include/asm/kdebug.h57
-rw-r--r--arch/ia64/include/asm/kexec.h44
-rw-r--r--arch/ia64/include/asm/kmap_types.h30
-rw-r--r--arch/ia64/include/asm/kprobes.h132
-rw-r--r--arch/ia64/include/asm/kregs.h165
-rw-r--r--arch/ia64/include/asm/kvm.h211
-rw-r--r--arch/ia64/include/asm/kvm_host.h527
-rw-r--r--arch/ia64/include/asm/kvm_para.h27
-rw-r--r--arch/ia64/include/asm/libata-portmap.h12
-rw-r--r--arch/ia64/include/asm/linkage.h14
-rw-r--r--arch/ia64/include/asm/local.h1
-rw-r--r--arch/ia64/include/asm/machvec.h460
-rw-r--r--arch/ia64/include/asm/machvec_dig.h16
-rw-r--r--arch/ia64/include/asm/machvec_hpsim.h18
-rw-r--r--arch/ia64/include/asm/machvec_hpzx1.h37
-rw-r--r--arch/ia64/include/asm/machvec_hpzx1_swiotlb.h42
-rw-r--r--arch/ia64/include/asm/machvec_init.h33
-rw-r--r--arch/ia64/include/asm/machvec_sn2.h139
-rw-r--r--arch/ia64/include/asm/machvec_uv.h26
-rw-r--r--arch/ia64/include/asm/mc146818rtc.h10
-rw-r--r--arch/ia64/include/asm/mca.h179
-rw-r--r--arch/ia64/include/asm/mca_asm.h242
-rw-r--r--arch/ia64/include/asm/meminit.h75
-rw-r--r--arch/ia64/include/asm/mman.h33
-rw-r--r--arch/ia64/include/asm/mmu.h13
-rw-r--r--arch/ia64/include/asm/mmu_context.h198
-rw-r--r--arch/ia64/include/asm/mmzone.h50
-rw-r--r--arch/ia64/include/asm/module.h36
-rw-r--r--arch/ia64/include/asm/msgbuf.h27
-rw-r--r--arch/ia64/include/asm/mutex.h92
-rw-r--r--arch/ia64/include/asm/native/inst.h175
-rw-r--r--arch/ia64/include/asm/native/irq.h33
-rw-r--r--arch/ia64/include/asm/nodedata.h63
-rw-r--r--arch/ia64/include/asm/numa.h82
-rw-r--r--arch/ia64/include/asm/page.h223
-rw-r--r--arch/ia64/include/asm/pal.h1827
-rw-r--r--arch/ia64/include/asm/param.h33
-rw-r--r--arch/ia64/include/asm/paravirt.h253
-rw-r--r--arch/ia64/include/asm/paravirt_privop.h112
-rw-r--r--arch/ia64/include/asm/parport.h20
-rw-r--r--arch/ia64/include/asm/patch.h27
-rw-r--r--arch/ia64/include/asm/pci.h167
-rw-r--r--arch/ia64/include/asm/percpu.h51
-rw-r--r--arch/ia64/include/asm/perfmon.h279
-rw-r--r--arch/ia64/include/asm/perfmon_default_smpl.h83
-rw-r--r--arch/ia64/include/asm/pgalloc.h122
-rw-r--r--arch/ia64/include/asm/pgtable.h615
-rw-r--r--arch/ia64/include/asm/poll.h1
-rw-r--r--arch/ia64/include/asm/posix_types.h126
-rw-r--r--arch/ia64/include/asm/processor.h771
-rw-r--r--arch/ia64/include/asm/ptrace.h364
-rw-r--r--arch/ia64/include/asm/ptrace_offsets.h268
-rw-r--r--arch/ia64/include/asm/resource.h7
-rw-r--r--arch/ia64/include/asm/rse.h66
-rw-r--r--arch/ia64/include/asm/rwsem.h182
-rw-r--r--arch/ia64/include/asm/sal.h905
-rw-r--r--arch/ia64/include/asm/scatterlist.h38
-rw-r--r--arch/ia64/include/asm/sections.h25
-rw-r--r--arch/ia64/include/asm/segment.h6
-rw-r--r--arch/ia64/include/asm/sembuf.h22
-rw-r--r--arch/ia64/include/asm/serial.h17
-rw-r--r--arch/ia64/include/asm/setup.h6
-rw-r--r--arch/ia64/include/asm/shmbuf.h38
-rw-r--r--arch/ia64/include/asm/shmparam.h12
-rw-r--r--arch/ia64/include/asm/sigcontext.h70
-rw-r--r--arch/ia64/include/asm/siginfo.h139
-rw-r--r--arch/ia64/include/asm/signal.h160
-rw-r--r--arch/ia64/include/asm/smp.h138
-rw-r--r--arch/ia64/include/asm/sn/acpi.h17
-rw-r--r--arch/ia64/include/asm/sn/addrs.h299
-rw-r--r--arch/ia64/include/asm/sn/arch.h86
-rw-r--r--arch/ia64/include/asm/sn/bte.h233
-rw-r--r--arch/ia64/include/asm/sn/clksupport.h28
-rw-r--r--arch/ia64/include/asm/sn/geo.h132
-rw-r--r--arch/ia64/include/asm/sn/intr.h68
-rw-r--r--arch/ia64/include/asm/sn/io.h274
-rw-r--r--arch/ia64/include/asm/sn/ioc3.h241
-rw-r--r--arch/ia64/include/asm/sn/klconfig.h246
-rw-r--r--arch/ia64/include/asm/sn/l1.h51
-rw-r--r--arch/ia64/include/asm/sn/leds.h33
-rw-r--r--arch/ia64/include/asm/sn/module.h127
-rw-r--r--arch/ia64/include/asm/sn/mspec.h59
-rw-r--r--arch/ia64/include/asm/sn/nodepda.h82
-rw-r--r--arch/ia64/include/asm/sn/pcibr_provider.h150
-rw-r--r--arch/ia64/include/asm/sn/pcibus_provider_defs.h68
-rw-r--r--arch/ia64/include/asm/sn/pcidev.h85
-rw-r--r--arch/ia64/include/asm/sn/pda.h69
-rw-r--r--arch/ia64/include/asm/sn/pic.h261
-rw-r--r--arch/ia64/include/asm/sn/rw_mmr.h28
-rw-r--r--arch/ia64/include/asm/sn/shub_mmr.h502
-rw-r--r--arch/ia64/include/asm/sn/shubio.h3358
-rw-r--r--arch/ia64/include/asm/sn/simulator.h25
-rw-r--r--arch/ia64/include/asm/sn/sn2/sn_hwperf.h242
-rw-r--r--arch/ia64/include/asm/sn/sn_cpuid.h132
-rw-r--r--arch/ia64/include/asm/sn/sn_feature_sets.h58
-rw-r--r--arch/ia64/include/asm/sn/sn_sal.h1188
-rw-r--r--arch/ia64/include/asm/sn/tioca.h596
-rw-r--r--arch/ia64/include/asm/sn/tioca_provider.h207
-rw-r--r--arch/ia64/include/asm/sn/tioce.h760
-rw-r--r--arch/ia64/include/asm/sn/tioce_provider.h63
-rw-r--r--arch/ia64/include/asm/sn/tiocp.h257
-rw-r--r--arch/ia64/include/asm/sn/tiocx.h72
-rw-r--r--arch/ia64/include/asm/sn/types.h26
-rw-r--r--arch/ia64/include/asm/socket.h66
-rw-r--r--arch/ia64/include/asm/sockios.h20
-rw-r--r--arch/ia64/include/asm/sparsemem.h20
-rw-r--r--arch/ia64/include/asm/spinlock.h220
-rw-r--r--arch/ia64/include/asm/spinlock_types.h21
-rw-r--r--arch/ia64/include/asm/stat.h51
-rw-r--r--arch/ia64/include/asm/statfs.h62
-rw-r--r--arch/ia64/include/asm/string.h21
-rw-r--r--arch/ia64/include/asm/suspend.h1
-rw-r--r--arch/ia64/include/asm/system.h292
-rw-r--r--arch/ia64/include/asm/termbits.h207
-rw-r--r--arch/ia64/include/asm/termios.h97
-rw-r--r--arch/ia64/include/asm/thread_info.h148
-rw-r--r--arch/ia64/include/asm/timex.h42
-rw-r--r--arch/ia64/include/asm/tlb.h257
-rw-r--r--arch/ia64/include/asm/tlbflush.h102
-rw-r--r--arch/ia64/include/asm/topology.h126
-rw-r--r--arch/ia64/include/asm/types.h46
-rw-r--r--arch/ia64/include/asm/uaccess.h401
-rw-r--r--arch/ia64/include/asm/ucontext.h12
-rw-r--r--arch/ia64/include/asm/unaligned.h11
-rw-r--r--arch/ia64/include/asm/uncached.h12
-rw-r--r--arch/ia64/include/asm/unistd.h384
-rw-r--r--arch/ia64/include/asm/unwind.h233
-rw-r--r--arch/ia64/include/asm/user.h58
-rw-r--r--arch/ia64/include/asm/ustack.h20
-rw-r--r--arch/ia64/include/asm/uv/uv_hub.h309
-rw-r--r--arch/ia64/include/asm/uv/uv_mmrs.h673
-rw-r--r--arch/ia64/include/asm/vga.h25
-rw-r--r--arch/ia64/include/asm/xor.h31
-rw-r--r--arch/ia64/kernel/asm-offsets.c10
-rw-r--r--arch/ia64/kernel/head.S2
-rw-r--r--arch/ia64/kernel/iosapic.c2
-rw-r--r--arch/ia64/kernel/jprobes.S2
-rw-r--r--arch/ia64/kernel/nr-irqs.c2
-rw-r--r--arch/ia64/kernel/setup.c2
-rw-r--r--arch/ia64/sn/kernel/iomv.c2
192 files changed, 29374 insertions, 11 deletions
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
new file mode 100644
index 000000000000..ccbe8ae47a61
--- /dev/null
+++ b/arch/ia64/include/asm/Kbuild
@@ -0,0 +1,16 @@
+include include/asm-generic/Kbuild.asm
+
+header-y += break.h
+header-y += fpu.h
+header-y += fpswa.h
+header-y += ia64regs.h
+header-y += intel_intrin.h
+header-y += perfmon_default_smpl.h
+header-y += ptrace_offsets.h
+header-y += rse.h
+header-y += ucontext.h
+
+unifdef-y += gcc_intrin.h
+unifdef-y += intrinsics.h
+unifdef-y += perfmon.h
+unifdef-y += ustack.h
diff --git a/arch/ia64/include/asm/a.out.h b/arch/ia64/include/asm/a.out.h
new file mode 100644
index 000000000000..193dcfb67596
--- /dev/null
+++ b/arch/ia64/include/asm/a.out.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_IA64_A_OUT_H
+#define _ASM_IA64_A_OUT_H
+
+/*
+ * No a.out format has been (or should be) defined so this file is
+ * just a dummy that allows us to get binfmt_elf compiled. It
+ * probably would be better to clean up binfmt_elf.c so it does not
+ * necessarily depend on there being a.out support.
+ *
+ * Modified 1998-2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#include <linux/types.h>
+
+struct exec {
+ unsigned long a_info;
+ unsigned long a_text;
+ unsigned long a_data;
+ unsigned long a_bss;
+ unsigned long a_entry;
+};
+
+#define N_TXTADDR(x) 0
+#define N_DATADDR(x) 0
+#define N_BSSADDR(x) 0
+#define N_DRSIZE(x) 0
+#define N_TRSIZE(x) 0
+#define N_SYMSIZE(x) 0
+#define N_TXTOFF(x) 0
+
+#endif /* _ASM_IA64_A_OUT_H */
diff --git a/arch/ia64/include/asm/acpi-ext.h b/arch/ia64/include/asm/acpi-ext.h
new file mode 100644
index 000000000000..734d137dda6e
--- /dev/null
+++ b/arch/ia64/include/asm/acpi-ext.h
@@ -0,0 +1,21 @@
+/*
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ * Alex Williamson <alex.williamson@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vendor specific extensions to ACPI.
+ */
+
+#ifndef _ASM_IA64_ACPI_EXT_H
+#define _ASM_IA64_ACPI_EXT_H
+
+#include <linux/types.h>
+#include <acpi/actypes.h>
+
+extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
+
+#endif /* _ASM_IA64_ACPI_EXT_H */
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
new file mode 100644
index 000000000000..0f82cc2934e1
--- /dev/null
+++ b/arch/ia64/include/asm/acpi.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
+ * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+#ifdef __KERNEL__
+
+#include <acpi/pdc_intel.h>
+
+#include <linux/init.h>
+#include <linux/numa.h>
+#include <asm/system.h>
+#include <asm/numa.h>
+
+#define COMPILER_DEPENDENT_INT64 long
+#define COMPILER_DEPENDENT_UINT64 unsigned long
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE - External ACPI interfaces
+ * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
+ */
+#define ACPI_SYSTEM_XFACE
+#define ACPI_EXTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+
+/* Asm macros */
+
+#define ACPI_ASM_MACROS
+#define BREAKPOINT3
+#define ACPI_DISABLE_IRQS() local_irq_disable()
+#define ACPI_ENABLE_IRQS() local_irq_enable()
+#define ACPI_FLUSH_CPU_CACHE()
+
+static inline int
+ia64_acpi_acquire_global_lock (unsigned int *lock)
+{
+ unsigned int old, new, val;
+ do {
+ old = *lock;
+ new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+ val = ia64_cmpxchg4_acq(lock, new, old);
+ } while (unlikely (val != old));
+ return (new < 3) ? -1 : 0;
+}
+
+static inline int
+ia64_acpi_release_global_lock (unsigned int *lock)
+{
+ unsigned int old, new, val;
+ do {
+ old = *lock;
+ new = old & ~0x3;
+ val = ia64_cmpxchg4_acq(lock, new, old);
+ } while (unlikely (val != old));
+ return old & 0x1;
+}
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
+ ((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
+ ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
+
+#define acpi_disabled 0 /* ACPI always enabled on IA64 */
+#define acpi_noirq 0 /* ACPI always enabled on IA64 */
+#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
+#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
+#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
+static inline void disable_acpi(void) { }
+
+const char *acpi_get_sysname (void);
+int acpi_request_vector (u32 int_type);
+int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+
+/* routines for saving/restoring kernel state */
+extern int acpi_save_state_mem(void);
+extern void acpi_restore_state_mem(void);
+extern unsigned long acpi_wakeup_address;
+
+/*
+ * Record the cpei override flag and current logical cpu. This is
+ * useful for CPU removal.
+ */
+extern unsigned int can_cpei_retarget(void);
+extern unsigned int is_cpu_cpei_target(unsigned int cpu);
+extern void set_cpei_target_cpu(unsigned int cpu);
+extern unsigned int get_cpei_target_cpu(void);
+extern void prefill_possible_map(void);
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+extern int additional_cpus;
+#else
+#define additional_cpus 0
+#endif
+
+#ifdef CONFIG_ACPI_NUMA
+#if MAX_NUMNODES > 256
+#define MAX_PXM_DOMAINS MAX_NUMNODES
+#else
+#define MAX_PXM_DOMAINS (256)
+#endif
+extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
+extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
+#endif
+
+#define acpi_unlazy_tlb(x)
+
+#ifdef CONFIG_ACPI_NUMA
+extern cpumask_t early_cpu_possible_map;
+#define for_each_possible_early_cpu(cpu) \
+ for_each_cpu_mask((cpu), early_cpu_possible_map)
+
+static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
+{
+ int low_cpu, high_cpu;
+ int cpu;
+ int next_nid = 0;
+
+ low_cpu = cpus_weight(early_cpu_possible_map);
+
+ high_cpu = max(low_cpu, min_cpus);
+ high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
+
+ for (cpu = low_cpu; cpu < high_cpu; cpu++) {
+ cpu_set(cpu, early_cpu_possible_map);
+ if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
+ node_cpuid[cpu].nid = next_nid;
+ next_nid++;
+ if (next_nid >= num_online_nodes())
+ next_nid = 0;
+ }
+ }
+}
+#endif /* CONFIG_ACPI_NUMA */
+
+#endif /*__KERNEL__*/
+
+#endif /*_ASM_ACPI_H*/
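A note on the global-lock helpers in acpi.h above: they implement the ACPI
FACS global-lock protocol, in which bit 0 of the lock word is the "pending"
flag and bit 1 is the "owned" flag; the acquire path always sets "owned" and
additionally sets "pending" when the lock was already held (the
((old >> 1) & 0x1) term). Below is a minimal user-space model of that acquire
path — an illustration of the bit protocol, not the kernel code — with GCC's
__sync_val_compare_and_swap standing in for ia64_cmpxchg4_acq (note the
kernel intrinsic takes (ptr, new, old) while the builtin takes
(ptr, old, new)):

    #include <stdio.h>

    /* Bit 0 = pending, bit 1 = owned (ACPI FACS global-lock layout). */
    static int acquire_global_lock(unsigned int *lock)
    {
            unsigned int old, new;
            do {
                    old = *lock;
                    /* set "owned"; set "pending" only if already owned */
                    new = ((old & ~0x3u) + 2) + ((old >> 1) & 0x1);
            } while (__sync_val_compare_and_swap(lock, old, new) != old);
            return (new < 3) ? -1 : 0;  /* -1: acquired, 0: must wait */
    }

    int main(void)
    {
            unsigned int lock = 0;
            printf("%d\n", acquire_global_lock(&lock));  /* -1: lock was free */
            printf("%d\n", acquire_global_lock(&lock));  /*  0: now pending   */
            return 0;
    }

This matches the convention of ACPI_ACQUIRE_GLOBAL_LOCK above, where a
non-zero Acq means the lock was obtained.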
diff --git a/arch/ia64/include/asm/agp.h b/arch/ia64/include/asm/agp.h
new file mode 100644
index 000000000000..c11fdd8ab4d7
--- /dev/null
+++ b/arch/ia64/include/asm/agp.h
@@ -0,0 +1,30 @@
+#ifndef _ASM_IA64_AGP_H
+#define _ASM_IA64_AGP_H
+
+/*
+ * IA-64 specific AGP definitions.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
+ * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
+ * (unlike x86, where it gets mapped "write-coalescing").
+ */
+#define map_page_into_agp(page) /* nothing */
+#define unmap_page_from_agp(page) /* nothing */
+#define flush_agp_cache() mb()
+
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
+#endif /* _ASM_IA64_AGP_H */
diff --git a/arch/ia64/include/asm/asmmacro.h b/arch/ia64/include/asm/asmmacro.h
new file mode 100644
index 000000000000..c1642fd64029
--- /dev/null
+++ b/arch/ia64/include/asm/asmmacro.h
@@ -0,0 +1,135 @@
+#ifndef _ASM_IA64_ASMMACRO_H
+#define _ASM_IA64_ASMMACRO_H
+
+/*
+ * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#define ENTRY(name) \
+ .align 32; \
+ .proc name; \
+name:
+
+#define ENTRY_MIN_ALIGN(name) \
+ .align 16; \
+ .proc name; \
+name:
+
+#define GLOBAL_ENTRY(name) \
+ .global name; \
+ ENTRY(name)
+
+#define END(name) \
+ .endp name
+
+/*
+ * Helper macros to make unwind directives more readable:
+ */
+
+/* prologue_gr: */
+#define ASM_UNW_PRLG_RP 0x8
+#define ASM_UNW_PRLG_PFS 0x4
+#define ASM_UNW_PRLG_PSP 0x2
+#define ASM_UNW_PRLG_PR 0x1
+#define ASM_UNW_PRLG_GRSAVE(ninputs) (32+(ninputs))
+
+/*
+ * Helper macros for accessing user memory.
+ *
+ * When adding any new .section/.previous entries here, make sure to
+ * also add it to the DISCARD section in arch/ia64/kernel/gate.lds.S or
+ * unpleasant things will happen.
+ */
+
+ .section "__ex_table", "a" // declare section & section attributes
+ .previous
+
+# define EX(y,x...) \
+ .xdata4 "__ex_table", 99f-., y-.; \
+ [99:] x
+# define EXCLR(y,x...) \
+ .xdata4 "__ex_table", 99f-., y-.+4; \
+ [99:] x
+
+/*
+ * Tag MCA recoverable instruction ranges.
+ */
+
+ .section "__mca_table", "a" // declare section & section attributes
+ .previous
+
+# define MCA_RECOVER_RANGE(y) \
+ .xdata4 "__mca_table", y-., 99f-.; \
+ [99:]
+
+/*
+ * Mark instructions that need a load of a virtual address patched to be
+ * a load of a physical address. We use this either in critical performance
+ * path (ivt.S - TLB miss processing) or in places where it might not be
+ * safe to use a "tpa" instruction (mca_asm.S - error recovery).
+ */
+ .section ".data.patch.vtop", "a" // declare section & section attributes
+ .previous
+
+#define LOAD_PHYSICAL(pr, reg, obj) \
+[1:](pr)movl reg = obj; \
+ .xdata4 ".data.patch.vtop", 1b-.
+
+/*
+ * For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
+ * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
+ */
+#define DO_MCKINLEY_E9_WORKAROUND
+
+#ifdef DO_MCKINLEY_E9_WORKAROUND
+ .section ".data.patch.mckinley_e9", "a"
+ .previous
+/* workaround for Itanium 2 Errata 9: */
+# define FSYS_RETURN \
+ .xdata4 ".data.patch.mckinley_e9", 1f-.; \
+1:{ .mib; \
+ nop.m 0; \
+ mov r16=ar.pfs; \
+ br.call.sptk.many b7=2f;; \
+ }; \
+2:{ .mib; \
+ nop.m 0; \
+ mov ar.pfs=r16; \
+ br.ret.sptk.many b6;; \
+ }
+#else
+# define FSYS_RETURN br.ret.sptk.many b6
+#endif
+
+/*
+ * If physical stack register size is different from DEF_NUM_STACK_REG,
+ * dynamically patch the kernel for correct size.
+ */
+ .section ".data.patch.phys_stack_reg", "a"
+ .previous
+#define LOAD_PHYS_STACK_REG_SIZE(reg) \
+[1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \
+ .xdata4 ".data.patch.phys_stack_reg", 1b-.
+
+/*
+ * Up until early 2004, use of .align within a function caused bad unwind info.
+ * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
+ * otherwise.
+ */
+#ifdef HAVE_WORKING_TEXT_ALIGN
+# define TEXT_ALIGN(n) .align n
+#else
+# define TEXT_ALIGN(n)
+#endif
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define dv_serialize_data .serialize.data
+# define dv_serialize_instruction .serialize.instruction
+#else
+# define dv_serialize_data
+# define dv_serialize_instruction
+#endif
+
+#endif /* _ASM_IA64_ASMMACRO_H */
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
new file mode 100644
index 000000000000..50c2b83fd5a0
--- /dev/null
+++ b/arch/ia64/include/asm/atomic.h
@@ -0,0 +1,226 @@
+#ifndef _ASM_IA64_ATOMIC_H
+#define _ASM_IA64_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ *
+ * NOTE: don't mess with the types below! The "unsigned long" and
+ * "int" types were carefully placed so as to ensure proper operation
+ * of the macros.
+ *
+ * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/types.h>
+
+#include <asm/intrinsics.h>
+#include <asm/system.h>
+
+/*
+ * On IA-64, counter must always be volatile to ensure that the
+ * memory accesses are ordered.
+ */
+typedef struct { volatile __s32 counter; } atomic_t;
+typedef struct { volatile __s64 counter; } atomic64_t;
+
+#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+
+#define atomic_read(v) ((v)->counter)
+#define atomic64_read(v) ((v)->counter)
+
+#define atomic_set(v,i) (((v)->counter) = (i))
+#define atomic64_set(v,i) (((v)->counter) = (i))
+
+static __inline__ int
+ia64_atomic_add (int i, atomic_t *v)
+{
+ __s32 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic_read(v);
+ new = old + i;
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+ return new;
+}
+
+static __inline__ int
+ia64_atomic64_add (__s64 i, atomic64_t *v)
+{
+ __s64 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic64_read(v);
+ new = old + i;
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
+ return new;
+}
+
+static __inline__ int
+ia64_atomic_sub (int i, atomic_t *v)
+{
+ __s32 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic_read(v);
+ new = old - i;
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
+ return new;
+}
+
+static __inline__ int
+ia64_atomic64_sub (__s64 i, atomic64_t *v)
+{
+ __s64 old, new;
+ CMPXCHG_BUGCHECK_DECL
+
+ do {
+ CMPXCHG_BUGCHECK(v);
+ old = atomic64_read(v);
+ new = old - i;
+ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
+ return new;
+}
+
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic64_cmpxchg(v, old, new) \
+ (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#define atomic_add_return(i,v) \
+({ \
+ int __ia64_aar_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+ || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+ || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+ || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+ ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+ : ia64_atomic_add(__ia64_aar_i, v); \
+})
+
+#define atomic64_add_return(i,v) \
+({ \
+ long __ia64_aar_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
+ || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
+ || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
+ || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+ ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+ : ia64_atomic64_add(__ia64_aar_i, v); \
+})
+
+/*
+ * Atomically add I to V and return TRUE if the resulting value is
+ * negative.
+ */
+static __inline__ int
+atomic_add_negative (int i, atomic_t *v)
+{
+ return atomic_add_return(i, v) < 0;
+}
+
+static __inline__ int
+atomic64_add_negative (__s64 i, atomic64_t *v)
+{
+ return atomic64_add_return(i, v) < 0;
+}
+
+#define atomic_sub_return(i,v) \
+({ \
+ int __ia64_asr_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
+ || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
+ || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
+ || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+ ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+ : ia64_atomic_sub(__ia64_asr_i, v); \
+})
+
+#define atomic64_sub_return(i,v) \
+({ \
+ long __ia64_asr_i = (i); \
+ (__builtin_constant_p(i) \
+ && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
+ || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
+ || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
+ || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
+ ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+ : ia64_atomic64_sub(__ia64_asr_i, v); \
+})
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
+
+#define atomic_add(i,v) atomic_add_return((i), (v))
+#define atomic_sub(i,v) atomic_sub_return((i), (v))
+#define atomic_inc(v) atomic_add(1, (v))
+#define atomic_dec(v) atomic_sub(1, (v))
+
+#define atomic64_add(i,v) atomic64_add_return((i), (v))
+#define atomic64_sub(i,v) atomic64_sub_return((i), (v))
+#define atomic64_inc(v) atomic64_add(1, (v))
+#define atomic64_dec(v) atomic64_sub(1, (v))
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#include <asm-generic/atomic.h>
+#endif /* _ASM_IA64_ATOMIC_H */
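Two things are worth noting about atomic.h above. First, atomic_add_return()
only uses the ia64 fetchadd instruction (via ia64_fetch_and_add) when the
increment is a compile-time constant of +/-1, 4, 8, or 16 — the only
immediates fetchadd accepts — and falls back to a compare-exchange retry loop
otherwise. Second, that retry loop is the pattern behind all four
ia64_atomic*_add/sub helpers. A portable sketch of the fallback, with GCC's
__sync_val_compare_and_swap standing in for ia64_cmpxchg (an illustration
only, not the kernel implementation):

    /* Model of ia64_atomic_add(): retry until the compare-and-swap
     * observes the same value we computed "new" from. Returns the new
     * value, matching atomic_add_return() semantics. */
    static int model_atomic_add(int i, int *counter)
    {
            int old, new;
            do {
                    old = *counter;
                    new = old + i;
            } while (__sync_val_compare_and_swap(counter, old, new) != old);
            return new;
    }

    int main(void)
    {
            int v = 40;
            return model_atomic_add(2, &v) == 42 ? 0 : 1;
    }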
diff --git a/arch/ia64/include/asm/auxvec.h b/arch/ia64/include/asm/auxvec.h
new file mode 100644
index 000000000000..23cebe5685b9
--- /dev/null
+++ b/arch/ia64/include/asm/auxvec.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_IA64_AUXVEC_H
+#define _ASM_IA64_AUXVEC_H
+
+/*
+ * Architecture-neutral AT_ values are in the range 0-17. Leave some room for more of
+ * them; start the architecture-specific ones at 32.
+ */
+#define AT_SYSINFO 32
+#define AT_SYSINFO_EHDR 33
+
+#endif /* _ASM_IA64_AUXVEC_H */
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
new file mode 100644
index 000000000000..e2ca80037335
--- /dev/null
+++ b/arch/ia64/include/asm/bitops.h
@@ -0,0 +1,468 @@
+#ifndef _ASM_IA64_BITOPS_H
+#define _ASM_IA64_BITOPS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64
+ * O(1) scheduler patch
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ *
+ * The address must be (at least) "long" aligned.
+ * Note that there are drivers (e.g., eepro100) which use these operations to
+ * operate on hw-defined data-structures, so we can't easily change these
+ * operations to force a bigger alignment.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+static __inline__ void
+set_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = 1 << (nr & 31);
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old | bit;
+ } while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__set_bit (int nr, volatile void *addr)
+{
+ *((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
+}
+
+/*
+ * clear_bit() has "acquire" semantics.
+ */
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() do { /* skip */; } while (0)
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void
+clear_bit (int nr, volatile void *addr)
+{
+ __u32 mask, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ mask = ~(1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old & mask;
+ } while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * clear_bit_unlock - Clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and may not be reordered. It does
+ * contain a memory barrier suitable for unlock type operations.
+ */
+static __inline__ void
+clear_bit_unlock (int nr, volatile void *addr)
+{
+ __u32 mask, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ mask = ~(1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old & mask;
+ } while (cmpxchg_rel(m, old, new) != old);
+}
+
+/**
+ * __clear_bit_unlock - Non-atomically clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Similarly to clear_bit_unlock, the implementation uses a store
+ * with release semantics. See also __raw_spin_unlock().
+ */
+static __inline__ void
+__clear_bit_unlock(int nr, void *addr)
+{
+ __u32 * const m = (__u32 *) addr + (nr >> 5);
+ __u32 const new = *m & ~(1 << (nr & 31));
+
+ ia64_st4_rel_nta(m, new);
+}
+
+/**
+ * __clear_bit - Clears a bit in memory (non-atomic version)
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__clear_bit (int nr, volatile void *addr)
+{
+ *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to toggle
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void
+change_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = (1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old ^ bit;
+ } while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to toggle
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__change_bit (int nr, volatile void *addr)
+{
+ *((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies the acquisition side of the memory barrier.
+ */
+static __inline__ int
+test_and_set_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = 1 << (nr & 31);
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old | bit;
+ } while (cmpxchg_acq(m, old, new) != old);
+ return (old & bit) != 0;
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on ia64
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_set_bit (int nr, volatile void *addr)
+{
+ __u32 *p = (__u32 *) addr + (nr >> 5);
+ __u32 m = 1 << (nr & 31);
+ int oldbitset = (*p & m) != 0;
+
+ *p |= m;
+ return oldbitset;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies the acquisition side of the memory barrier.
+ */
+static __inline__ int
+test_and_clear_bit (int nr, volatile void *addr)
+{
+ __u32 mask, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ mask = ~(1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old & mask;
+ } while (cmpxchg_acq(m, old, new) != old);
+ return (old & ~mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_clear_bit(int nr, volatile void * addr)
+{
+ __u32 *p = (__u32 *) addr + (nr >> 5);
+ __u32 m = 1 << (nr & 31);
+ int oldbitset = *p & m;
+
+ *p &= ~m;
+ return oldbitset;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies the acquisition side of the memory barrier.
+ */
+static __inline__ int
+test_and_change_bit (int nr, volatile void *addr)
+{
+ __u32 bit, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ bit = (1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old ^ bit;
+ } while (cmpxchg_acq(m, old, new) != old);
+ return (old & bit) != 0;
+}
+
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ */
+static __inline__ int
+__test_and_change_bit (int nr, void *addr)
+{
+ __u32 old, bit = (1 << (nr & 31));
+ __u32 *m = (__u32 *) addr + (nr >> 5);
+
+ old = *m;
+ *m = old ^ bit;
+ return (old & bit) != 0;
+}
+
+static __inline__ int
+test_bit (int nr, const volatile void *addr)
+{
+ return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
+}
+
+/**
+ * ffz - find the first zero bit in a long word
+ * @x: The long word to find the bit in
+ *
+ * Returns the bit-number (0..63) of the first (least significant) zero bit.
+ * Undefined if no zero exists, so code should check against ~0UL first...
+ */
+static inline unsigned long
+ffz (unsigned long x)
+{
+ unsigned long result;
+
+ result = ia64_popcnt(x & (~x - 1));
+ return result;
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @x: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long
+__ffs (unsigned long x)
+{
+ unsigned long result;
+
+ result = ia64_popcnt((x-1) & ~x);
+ return result;
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Return bit number of last (most-significant) bit set. Undefined
+ * for x==0. Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
+ */
+static inline unsigned long
+ia64_fls (unsigned long x)
+{
+ long double d = x;
+ long exp;
+
+ exp = ia64_getf_exp(d);
+ return exp - 0xffff;
+}
+
+/*
+ * Find the last (most significant) bit set. Returns 0 for x==0 and
+ * bits are numbered from 1..32 (e.g., fls(9) == 4).
+ */
+static inline int
+fls (int t)
+{
+ unsigned long x = t & 0xffffffffu;
+
+ if (!x)
+ return 0;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ return ia64_popcnt(x);
+}
+
+/*
+ * Find the last (most significant) bit set. Undefined for x==0.
+ * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
+ */
+static inline unsigned long
+__fls (unsigned long x)
+{
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ x |= x >> 32;
+ return ia64_popcnt(x) - 1;
+}
+
+#include <asm-generic/bitops/fls64.h>
+
+/*
+ * ffs: find first bit set. This is defined the same way as the libc and
+ * compiler builtin ffs routines, therefore differs in spirit from the above
+ * ffz (man ffs): it operates on "int" values only and the result value is the
+ * bit number + 1. ffs(0) is defined to return zero.
+ */
+#define ffs(x) __builtin_ffs(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+static __inline__ unsigned long
+hweight64 (unsigned long x)
+{
+ unsigned long result;
+ result = ia64_popcnt(x);
+ return result;
+}
+
+#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
+#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
+#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/find.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
+#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
+#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
+
+#include <asm-generic/bitops/minix.h>
+#include <asm-generic/bitops/sched.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_BITOPS_H */
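The ffz() and __ffs() implementations in bitops.h above use the same popcount
trick: if the first zero bit of x is at position k, then x & (~x - 1) keeps
exactly the k trailing one-bits of x, and if the first set bit of x is at
position k, then (x - 1) & ~x keeps exactly the k trailing zero-bits — so
popcnt of the mask is the bit index. A quick self-check, with GCC's
__builtin_popcountl standing in for ia64_popcnt:

    #include <assert.h>

    int main(void)
    {
            unsigned long x = 0x17;  /* ...10111: first zero bit is bit 3 */
            assert(__builtin_popcountl(x & (~x - 1)) == 3);   /* ffz(x)   */

            unsigned long y = 0x18;  /* ...11000: first set bit is bit 3  */
            assert(__builtin_popcountl((y - 1) & ~y) == 3);   /* __ffs(y) */
            return 0;
    }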
diff --git a/arch/ia64/include/asm/break.h b/arch/ia64/include/asm/break.h
new file mode 100644
index 000000000000..f03402039896
--- /dev/null
+++ b/arch/ia64/include/asm/break.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_IA64_BREAK_H
+#define _ASM_IA64_BREAK_H
+
+/*
+ * IA-64 Linux break numbers.
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * OS-specific debug break numbers:
+ */
+#define __IA64_BREAK_KDB 0x80100
+#define __IA64_BREAK_KPROBE 0x81000 /* .. 0x81fff */
+#define __IA64_BREAK_JPROBE 0x82000
+
+/*
+ * OS-specific break numbers:
+ */
+#define __IA64_BREAK_SYSCALL 0x100000
+
+#endif /* _ASM_IA64_BREAK_H */
diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h
new file mode 100644
index 000000000000..823616b5020b
--- /dev/null
+++ b/arch/ia64/include/asm/bug.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_IA64_BUG_H
+#define _ASM_IA64_BUG_H
+
+#ifdef CONFIG_BUG
+#define ia64_abort() __builtin_trap()
+#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+
+/* should this BUG be made generic? */
+#define HAVE_ARCH_BUG
+#endif
+
+#include <asm-generic/bug.h>
+
+#endif
diff --git a/arch/ia64/include/asm/bugs.h b/arch/ia64/include/asm/bugs.h
new file mode 100644
index 000000000000..433523e3b2ed
--- /dev/null
+++ b/arch/ia64/include/asm/bugs.h
@@ -0,0 +1,19 @@
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ * void check_bugs(void);
+ *
+ * Based on <asm-alpha/bugs.h>.
+ *
+ * Modified 1998, 1999, 2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+#ifndef _ASM_IA64_BUGS_H
+#define _ASM_IA64_BUGS_H
+
+#include <asm/processor.h>
+
+extern void check_bugs (void);
+
+#endif /* _ASM_IA64_BUGS_H */
diff --git a/arch/ia64/include/asm/byteorder.h b/arch/ia64/include/asm/byteorder.h
new file mode 100644
index 000000000000..69bd41d7c26e
--- /dev/null
+++ b/arch/ia64/include/asm/byteorder.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_IA64_BYTEORDER_H
+#define _ASM_IA64_BYTEORDER_H
+
+/*
+ * Modified 1998, 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#include <asm/types.h>
+#include <asm/intrinsics.h>
+#include <linux/compiler.h>
+
+static __inline__ __attribute_const__ __u64
+__ia64_swab64 (__u64 x)
+{
+ __u64 result;
+
+ result = ia64_mux1(x, ia64_mux1_rev);
+ return result;
+}
+
+static __inline__ __attribute_const__ __u32
+__ia64_swab32 (__u32 x)
+{
+ return __ia64_swab64(x) >> 32;
+}
+
+static __inline__ __attribute_const__ __u16
+__ia64_swab16(__u16 x)
+{
+ return __ia64_swab64(x) >> 48;
+}
+
+#define __arch__swab64(x) __ia64_swab64(x)
+#define __arch__swab32(x) __ia64_swab32(x)
+#define __arch__swab16(x) __ia64_swab16(x)
+
+#define __BYTEORDER_HAS_U64__
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_IA64_BYTEORDER_H */
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
new file mode 100644
index 000000000000..e7482bd628ff
--- /dev/null
+++ b/arch/ia64/include/asm/cache.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_CACHE_H
+#define _ASM_IA64_CACHE_H
+
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/* Bytes per L1 (data) cache line. */
+#define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#ifdef CONFIG_SMP
+# define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+# define SMP_CACHE_BYTES L1_CACHE_BYTES
+#else
+ /*
+ * The "aligned" directive can only _increase_ alignment, so this is
+ * safe and provides an easy way to avoid wasting space on a
+ * uni-processor:
+ */
+# define SMP_CACHE_SHIFT 3
+# define SMP_CACHE_BYTES (1 << 3)
+#endif
+
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
+#endif /* _ASM_IA64_CACHE_H */
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
new file mode 100644
index 000000000000..afcfbda76e20
--- /dev/null
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_IA64_CACHEFLUSH_H
+#define _ASM_IA64_CACHEFLUSH_H
+
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/page-flags.h>
+#include <linux/bitops.h>
+
+#include <asm/page.h>
+
+/*
+ * Cache flushing routines. This is the kind of stuff that can be very expensive, so try
+ * to avoid them whenever possible.
+ */
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_icache_page(vma,page) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+#define flush_dcache_page(page) \
+do { \
+ clear_bit(PG_arch_1, &(page)->flags); \
+} while (0)
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+extern void flush_icache_range (unsigned long start, unsigned long end);
+
+#define flush_icache_user_range(vma, page, user_addr, len) \
+do { \
+ unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
+ flush_icache_range(_addr, _addr + (len)); \
+} while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { memcpy(dst, src, len); \
+ flush_icache_user_range(vma, page, vaddr, len); \
+} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif /* _ASM_IA64_CACHEFLUSH_H */
diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h
new file mode 100644
index 000000000000..97af155057e4
--- /dev/null
+++ b/arch/ia64/include/asm/checksum.h
@@ -0,0 +1,79 @@
+#ifndef _ASM_IA64_CHECKSUM_H
+#define _ASM_IA64_CHECKSUM_H
+
+/*
+ * Modified 1998, 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+
+/*
+ * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
+ * checksum, already complemented
+ */
+extern __sum16 csum_tcpudp_magic (__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum);
+
+extern __wsum csum_tcpudp_nofold (__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum);
+
+/*
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * Same as csum_partial, but copies from src while it checksums.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or
+ * even better 64-bit) boundary.
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum,
+ int *errp);
+
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum);
+
+/*
+ * This routine is used for miscellaneous IP-like checksums, mainly in
+ * icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+/*
+ * Fold a partial checksum without adding pseudo headers.
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
+
+#define _HAVE_ARCH_IPV6_CSUM 1
+struct in6_addr;
+extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr, __u32 len, unsigned short proto,
+ __wsum csum);
+
+#endif /* _ASM_IA64_CHECKSUM_H */
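csum_fold() above is standard RFC 1071 arithmetic: fold the carries out of
the 32-bit one's-complement accumulator into the low 16 bits (twice, since
the first addition can itself produce a carry), then complement. A standalone
sketch with a made-up accumulator value:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t fold(uint32_t sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);  /* fold the high half in */
            sum = (sum & 0xffff) + (sum >> 16);  /* fold any new carry    */
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* 0x1fffe = 0xffff + 0xffff: folds to 0xffff, complements to 0 */
            printf("0x%04x\n", fold(0x1fffeu));
            return 0;
    }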
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h
new file mode 100644
index 000000000000..dfcf75b8426d
--- /dev/null
+++ b/arch/ia64/include/asm/compat.h
@@ -0,0 +1,207 @@
+#ifndef _ASM_IA64_COMPAT_H
+#define _ASM_IA64_COMPAT_H
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+
+#define COMPAT_USER_HZ 100
+
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_time_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_key_t;
+typedef s32 compat_pid_t;
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+typedef u16 compat_mode_t;
+typedef u32 compat_ino_t;
+typedef u16 compat_dev_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef u16 compat_nlink_t;
+typedef u16 compat_ipc_pid_t;
+typedef s32 compat_daddr_t;
+typedef u32 compat_caddr_t;
+typedef __kernel_fsid_t compat_fsid_t;
+typedef s32 compat_timer_t;
+
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef s64 __attribute__((aligned(4))) compat_s64;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u64 __attribute__((aligned(4))) compat_u64;
+
+struct compat_timespec {
+ compat_time_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct compat_timeval {
+ compat_time_t tv_sec;
+ s32 tv_usec;
+};
+
+struct compat_stat {
+ compat_dev_t st_dev;
+ u16 __pad1;
+ compat_ino_t st_ino;
+ compat_mode_t st_mode;
+ compat_nlink_t st_nlink;
+ __compat_uid_t st_uid;
+ __compat_gid_t st_gid;
+ compat_dev_t st_rdev;
+ u16 __pad2;
+ u32 st_size;
+ u32 st_blksize;
+ u32 st_blocks;
+ u32 st_atime;
+ u32 st_atime_nsec;
+ u32 st_mtime;
+ u32 st_mtime_nsec;
+ u32 st_ctime;
+ u32 st_ctime_nsec;
+ u32 __unused4;
+ u32 __unused5;
+};
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+ compat_pid_t l_pid;
+};
+
+#define F_GETLK64 12
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
+/*
+ * IA32 uses 4 byte alignment for 64 bit quantities,
+ * so we need to pack this structure.
+ */
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+} __attribute__((packed));
+
+struct compat_statfs {
+ int f_type;
+ int f_bsize;
+ int f_blocks;
+ int f_bfree;
+ int f_bavail;
+ int f_files;
+ int f_ffree;
+ compat_fsid_t f_fsid;
+ int f_namelen; /* SunOS ignores this field. */
+ int f_frsize;
+ int f_spare[5];
+};
+
+#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
+#define COMPAT_RLIM_INFINITY 0xffffffff
+
+typedef u32 compat_old_sigset_t; /* at least 32 bits */
+
+#define _COMPAT_NSIG 64
+#define _COMPAT_NSIG_BPW 32
+
+typedef u32 compat_sigset_word;
+
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
+
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ unsigned short mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ compat_ulong_t unused1;
+ compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_time_t sem_otime;
+ compat_ulong_t __unused1;
+ compat_time_t sem_ctime;
+ compat_ulong_t __unused2;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ compat_time_t msg_stime;
+ compat_ulong_t __unused1;
+ compat_time_t msg_rtime;
+ compat_ulong_t __unused2;
+ compat_time_t msg_ctime;
+ compat_ulong_t __unused3;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_time_t shm_atime;
+ compat_ulong_t __unused1;
+ compat_time_t shm_dtime;
+ compat_ulong_t __unused2;
+ compat_time_t shm_ctime;
+ compat_ulong_t __unused3;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+/*
+ * A pointer passed in from user mode. This should not be used for syscall
+ * parameters; just declare them as pointers, because the syscall entry code
+ * will have appropriately converted them already.
+ */
+typedef u32 compat_uptr_t;
+
+static inline void __user *
+compat_ptr (compat_uptr_t uptr)
+{
+ return (void __user *) (unsigned long) uptr;
+}
+
+static inline compat_uptr_t
+ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
+
+static __inline__ void __user *
+compat_alloc_user_space (long len)
+{
+ struct pt_regs *regs = task_pt_regs(current);
+ return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
+}
+
+#endif /* _ASM_IA64_COMPAT_H */
diff --git a/arch/ia64/include/asm/cpu.h b/arch/ia64/include/asm/cpu.h
new file mode 100644
index 000000000000..fcca30b9f110
--- /dev/null
+++ b/arch/ia64/include/asm/cpu.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_CPU_H_
+#define _ASM_IA64_CPU_H_
+
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/topology.h>
+#include <linux/percpu.h>
+
+struct ia64_cpu {
+ struct cpu cpu;
+};
+
+DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
+
+DECLARE_PER_CPU(int, cpu_state);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int arch_register_cpu(int num);
+extern void arch_unregister_cpu(int);
+#endif
+
+#endif /* _ASM_IA64_CPU_H_ */
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
new file mode 100644
index 000000000000..d20b998cb91d
--- /dev/null
+++ b/arch/ia64/include/asm/cputime.h
@@ -0,0 +1,109 @@
+/*
+ * Definitions for measuring cputime on ia64 machines.
+ *
+ * Based on <asm-powerpc/cputime.h>.
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * Otherwise we measure cpu time in jiffies using the generic definitions.
+ */
+
+#ifndef __IA64_CPUTIME_H
+#define __IA64_CPUTIME_H
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#include <asm-generic/cputime.h>
+#else
+
+#include <linux/time.h>
+#include <linux/jiffies.h>
+#include <asm/processor.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero ((cputime_t)0)
+#define cputime_max ((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b) ((__a) + (__b))
+#define cputime_sub(__a, __b) ((__a) - (__b))
+#define cputime_div(__a, __n) ((__a) / (__n))
+#define cputime_halve(__a) ((__a) >> 1)
+#define cputime_eq(__a, __b) ((__a) == (__b))
+#define cputime_gt(__a, __b) ((__a) > (__b))
+#define cputime_ge(__a, __b) ((__a) >= (__b))
+#define cputime_lt(__a, __b) ((__a) < (__b))
+#define cputime_le(__a, __b) ((__a) <= (__b))
+
+#define cputime64_zero ((cputime64_t)0)
+#define cputime64_add(__a, __b) ((__a) + (__b))
+#define cputime64_sub(__a, __b) ((__a) - (__b))
+#define cputime_to_cputime64(__ct) (__ct)
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+#define cputime_to_msecs(__ct) ((__ct) / NSEC_PER_MSEC)
+#define msecs_to_cputime(__msecs) ((__msecs) * NSEC_PER_MSEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+ cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+ return (ret + val->tv_nsec);
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+ val->tv_sec = ct / NSEC_PER_SEC;
+ val->tv_nsec = ct % NSEC_PER_SEC;
+}
+
+/*
+ * Convert cputime <-> timeval (msec)
+ */
+static inline cputime_t timeval_to_cputime(struct timeval *val)
+{
+ cputime_t ret = val->tv_sec * NSEC_PER_SEC;
+ return (ret + val->tv_usec * NSEC_PER_USEC);
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+ val->tv_sec = ct / NSEC_PER_SEC;
+ val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct)
+
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* __IA64_CPUTIME_H */
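
To make the units concrete: with CONFIG_VIRT_CPU_ACCOUNTING=y a cputime_t
simply counts nanoseconds, so the conversions above are plain scaling. A
small sketch, assuming that config and the usual kernel headers:

    static void cputime_units_example(void)
    {
            cputime_t ct = jiffies_to_cputime(HZ); /* 1 sec = NSEC_PER_SEC */
            struct timespec ts;

            cputime_to_timespec(ct, &ts);   /* ts = { .tv_sec = 1, .tv_nsec = 0 } */
            (void)cputime_to_clock_t(ct);   /* yields exactly USER_HZ */
    }
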
diff --git a/arch/ia64/include/asm/current.h b/arch/ia64/include/asm/current.h
new file mode 100644
index 000000000000..c659f90fbfd9
--- /dev/null
+++ b/arch/ia64/include/asm/current.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_IA64_CURRENT_H
+#define _ASM_IA64_CURRENT_H
+
+/*
+ * Modified 1998-2000
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm/intrinsics.h>
+
+/*
+ * In kernel mode, thread pointer (r13) is used to point to the current task
+ * structure.
+ */
+#define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
+
+#endif /* _ASM_IA64_CURRENT_H */
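
For illustration (a standard kernel idiom, assuming <linux/sched.h> for the
struct task_struct definition): since current is just a cast of r13, it can
be used anywhere kernel code runs in task context.

    static void whoami(void)
    {
            printk(KERN_DEBUG "running as %s (pid %d)\n",
                   current->comm, current->pid);
    }
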
diff --git a/arch/ia64/include/asm/cyclone.h b/arch/ia64/include/asm/cyclone.h
new file mode 100644
index 000000000000..88f6500e84ab
--- /dev/null
+++ b/arch/ia64/include/asm/cyclone.h
@@ -0,0 +1,15 @@
+#ifndef ASM_IA64_CYCLONE_H
+#define ASM_IA64_CYCLONE_H
+
+#ifdef CONFIG_IA64_CYCLONE
+extern int use_cyclone;
+extern void __init cyclone_setup(void);
+#else /* CONFIG_IA64_CYCLONE */
+#define use_cyclone 0
+static inline void cyclone_setup(void)
+{
+ printk(KERN_ERR "Cyclone Counter: System not configured"
+ " w/ CONFIG_IA64_CYCLONE.\n");
+}
+#endif /* CONFIG_IA64_CYCLONE */
+#endif /* !ASM_IA64_CYCLONE_H */
diff --git a/arch/ia64/include/asm/delay.h b/arch/ia64/include/asm/delay.h
new file mode 100644
index 000000000000..a30a62f235e1
--- /dev/null
+++ b/arch/ia64/include/asm/delay.h
@@ -0,0 +1,88 @@
+#ifndef _ASM_IA64_DELAY_H
+#define _ASM_IA64_DELAY_H
+
+/*
+ * Delay routines using a pre-computed "cycles/usec" value.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/compiler.h>
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+
+static __inline__ void
+ia64_set_itm (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_CR_ITM, val);
+ ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itm (void)
+{
+ unsigned long result;
+
+ result = ia64_getreg(_IA64_REG_CR_ITM);
+ ia64_srlz_d();
+ return result;
+}
+
+static __inline__ void
+ia64_set_itv (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_CR_ITV, val);
+ ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itv (void)
+{
+ return ia64_getreg(_IA64_REG_CR_ITV);
+}
+
+static __inline__ void
+ia64_set_itc (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_AR_ITC, val);
+ ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itc (void)
+{
+ unsigned long result;
+
+ result = ia64_getreg(_IA64_REG_AR_ITC);
+ ia64_barrier();
+#ifdef CONFIG_ITANIUM
+ while (unlikely((__s32) result == -1)) {
+ result = ia64_getreg(_IA64_REG_AR_ITC);
+ ia64_barrier();
+ }
+#endif
+ return result;
+}
+
+extern void ia64_delay_loop (unsigned long loops);
+
+static __inline__ void
+__delay (unsigned long loops)
+{
+ if (unlikely(loops < 1))
+ return;
+
+ ia64_delay_loop (loops - 1);
+}
+
+extern void udelay (unsigned long usecs);
+
+#endif /* _ASM_IA64_DELAY_H */
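
A usage sketch of udelay() for bounded busy-wait polling. The device,
status register, and ready bit are hypothetical, and <asm/io.h> plus
<linux/errno.h> are assumed for readl() and ETIMEDOUT:

    /* hypothetical device poll: wait up to 1 ms, in 10 us steps */
    static int wait_for_ready(void __iomem *status_reg)
    {
            int i;

            for (i = 0; i < 100; i++) {
                    if (readl(status_reg) & 0x1)    /* hypothetical ready bit */
                            return 0;
                    udelay(10);
            }
            return -ETIMEDOUT;
    }
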
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
new file mode 100644
index 000000000000..3db6daf7f251
--- /dev/null
+++ b/arch/ia64/include/asm/device.h
@@ -0,0 +1,15 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_IA64_DEVICE_H
+#define _ASM_IA64_DEVICE_H
+
+struct dev_archdata {
+#ifdef CONFIG_ACPI
+ void *acpi_handle;
+#endif
+};
+
+#endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/div64.h b/arch/ia64/include/asm/div64.h
new file mode 100644
index 000000000000..6cd978cefb28
--- /dev/null
+++ b/arch/ia64/include/asm/div64.h
@@ -0,0 +1 @@
+#include <asm-generic/div64.h>
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..9f0df9bd46b7
--- /dev/null
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -0,0 +1,97 @@
+#ifndef _ASM_IA64_DMA_MAPPING_H
+#define _ASM_IA64_DMA_MAPPING_H
+
+/*
+ * Copyright (C) 2003-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <asm/machvec.h>
+#include <linux/scatterlist.h>
+
+#define dma_alloc_coherent platform_dma_alloc_coherent
+/* coherent mem. is cheap */
+static inline void *
+dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ return dma_alloc_coherent(dev, size, dma_handle, flag);
+}
+#define dma_free_coherent platform_dma_free_coherent
+static inline void
+dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+#define dma_map_single_attrs platform_dma_map_single_attrs
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size, int dir)
+{
+ return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_map_sg_attrs platform_dma_map_sg_attrs
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, int dir)
+{
+ return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+}
+#define dma_unmap_single_attrs platform_dma_unmap_single_attrs
+static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
+ size_t size, int dir)
+{
+ return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_unmap_sg_attrs platform_dma_unmap_sg_attrs
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, int dir)
+{
+ return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+}
+#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
+#define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu
+#define dma_sync_single_for_device platform_dma_sync_single_for_device
+#define dma_sync_sg_for_device platform_dma_sync_sg_for_device
+#define dma_mapping_error platform_dma_mapping_error
+
+#define dma_map_page(dev, pg, off, size, dir) \
+ dma_map_single(dev, page_address(pg) + (off), (size), (dir))
+#define dma_unmap_page(dev, dma_addr, size, dir) \
+ dma_unmap_single(dev, dma_addr, size, dir)
+
+/*
+ * Rest of this file is part of the "Advanced DMA API". Use at your own risk.
+ * See Documentation/DMA-API.txt for details.
+ */
+
+#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
+ dma_sync_single_for_cpu(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
+ dma_sync_single_for_device(dev, dma_handle, size, dir)
+
+#define dma_supported platform_dma_supported
+
+static inline int
+dma_set_mask (struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
+}
+
+extern int dma_get_cache_alignment(void);
+
+static inline void
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ /*
+ * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
+ * ensure that dma_cache_sync() enforces order, hence the mb().
+ */
+ mb();
+}
+
+#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
+
+#endif /* _ASM_IA64_DMA_MAPPING_H */
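
A sketch of the typical driver sequence against this API. The function and
buffer names are hypothetical, error unwinding is trimmed, and
DMA_32BIT_MASK/GFP_KERNEL are assumed from the usual kernel headers:

    static int foo_dma_init(struct device *dev, void *pkt, size_t len,
                            dma_addr_t *ring_dma, dma_addr_t *pkt_dma)
    {
            void *ring;

            if (dma_set_mask(dev, DMA_32BIT_MASK))
                    return -EIO;

            /* coherent descriptor ring: CPU and device views stay in sync */
            ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
            if (!ring)
                    return -ENOMEM;

            /* streaming mapping for a one-shot transmit buffer */
            *pkt_dma = dma_map_single(dev, pkt, len, DMA_TO_DEVICE);
            return 0;
    }
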
diff --git a/arch/ia64/include/asm/dma.h b/arch/ia64/include/asm/dma.h
new file mode 100644
index 000000000000..4d97f60f1ef5
--- /dev/null
+++ b/arch/ia64/include/asm/dma.h
@@ -0,0 +1,24 @@
+#ifndef _ASM_IA64_DMA_H
+#define _ASM_IA64_DMA_H
+
+/*
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <asm/io.h> /* need byte IO */
+
+extern unsigned long MAX_DMA_ADDRESS;
+
+#ifdef CONFIG_PCI
+ extern int isa_dma_bridge_buggy;
+#else
+# define isa_dma_bridge_buggy (0)
+#endif
+
+#define free_dma(x)
+
+void dma_mark_clean(void *addr, size_t size);
+
+#endif /* _ASM_IA64_DMA_H */
diff --git a/arch/ia64/include/asm/dmi.h b/arch/ia64/include/asm/dmi.h
new file mode 100644
index 000000000000..00eb1b130b63
--- /dev/null
+++ b/arch/ia64/include/asm/dmi.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H 1
+
+#include <asm/io.h>
+
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
+
+#endif
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
new file mode 100644
index 000000000000..5e0c1a6bce8d
--- /dev/null
+++ b/arch/ia64/include/asm/elf.h
@@ -0,0 +1,269 @@
+#ifndef _ASM_IA64_ELF_H
+#define _ASM_IA64_ELF_H
+
+/*
+ * ELF-specific definitions.
+ *
+ * Copyright (C) 1998-1999, 2002-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/auxvec.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_IA_64)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_IA_64
+
+#define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
+
+/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
+ interpreted as follows by Linux: */
+#define EF_IA_64_LINUX_EXECUTABLE_STACK 0x1 /* is stack (& heap) executable by default? */
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.
+ * Typical use of this is to invoke "./ld.so someprog" to test out a
+ * new version of the loader. We need to make sure that it is out of
+ * the way of the program that it will "exec", and that there is
+ * sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
+#define PT_IA_64_UNWIND 0x70000001
+
+/* IA-64 relocations: */
+#define R_IA64_NONE 0x00 /* none */
+#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
+#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
+#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22 0x2a /* @gprel(sym+add), add imm22 */
+#define R_IA64_GPREL64I 0x2b /* @gprel(sym+add), mov imm64 */
+#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym+add), data4 MSB */
+#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym+add), data4 LSB */
+#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym+add), data8 MSB */
+#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF22 0x32 /* @ltoff(sym+add), add imm22 */
+#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym+add), add imm22 */
+#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym+add), data8 MSB */
+#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym+add), data8 LSB */
+#define R_IA64_FPTR64I 0x43 /* @fptr(sym+add), mov imm64 */
+#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym+add), data4 MSB */
+#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym+add), data4 LSB */
+#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym+add), data8 MSB */
+#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym+add), data8 LSB */
+#define R_IA64_PCREL60B 0x48 /* @pcrel(sym+add), brl */
+#define R_IA64_PCREL21B 0x49 /* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL21M 0x4a /* @pcrel(sym+add), chk.s */
+#define R_IA64_PCREL21F 0x4b /* @pcrel(sym+add), fchkf */
+#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym+add), data4 MSB */
+#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym+add), data4 LSB */
+#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym+add), data8 MSB */
+#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), 4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), 4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), 8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), 8 LSB */
+#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym+add), data4 MSB */
+#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym+add), data4 LSB */
+#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym+add), data8 MSB */
+#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym+add), data8 LSB */
+#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym+add), data4 MSB */
+#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym+add), data4 LSB */
+#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym+add), data8 MSB */
+#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym+add), data8 LSB */
+#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
+#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
+#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
+#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
+#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL22 0x7a /* @pcrel(sym+add), imm22 */
+#define R_IA64_PCREL64I 0x7b /* @pcrel(sym+add), imm64 */
+#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY 0x84 /* dynamic reloc, data copy */
+#define R_IA64_SUB 0x85 /* -symbol + addend, add imm22 */
+#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
+#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
+#define R_IA64_TPREL14 0x91 /* @tprel(sym+add), add imm14 */
+#define R_IA64_TPREL22 0x92 /* @tprel(sym+add), add imm22 */
+#define R_IA64_TPREL64I 0x93 /* @tprel(sym+add), add imm64 */
+#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym+add), data8 MSB */
+#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), add imm22 */
+#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym+add), data8 MSB */
+#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(s+a)), imm22 */
+#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym+add), imm14 */
+#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym+add), imm22 */
+#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym+add), imm64 */
+#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym+add), data4 MSB */
+#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym+add), data4 LSB */
+#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym+add), data8 MSB */
+#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
+
+/* IA-64 specific section flags: */
+#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
+
+/*
+ * We use (abuse?) this macro to insert the (empty) vm_area that is
+ * used to map the register backing store. I don't see any better
+ * place to do this, but we should discuss this with Linus once we can
+ * talk to him...
+ */
+extern void ia64_init_addr_space (void);
+#define ELF_PLAT_INIT(_r, load_addr) ia64_init_addr_space()
+
+/* ELF register definitions. This is needed for core dump support. */
+
+/*
+ * elf_gregset_t contains the application-level state in the following order:
+ * r0-r31
+ * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
+ * predicate registers (p0-p63)
+ * b0-b7
+ * ip cfm psr
+ * ar.rsc ar.bsp ar.bspstore ar.rnat
+ * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
+ */
+#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
+#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
+
+/* elf_gregset_t register offsets */
+#define ELF_GR_0_OFFSET 0
+#define ELF_NAT_OFFSET (32 * sizeof(elf_greg_t))
+#define ELF_PR_OFFSET (33 * sizeof(elf_greg_t))
+#define ELF_BR_0_OFFSET (34 * sizeof(elf_greg_t))
+#define ELF_CR_IIP_OFFSET (42 * sizeof(elf_greg_t))
+#define ELF_CFM_OFFSET (43 * sizeof(elf_greg_t))
+#define ELF_CR_IPSR_OFFSET (44 * sizeof(elf_greg_t))
+#define ELF_GR_OFFSET(i) (ELF_GR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_BR_OFFSET(i) (ELF_BR_0_OFFSET + i * sizeof(elf_greg_t))
+#define ELF_AR_RSC_OFFSET (45 * sizeof(elf_greg_t))
+#define ELF_AR_BSP_OFFSET (46 * sizeof(elf_greg_t))
+#define ELF_AR_BSPSTORE_OFFSET (47 * sizeof(elf_greg_t))
+#define ELF_AR_RNAT_OFFSET (48 * sizeof(elf_greg_t))
+#define ELF_AR_CCV_OFFSET (49 * sizeof(elf_greg_t))
+#define ELF_AR_UNAT_OFFSET (50 * sizeof(elf_greg_t))
+#define ELF_AR_FPSR_OFFSET (51 * sizeof(elf_greg_t))
+#define ELF_AR_PFS_OFFSET (52 * sizeof(elf_greg_t))
+#define ELF_AR_LC_OFFSET (53 * sizeof(elf_greg_t))
+#define ELF_AR_EC_OFFSET (54 * sizeof(elf_greg_t))
+#define ELF_AR_CSD_OFFSET (55 * sizeof(elf_greg_t))
+#define ELF_AR_SSD_OFFSET (56 * sizeof(elf_greg_t))
+#define ELF_AR_END_OFFSET (57 * sizeof(elf_greg_t))
+
+typedef unsigned long elf_fpxregset_t;
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct ia64_fpreg elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+
+
+struct pt_regs; /* forward declaration... */
+extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
+#define ELF_CORE_COPY_REGS(_dest,_regs) ia64_elf_core_copy_regs(_regs, _dest);
+
+/* This macro yields a bitmask that programs can use to figure out
+ what instruction set this CPU supports. */
+#define ELF_HWCAP 0
+
+/* This macro yields a string that ld.so will use to load
+ implementation specific libraries for optimization. Not terribly
+ relevant until we have real hardware to play with... */
+#define ELF_PLATFORM NULL
+
+#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
+#define elf_read_implies_exec(ex, executable_stack) \
+ ((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
+
+struct task_struct;
+
+#define GATE_EHDR ((const struct elfhdr *) GATE_ADDR)
+
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+#define ARCH_DLINFO \
+do { \
+ extern char __kernel_syscall_via_epc[]; \
+ NEW_AUX_ENT(AT_SYSINFO, (unsigned long) __kernel_syscall_via_epc); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \
+} while (0)
+
+
+/*
+ * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
+ * extra segments containing the gate DSO contents. Dumping its
+ * contents makes post-mortem fully interpretable later without matching up
+ * the same kernel and hardware config to see what PC values meant.
+ * Dumping its extra ELF program headers includes all the other information
+ * a debugger needs to easily find how the gate DSO was being used.
+ */
+#define ELF_CORE_EXTRA_PHDRS (GATE_EHDR->e_phnum)
+#define ELF_CORE_WRITE_EXTRA_PHDRS \
+do { \
+ const struct elf_phdr *const gate_phdrs = \
+ (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
+ int i; \
+ Elf64_Off ofs = 0; \
+ for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
+ struct elf_phdr phdr = gate_phdrs[i]; \
+ if (phdr.p_type == PT_LOAD) { \
+ phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
+ phdr.p_filesz = phdr.p_memsz; \
+ if (ofs == 0) { \
+ ofs = phdr.p_offset = offset; \
+ offset += phdr.p_filesz; \
+ } \
+ else \
+ phdr.p_offset = ofs; \
+ } \
+ else \
+ phdr.p_offset += ofs; \
+ phdr.p_paddr = 0; /* match other core phdrs */ \
+ DUMP_WRITE(&phdr, sizeof(phdr)); \
+ } \
+} while (0)
+#define ELF_CORE_WRITE_EXTRA_DATA \
+do { \
+ const struct elf_phdr *const gate_phdrs = \
+ (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
+ int i; \
+ for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
+ if (gate_phdrs[i].p_type == PT_LOAD) { \
+ DUMP_WRITE((void *) gate_phdrs[i].p_vaddr, \
+ PAGE_ALIGN(gate_phdrs[i].p_memsz)); \
+ break; \
+ } \
+ } \
+} while (0)
+
+#endif /* _ASM_IA64_ELF_H */
diff --git a/arch/ia64/include/asm/emergency-restart.h b/arch/ia64/include/asm/emergency-restart.h
new file mode 100644
index 000000000000..108d8c48e42e
--- /dev/null
+++ b/arch/ia64/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/ia64/include/asm/errno.h b/arch/ia64/include/asm/errno.h
new file mode 100644
index 000000000000..4c82b503d92f
--- /dev/null
+++ b/arch/ia64/include/asm/errno.h
@@ -0,0 +1 @@
+#include <asm-generic/errno.h>
diff --git a/arch/ia64/include/asm/esi.h b/arch/ia64/include/asm/esi.h
new file mode 100644
index 000000000000..40991c6ba647
--- /dev/null
+++ b/arch/ia64/include/asm/esi.h
@@ -0,0 +1,29 @@
+/*
+ * ESI service calls.
+ *
+ * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P.
+ * Alex Williamson <alex.williamson@hp.com>
+ */
+#ifndef esi_h
+#define esi_h
+
+#include <linux/efi.h>
+
+#define ESI_QUERY 0x00000001
+#define ESI_OPEN_HANDLE 0x02000000
+#define ESI_CLOSE_HANDLE 0x02000001
+
+enum esi_proc_type {
+ ESI_PROC_SERIALIZED, /* calls need to be serialized */
+ ESI_PROC_MP_SAFE, /* MP-safe, but not reentrant */
+ ESI_PROC_REENTRANT /* MP-safe and reentrant */
+};
+
+extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
+extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
+ enum esi_proc_type,
+ u64, u64, u64, u64, u64, u64, u64, u64);
+extern int ia64_esi_call_phys(efi_guid_t, struct ia64_sal_retval *, u64, u64,
+ u64, u64, u64, u64, u64, u64);
+
+#endif /* esi_h */
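
A hedged sketch of an ESI call through the declarations above. The GUID and
argument meanings are entirely made up; only the call signature follows this
header, and treating a nonzero return as "service not found" is an
assumption:

    static long foo_esi_query(void)
    {
            static const efi_guid_t foo_guid =      /* hypothetical GUID */
                    EFI_GUID(0x12345678, 0x1234, 0x1234,
                             0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
            struct ia64_sal_retval rv;

            if (ia64_esi_call(foo_guid, &rv, ESI_PROC_SERIALIZED,
                              ESI_QUERY, 0, 0, 0, 0, 0, 0, 0))
                    return -ENODEV;         /* assumed failure convention */
            return rv.status;
    }
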
diff --git a/arch/ia64/include/asm/fb.h b/arch/ia64/include/asm/fb.h
new file mode 100644
index 000000000000..89a397cee90a
--- /dev/null
+++ b/arch/ia64/include/asm/fb.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <linux/efi.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+}
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/ia64/include/asm/fcntl.h b/arch/ia64/include/asm/fcntl.h
new file mode 100644
index 000000000000..1dd275dc8f65
--- /dev/null
+++ b/arch/ia64/include/asm/fcntl.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_IA64_FCNTL_H
+#define _ASM_IA64_FCNTL_H
+/*
+ * Modified 1998-2000
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#define force_o_largefile() \
+ (personality(current->personality) != PER_LINUX32)
+
+#include <asm-generic/fcntl.h>
+
+#endif /* _ASM_IA64_FCNTL_H */
diff --git a/arch/ia64/include/asm/fpswa.h b/arch/ia64/include/asm/fpswa.h
new file mode 100644
index 000000000000..62edfceadaa6
--- /dev/null
+++ b/arch/ia64/include/asm/fpswa.h
@@ -0,0 +1,73 @@
+#ifndef _ASM_IA64_FPSWA_H
+#define _ASM_IA64_FPSWA_H
+
+/*
+ * Floating-point Software Assist
+ *
+ * Copyright (C) 1999 Intel Corporation.
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
+ */
+
+typedef struct {
+ /* 4 * 128 bits */
+ unsigned long fp_lp[4*2];
+} fp_state_low_preserved_t;
+
+typedef struct {
+ /* 10 * 128 bits */
+ unsigned long fp_lv[10 * 2];
+} fp_state_low_volatile_t;
+
+typedef struct {
+ /* 16 * 128 bits */
+ unsigned long fp_hp[16 * 2];
+} fp_state_high_preserved_t;
+
+typedef struct {
+ /* 96 * 128 bits */
+ unsigned long fp_hv[96 * 2];
+} fp_state_high_volatile_t;
+
+/**
+ * floating point state to be passed to the FP emulation library by
+ * the trap/fault handler
+ */
+typedef struct {
+ unsigned long bitmask_low64;
+ unsigned long bitmask_high64;
+ fp_state_low_preserved_t *fp_state_low_preserved;
+ fp_state_low_volatile_t *fp_state_low_volatile;
+ fp_state_high_preserved_t *fp_state_high_preserved;
+ fp_state_high_volatile_t *fp_state_high_volatile;
+} fp_state_t;
+
+typedef struct {
+ unsigned long status;
+ unsigned long err0;
+ unsigned long err1;
+ unsigned long err2;
+} fpswa_ret_t;
+
+/**
+ * Function pointer type for the floating-point software assist
+ * library entry point. It is invoked by the floating-point
+ * software assist trap/fault handler.
+ */
+typedef fpswa_ret_t (*efi_fpswa_t) (unsigned long trap_type, void *bundle, unsigned long *ipsr,
+ unsigned long *fsr, unsigned long *isr, unsigned long *preds,
+ unsigned long *ifs, fp_state_t *fp_state);
+
+/**
+ * This is the FPSWA library interface as defined by EFI. We need to pass a
+ * pointer to the interface itself on a call to the assist library
+ */
+typedef struct {
+ unsigned int revision;
+ unsigned int reserved;
+ efi_fpswa_t fpswa;
+} fpswa_interface_t;
+
+extern fpswa_interface_t *fpswa_interface;
+
+#endif /* _ASM_IA64_FPSWA_H */
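
A sketch of how the fault handler hands state to the library through the
EFI-published interface pointer. Every argument is a placeholder that the
real trap handler extracts from the interruption state; BUG_ON() assumes
<linux/kernel.h>:

    static fpswa_ret_t call_fpswa(unsigned long trap_type, void *bundle,
                                  unsigned long *ipsr, unsigned long *fsr,
                                  unsigned long *isr, unsigned long *preds,
                                  unsigned long *ifs, fp_state_t *fp_state)
    {
            BUG_ON(!fpswa_interface);       /* discovered from EFI at boot */
            return fpswa_interface->fpswa(trap_type, bundle, ipsr, fsr,
                                          isr, preds, ifs, fp_state);
    }
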
diff --git a/arch/ia64/include/asm/fpu.h b/arch/ia64/include/asm/fpu.h
new file mode 100644
index 000000000000..3859558ff0a4
--- /dev/null
+++ b/arch/ia64/include/asm/fpu.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_FPU_H
+#define _ASM_IA64_FPU_H
+
+/*
+ * Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/types.h>
+
+/* floating point status register: */
+#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
+#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
+#define FPSR_TRAP_ZD (1 << 2) /* zero-divide trap disabled */
+#define FPSR_TRAP_OD (1 << 3) /* overflow trap disabled */
+#define FPSR_TRAP_UD (1 << 4) /* underflow trap disabled */
+#define FPSR_TRAP_ID (1 << 5) /* inexact trap disabled */
+#define FPSR_S0(x) ((x) << 6)
+#define FPSR_S1(x) ((x) << 19)
+#define FPSR_S2(x) (__IA64_UL(x) << 32)
+#define FPSR_S3(x) (__IA64_UL(x) << 45)
+
+/* floating-point status field controls: */
+#define FPSF_FTZ (1 << 0) /* flush-to-zero */
+#define FPSF_WRE (1 << 1) /* widest-range exponent */
+#define FPSF_PC(x) (((x) & 0x3) << 2) /* precision control */
+#define FPSF_RC(x) (((x) & 0x3) << 4) /* rounding control */
+#define FPSF_TD (1 << 6) /* trap disabled */
+
+/* floating-point status field flags: */
+#define FPSF_V (1 << 7) /* invalid operation flag */
+#define FPSF_D (1 << 8) /* denormal/unnormal operand flag */
+#define FPSF_Z (1 << 9) /* zero divide (IEEE) flag */
+#define FPSF_O (1 << 10) /* overflow (IEEE) flag */
+#define FPSF_U (1 << 11) /* underflow (IEEE) flag */
+#define FPSF_I			(1 << 12)	/* inexact (IEEE) flag */
+
+/* floating-point rounding control: */
+#define FPRC_NEAREST 0x0
+#define FPRC_NEGINF 0x1
+#define FPRC_POSINF 0x2
+#define FPRC_TRUNC 0x3
+
+#define FPSF_DEFAULT (FPSF_PC (0x3) | FPSF_RC (FPRC_NEAREST))
+
+/* This default value is the same as HP-UX uses. Don't change it
+ without a very good reason. */
+#define FPSR_DEFAULT (FPSR_TRAP_VD | FPSR_TRAP_DD | FPSR_TRAP_ZD \
+ | FPSR_TRAP_OD | FPSR_TRAP_UD | FPSR_TRAP_ID \
+ | FPSR_S0 (FPSF_DEFAULT) \
+ | FPSR_S1 (FPSF_DEFAULT | FPSF_TD | FPSF_WRE) \
+ | FPSR_S2 (FPSF_DEFAULT | FPSF_TD) \
+ | FPSR_S3 (FPSF_DEFAULT | FPSF_TD))
+
+# ifndef __ASSEMBLY__
+
+struct ia64_fpreg {
+ union {
+ unsigned long bits[2];
+ long double __dummy; /* force 16-byte alignment */
+ } u;
+};
+
+# endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_FPU_H */
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
new file mode 100644
index 000000000000..c7f0f062239c
--- /dev/null
+++ b/arch/ia64/include/asm/futex.h
@@ -0,0 +1,124 @@
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+#include <asm/system.h>
+
+#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
+do { \
+ register unsigned long r8 __asm ("r8") = 0; \
+ __asm__ __volatile__( \
+ " mf;; \n" \
+ "[1:] " insn ";; \n" \
+ " .xdata4 \"__ex_table\", 1b-., 2f-. \n" \
+ "[2:]" \
+ : "+r" (r8), "=r" (oldval) \
+ : "r" (uaddr), "r" (oparg) \
+ : "memory"); \
+ ret = r8; \
+} while (0)
+
+#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
+do { \
+ register unsigned long r8 __asm ("r8") = 0; \
+ int val, newval; \
+ do { \
+ __asm__ __volatile__( \
+ " mf;; \n" \
+ "[1:] ld4 %3=[%4];; \n" \
+ " mov %2=%3 \n" \
+ insn ";; \n" \
+ " mov ar.ccv=%2;; \n" \
+ "[2:] cmpxchg4.acq %1=[%4],%3,ar.ccv;; \n" \
+ " .xdata4 \"__ex_table\", 1b-., 3f-.\n" \
+ " .xdata4 \"__ex_table\", 2b-., 3f-.\n" \
+ "[3:]" \
+ : "+r" (r8), "=r" (val), "=&r" (oldval), \
+ "=&r" (newval) \
+ : "r" (uaddr), "r" (oparg) \
+ : "memory"); \
+ if (unlikely (r8)) \
+ break; \
+ } while (unlikely (val != oldval)); \
+ ret = r8; \
+} while (0)
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op1("xchg4 %1=[%2],%3", ret, oldval, uaddr,
+ oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op2("add %3=%3,%5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op2("or %3=%3,%5", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op2("and %3=%3,%5", ret, oldval, uaddr,
+ ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op2("xor %3=%3,%5", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ {
+ register unsigned long r8 __asm ("r8");
+ __asm__ __volatile__(
+ " mf;; \n"
+ " mov ar.ccv=%3;; \n"
+ "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n"
+ " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
+ "[2:]"
+ : "=r" (r8)
+ : "r" (uaddr), "r" (newval),
+ "rO" ((long) (unsigned) oldval)
+ : "memory");
+ return r8;
+ }
+}
+
+#endif /* _ASM_FUTEX_H */
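
For reference, the decode at the top of futex_atomic_op_inuser() reverses
the packing done by the generic FUTEX_OP() macro in <linux/futex.h>
(reproduced below from that header); the paired shifts such as
(encoded_op << 8) >> 20 arithmetically sign-extend the 12-bit oparg and
cmparg fields:

    #define FUTEX_OP(op, oparg, cmp, cmparg) \
            (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
             | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
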
diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h
new file mode 100644
index 000000000000..0f5b55921758
--- /dev/null
+++ b/arch/ia64/include/asm/gcc_intrin.h
@@ -0,0 +1,620 @@
+#ifndef _ASM_IA64_GCC_INTRIN_H
+#define _ASM_IA64_GCC_INTRIN_H
+/*
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+
+#include <linux/compiler.h>
+
+/* define this macro to get some asm statements included in C files */
+#define ASM_SUPPORTED
+
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define ia64_barrier() asm volatile ("":::"memory")
+
+#define ia64_stop() asm volatile (";;"::)
+
+#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))
+
+#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
+
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
+extern void ia64_bad_param_for_setreg (void);
+extern void ia64_bad_param_for_getreg (void);
+
+#ifdef __KERNEL__
+register unsigned long ia64_r13 asm ("r13") __used;
+#endif
+
+#define ia64_native_setreg(regnum, val) \
+({ \
+ switch (regnum) { \
+ case _IA64_REG_PSR_L: \
+ asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
+ break; \
+ case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
+ asm volatile ("mov ar%0=%1" :: \
+ "i" (regnum - _IA64_REG_AR_KR0), \
+ "r"(val): "memory"); \
+ break; \
+ case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
+ asm volatile ("mov cr%0=%1" :: \
+ "i" (regnum - _IA64_REG_CR_DCR), \
+ "r"(val): "memory" ); \
+ break; \
+ case _IA64_REG_SP: \
+ asm volatile ("mov r12=%0" :: \
+ "r"(val): "memory"); \
+ break; \
+ case _IA64_REG_GP: \
+ asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
+ break; \
+ default: \
+ ia64_bad_param_for_setreg(); \
+ break; \
+ } \
+})
+
+#define ia64_native_getreg(regnum) \
+({ \
+ __u64 ia64_intri_res; \
+ \
+ switch (regnum) { \
+ case _IA64_REG_GP: \
+ asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
+ break; \
+ case _IA64_REG_IP: \
+ asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
+ break; \
+ case _IA64_REG_PSR: \
+ asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
+ break; \
+ case _IA64_REG_TP: /* for current() */ \
+ ia64_intri_res = ia64_r13; \
+ break; \
+ case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
+ asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
+ : "i"(regnum - _IA64_REG_AR_KR0)); \
+ break; \
+ case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
+ asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
+ : "i" (regnum - _IA64_REG_CR_DCR)); \
+ break; \
+ case _IA64_REG_SP: \
+ asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
+ break; \
+ default: \
+ ia64_bad_param_for_getreg(); \
+ break; \
+ } \
+ ia64_intri_res; \
+})
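
A quick usage sketch: the register-number argument must be a compile-time
constant, since each case expands to a fixed asm template. Kernel code
normally goes through the ia64_getreg() wrapper from <asm/intrinsics.h>
rather than the native variant; printk assumes <linux/kernel.h>:

    static inline void dump_sp_gp(void)
    {
            unsigned long sp = ia64_native_getreg(_IA64_REG_SP);
            unsigned long gp = ia64_native_getreg(_IA64_REG_GP);

            printk(KERN_DEBUG "sp=0x%lx gp=0x%lx\n", sp, gp);
    }
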
+
+#define ia64_hint_pause 0
+
+#define ia64_hint(mode) \
+({ \
+ switch (mode) { \
+ case ia64_hint_pause: \
+ asm volatile ("hint @pause" ::: "memory"); \
+ break; \
+ } \
+})
+
+
+/* Integer values for mux1 instruction */
+#define ia64_mux1_brcst 0
+#define ia64_mux1_mix 8
+#define ia64_mux1_shuf 9
+#define ia64_mux1_alt 10
+#define ia64_mux1_rev 11
+
+#define ia64_mux1(x, mode) \
+({ \
+ __u64 ia64_intri_res; \
+ \
+ switch (mode) { \
+ case ia64_mux1_brcst: \
+ asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_mix: \
+ asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_shuf: \
+ asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_alt: \
+ asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ case ia64_mux1_rev: \
+ asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
+ break; \
+ } \
+ ia64_intri_res; \
+})
+
+#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# define ia64_popcnt(x) __builtin_popcountl(x)
+#else
+# define ia64_popcnt(x) \
+ ({ \
+ __u64 ia64_intri_res; \
+ asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
+ \
+ ia64_intri_res; \
+ })
+#endif
+
+#define ia64_getf_exp(x) \
+({ \
+ long ia64_intri_res; \
+ \
+ asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_shrp(a, b, count) \
+({ \
+ __u64 ia64_intri_res; \
+ asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
+ ia64_intri_res; \
+})
+
+#define ia64_ldfs(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldfd(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldfe(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldf8(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_ldf_fill(regnum, x) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
+})
+
+#define ia64_st4_rel_nta(m, val) \
+({ \
+ asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val)); \
+})
+
+#define ia64_stfs(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfd(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stfe(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf8(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_stf_spill(x, regnum) \
+({ \
+ register double __f__ asm ("f"#regnum); \
+ asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
+})
+
+#define ia64_fetchadd4_acq(p, inc) \
+({ \
+ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd4.acq %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_fetchadd4_rel(p, inc) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd4.rel %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_fetchadd8_acq(p, inc) \
+({ \
+ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd8.acq %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_fetchadd8_rel(p, inc) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("fetchadd8.rel %0=[%1],%2" \
+ : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
+ : "memory"); \
+ \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg1(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("xchg1 %0=[%1],%2" \
+ : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg2(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg4(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_xchg8(ptr,x) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
+ : "r" (ptr), "r" (x) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg1_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg1_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg2_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+ asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg4_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_acq(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_cmpxchg8_rel(ptr, new, old) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
+ \
+ asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
+ "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
+ ia64_intri_res; \
+})
+
+#define ia64_mf() asm volatile ("mf" ::: "memory")
+#define ia64_mfa() asm volatile ("mf.a" ::: "memory")
+
+#define ia64_invala() asm volatile ("invala" ::: "memory")
+
+#define ia64_native_thash(addr) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+ ia64_intri_res; \
+})
+
+#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
+#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory");
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define ia64_dv_serialize_data() asm volatile (".serialize.data");
+# define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction");
+#else
+# define ia64_dv_serialize_data()
+# define ia64_dv_serialize_instruction()
+#endif
+
+#define ia64_nop(x) asm volatile ("nop %0"::"i"(x));
+
+#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+
+#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+
+
+#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
+ :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
+ :: "r"(trnum), "r"(addr) : "memory")
+
+#define ia64_tpa(addr) \
+({ \
+ __u64 ia64_pa; \
+ asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
+ ia64_pa; \
+})
+
+#define __ia64_set_dbr(index, val) \
+ asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_ibr(index, val) \
+ asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pkr(index, val) \
+ asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmc(index, val) \
+ asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_set_pmd(index, val) \
+ asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+
+#define ia64_native_set_rr(index, val) \
+ asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+
+#define ia64_native_get_cpuid(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
+ ia64_intri_res; \
+})
+
+#define __ia64_get_dbr(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_ibr(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_pkr(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_get_pmc(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+
+#define ia64_native_get_pmd(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_native_get_rr(index) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
+ ia64_intri_res; \
+})
+
+#define ia64_native_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+
+
+#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")
+
+#define ia64_native_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory")
+#define ia64_native_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory")
+#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory")
+#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory")
+
+#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))
+
+#define ia64_native_ptcga(addr, size) \
+do { \
+ asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
+ ia64_dv_serialize_data(); \
+} while (0)
+
+#define ia64_ptcl(addr, size) \
+do { \
+ asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
+ ia64_dv_serialize_data(); \
+} while (0)
+
+#define ia64_ptri(addr, size) \
+ asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ptrd(addr, size) \
+ asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+
+#define ia64_ttag(addr) \
+({ \
+ __u64 ia64_intri_res; \
+ asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
+ ia64_intri_res; \
+})
+
+
+/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
+
+#define ia64_lfhint_none 0
+#define ia64_lfhint_nt1 1
+#define ia64_lfhint_nt2 2
+#define ia64_lfhint_nta 3
+
+#define ia64_lfetch(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_lfetch_excl(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_lfetch_fault(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_lfetch_fault_excl(lfhint, y) \
+({ \
+ switch (lfhint) { \
+ case ia64_lfhint_none: \
+ asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt1: \
+ asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nt2: \
+ asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
+ break; \
+ case ia64_lfhint_nta: \
+ asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
+ break; \
+ } \
+})
+
+#define ia64_native_intrin_local_irq_restore(x) \
+do { \
+ asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
+ "(p6) ssm psr.i;" \
+ "(p7) rsm psr.i;;" \
+ "(p6) srlz.d" \
+ :: "r"((x)) : "p6", "p7", "memory"); \
+} while (0)
+
+#endif /* _ASM_IA64_GCC_INTRIN_H */
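
A sketch of the classic retry loop built on the cmpxchg intrinsics above
(acquire form; an illustration, not an existing kernel API):

    static inline void atomic64_inc_sketch(volatile __u64 *p)
    {
            __u64 old;

            do {
                    old = *p;
                    /* ia64_cmpxchg8_acq() returns the previous value;
                       equality with 'old' means our store went through */
            } while (ia64_cmpxchg8_acq(p, old + 1, old) != old);
    }
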
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
new file mode 100644
index 000000000000..140e495b8e0e
--- /dev/null
+++ b/arch/ia64/include/asm/hardirq.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_IA64_HARDIRQ_H
+#define _ASM_IA64_HARDIRQ_H
+
+/*
+ * Modified 1998-2002, 2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+
+/*
+ * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure.
+ */
+
+#define __ARCH_IRQ_STAT 1
+
+#define local_softirq_pending() (local_cpu_data->softirq_pending)
+
+#define HARDIRQ_BITS 14
+
+/*
+ * The hardirq mask has to be large enough to have space for potentially all IRQ sources
+ * in the system nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+extern void __iomem *ipi_base_addr;
+
+void ack_bad_irq(unsigned int irq);
+
+#endif /* _ASM_IA64_HARDIRQ_H */
diff --git a/arch/ia64/include/asm/hpsim.h b/arch/ia64/include/asm/hpsim.h
new file mode 100644
index 000000000000..892ab198a9da
--- /dev/null
+++ b/arch/ia64/include/asm/hpsim.h
@@ -0,0 +1,16 @@
+#ifndef _ASMIA64_HPSIM_H
+#define _ASMIA64_HPSIM_H
+
+#ifndef CONFIG_HP_SIMSERIAL_CONSOLE
+static inline int simcons_register(void) { return 1; }
+#else
+int simcons_register(void);
+#endif
+
+struct tty_driver;
+extern struct tty_driver *hp_simserial_driver;
+
+void ia64_ssc_connect_irq(long intr, long irq);
+void ia64_ctl_trace(long on);
+
+#endif
diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h
new file mode 100644
index 000000000000..da55c63728e0
--- /dev/null
+++ b/arch/ia64/include/asm/hugetlb.h
@@ -0,0 +1,80 @@
+#ifndef _ASM_IA64_HUGETLB_H
+#define _ASM_IA64_HUGETLB_H
+
+#include <asm/page.h>
+
+
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling);
+
+int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len)
+{
+ return (REGION_NUMBER(addr) == RGN_HPAGE ||
+ REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_IA64_HUGETLB_H */
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
new file mode 100644
index 000000000000..5c99cbcb8a0d
--- /dev/null
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -0,0 +1,192 @@
+#ifndef _ASM_IA64_HW_IRQ_H
+#define _ASM_IA64_HW_IRQ_H
+
+/*
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/profile.h>
+
+#include <asm/machvec.h>
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#ifndef CONFIG_PARAVIRT
+typedef u8 ia64_vector;
+#else
+typedef u16 ia64_vector;
+#endif
+
+/*
+ * 0 special
+ *
+ * 1,3-14 are reserved by firmware
+ *
+ * 15 spurious interrupt (see IVR)
+ *
+ * 16-255 (vectored external interrupts) are available;
+ * 16 is the lowest priority, 255 the highest
+ *
+ * 15 classes of 16 interrupts each.
+ */
+#define IA64_MIN_VECTORED_IRQ 16
+#define IA64_MAX_VECTORED_IRQ 255
+#define IA64_NUM_VECTORS 256
+
+#define AUTO_ASSIGN -1
+
+#define IA64_SPURIOUS_INT_VECTOR 0x0f
+
+/*
+ * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI.
+ */
+#define IA64_CPEP_VECTOR 0x1c /* corrected platform error polling vector */
+#define IA64_CMCP_VECTOR 0x1d /* corrected machine-check polling vector */
+#define IA64_CPE_VECTOR 0x1e /* corrected platform error interrupt vector */
+#define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */
+/*
+ * Vectors 0x20-0x2f are reserved for legacy ISA IRQs.
+ * Use vectors 0x30-0xe7 as the default device vector range for ia64.
+ * Platforms may choose to reduce this range in platform_irq_setup, but the
+ * platform range must fall within
+ * [IA64_DEF_FIRST_DEVICE_VECTOR..IA64_DEF_LAST_DEVICE_VECTOR]
+ */
+extern int ia64_first_device_vector;
+extern int ia64_last_device_vector;
+
+#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30
+#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7
+#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector
+#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector
+#define IA64_MAX_DEVICE_VECTORS (IA64_DEF_LAST_DEVICE_VECTOR - IA64_DEF_FIRST_DEVICE_VECTOR + 1)
+#define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1)
+
+#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */
+#define IA64_PERFMON_VECTOR 0xee /* performance monitor interrupt vector */
+#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */
+#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */
+#define IA64_IPI_LOCAL_TLB_FLUSH 0xfc /* SMP flush local TLB */
+#define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */
+#define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */
+
+/* Used for encoding redirected irqs */
+
+#define IA64_IRQ_REDIRECTED (1 << 31)
+
+/* IA64 inter-cpu interrupt related definitions */
+
+#define IA64_IPI_DEFAULT_BASE_ADDR 0xfee00000
+
+/* Delivery modes for inter-cpu interrupts */
+enum {
+ IA64_IPI_DM_INT = 0x0, /* pend an external interrupt */
+ IA64_IPI_DM_PMI = 0x2, /* pend a PMI */
+ IA64_IPI_DM_NMI = 0x4, /* pend an NMI (vector 2) */
+ IA64_IPI_DM_INIT = 0x5, /* pend an INIT interrupt */
+ IA64_IPI_DM_EXTINT = 0x7, /* pend an 8259-compatible interrupt. */
+};
+
+extern __u8 isa_irq_to_vector_map[16];
+#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)]
+
+struct irq_cfg {
+ ia64_vector vector;
+ cpumask_t domain;
+ cpumask_t old_domain;
+ unsigned move_cleanup_count;
+ u8 move_in_progress : 1;
+};
+extern spinlock_t vector_lock;
+extern struct irq_cfg irq_cfg[NR_IRQS];
+#define irq_to_domain(x) irq_cfg[(x)].domain
+DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
+
+extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
+
+#ifdef CONFIG_PARAVIRT_GUEST
+#include <asm/paravirt.h>
+#else
+#define ia64_register_ipi ia64_native_register_ipi
+#define assign_irq_vector ia64_native_assign_irq_vector
+#define free_irq_vector ia64_native_free_irq_vector
+#define register_percpu_irq ia64_native_register_percpu_irq
+#define ia64_resend_irq ia64_native_resend_irq
+#endif
+
+extern void ia64_native_register_ipi(void);
+extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
+extern int ia64_native_assign_irq_vector (int irq); /* allocate a free vector */
+extern void ia64_native_free_irq_vector (int vector);
+extern int reserve_irq_vector (int vector);
+extern void __setup_vector_irq(int cpu);
+extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
+extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action);
+extern int check_irq_used (int irq);
+extern void destroy_and_reserve_irq (unsigned int irq);
+
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+extern int irq_prepare_move(int irq, int cpu);
+extern void irq_complete_move(unsigned int irq);
+#else
+static inline int irq_prepare_move(int irq, int cpu) { return 0; }
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
+
+static inline void ia64_native_resend_irq(unsigned int vector)
+{
+ platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0);
+}
+
+/*
+ * Default implementations for the irq-descriptor API:
+ */
+
+extern irq_desc_t irq_desc[NR_IRQS];
+
+#ifndef CONFIG_IA64_GENERIC
+static inline ia64_vector __ia64_irq_to_vector(int irq)
+{
+ return irq_cfg[irq].vector;
+}
+
+static inline unsigned int
+__ia64_local_vector_to_irq (ia64_vector vec)
+{
+ return __get_cpu_var(vector_irq)[vec];
+}
+#endif
+
+/*
+ * Next follows the irq descriptor interface. On IA-64, each CPU supports 256 interrupt
+ * vectors. On smaller systems, there is a one-to-one correspondence between interrupt
+ * vectors and the Linux irq numbers. However, larger systems may have multiple interrupt
+ * domains meaning that the translation from vector number to irq number depends on the
+ * interrupt domain that a CPU belongs to. This API abstracts such platform-dependent
+ * differences and provides a uniform means to translate between vector and irq numbers
+ * and to obtain the irq descriptor for a given irq number.
+ */
+
+/* Extract the IA-64 vector that corresponds to IRQ. */
+static inline ia64_vector
+irq_to_vector (int irq)
+{
+ return platform_irq_to_vector(irq);
+}
+
+/*
+ * Convert the local IA-64 vector to the corresponding irq number. This translation is
+ * done in the context of the interrupt domain that the currently executing CPU belongs
+ * to.
+ */
+static inline unsigned int
+local_vector_to_irq (ia64_vector vec)
+{
+ return platform_local_vector_to_irq(vec);
+}
+
+#endif /* _ASM_IA64_HW_IRQ_H */
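To make the translation API above concrete, a hedged sketch of a round trip between vector and irq numbers (the dispatch function is hypothetical):

    /* The CPU delivers a vector; the handler wants a Linux irq number.
     * On small systems the mapping is one-to-one, so the round trip is
     * the identity; on multi-domain systems it depends on the CPU's domain. */
    void example_dispatch(ia64_vector vec)            /* hypothetical */
    {
        unsigned int irq = local_vector_to_irq(vec);  /* current CPU's domain */
        ia64_vector back = irq_to_vector(irq);        /* platform-wide view */
        (void)back;
    }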
diff --git a/arch/ia64/include/asm/ia32.h b/arch/ia64/include/asm/ia32.h
new file mode 100644
index 000000000000..2390ee145aa1
--- /dev/null
+++ b/arch/ia64/include/asm/ia32.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_IA64_IA32_H
+#define _ASM_IA64_IA32_H
+
+
+#include <asm/ptrace.h>
+#include <asm/signal.h>
+
+#define IA32_NR_syscalls 285 /* length of syscall table */
+#define IA32_PAGE_SHIFT 12 /* 4KB pages */
+
+#ifndef __ASSEMBLY__
+
+# ifdef CONFIG_IA32_SUPPORT
+
+#define IA32_PAGE_OFFSET 0xc0000000
+
+extern void ia32_cpu_init (void);
+extern void ia32_mem_init (void);
+extern void ia32_gdt_init (void);
+extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
+extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
+extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
+
+# endif /* CONFIG_IA32_SUPPORT */
+
+/* Declare this unconditionally, so we don't get warnings for unreachable code. */
+extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs);
+#if PAGE_SHIFT > IA32_PAGE_SHIFT
+extern int ia32_copy_ia64_partial_page_list(struct task_struct *,
+ unsigned long);
+extern void ia32_drop_ia64_partial_page_list(struct task_struct *);
+#else
+# define ia32_copy_ia64_partial_page_list(a1, a2) 0
+# define ia32_drop_ia64_partial_page_list(a1) do { ; } while (0)
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_IA32_H */
diff --git a/arch/ia64/include/asm/ia64regs.h b/arch/ia64/include/asm/ia64regs.h
new file mode 100644
index 000000000000..1757f1c11ad4
--- /dev/null
+++ b/arch/ia64/include/asm/ia64regs.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2002,2003 Intel Corp.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+
+#ifndef _ASM_IA64_IA64REGS_H
+#define _ASM_IA64_IA64REGS_H
+
+/*
+ * Register Names for getreg() and setreg().
+ *
+ * The "magic" numbers happen to match the values used by the Intel compiler's
+ * getreg()/setreg() intrinsics.
+ */
+
+/* Special Registers */
+
+#define _IA64_REG_IP 1016 /* getreg only */
+#define _IA64_REG_PSR 1019
+#define _IA64_REG_PSR_L 1019
+
+/* General Integer Registers */
+
+#define _IA64_REG_GP 1025 /* R1 */
+#define _IA64_REG_R8 1032 /* R8 */
+#define _IA64_REG_R9 1033 /* R9 */
+#define _IA64_REG_SP 1036 /* R12 */
+#define _IA64_REG_TP 1037 /* R13 */
+
+/* Application Registers */
+
+#define _IA64_REG_AR_KR0 3072
+#define _IA64_REG_AR_KR1 3073
+#define _IA64_REG_AR_KR2 3074
+#define _IA64_REG_AR_KR3 3075
+#define _IA64_REG_AR_KR4 3076
+#define _IA64_REG_AR_KR5 3077
+#define _IA64_REG_AR_KR6 3078
+#define _IA64_REG_AR_KR7 3079
+#define _IA64_REG_AR_RSC 3088
+#define _IA64_REG_AR_BSP 3089
+#define _IA64_REG_AR_BSPSTORE 3090
+#define _IA64_REG_AR_RNAT 3091
+#define _IA64_REG_AR_FCR 3093
+#define _IA64_REG_AR_EFLAG 3096
+#define _IA64_REG_AR_CSD 3097
+#define _IA64_REG_AR_SSD 3098
+#define _IA64_REG_AR_CFLAG 3099
+#define _IA64_REG_AR_FSR 3100
+#define _IA64_REG_AR_FIR 3101
+#define _IA64_REG_AR_FDR 3102
+#define _IA64_REG_AR_CCV 3104
+#define _IA64_REG_AR_UNAT 3108
+#define _IA64_REG_AR_FPSR 3112
+#define _IA64_REG_AR_ITC 3116
+#define _IA64_REG_AR_PFS 3136
+#define _IA64_REG_AR_LC 3137
+#define _IA64_REG_AR_EC 3138
+
+/* Control Registers */
+
+#define _IA64_REG_CR_DCR 4096
+#define _IA64_REG_CR_ITM 4097
+#define _IA64_REG_CR_IVA 4098
+#define _IA64_REG_CR_PTA 4104
+#define _IA64_REG_CR_IPSR 4112
+#define _IA64_REG_CR_ISR 4113
+#define _IA64_REG_CR_IIP 4115
+#define _IA64_REG_CR_IFA 4116
+#define _IA64_REG_CR_ITIR 4117
+#define _IA64_REG_CR_IIPA 4118
+#define _IA64_REG_CR_IFS 4119
+#define _IA64_REG_CR_IIM 4120
+#define _IA64_REG_CR_IHA 4121
+#define _IA64_REG_CR_LID 4160
+#define _IA64_REG_CR_IVR 4161 /* getreg only */
+#define _IA64_REG_CR_TPR 4162
+#define _IA64_REG_CR_EOI 4163
+#define _IA64_REG_CR_IRR0 4164 /* getreg only */
+#define _IA64_REG_CR_IRR1 4165 /* getreg only */
+#define _IA64_REG_CR_IRR2 4166 /* getreg only */
+#define _IA64_REG_CR_IRR3 4167 /* getreg only */
+#define _IA64_REG_CR_ITV 4168
+#define _IA64_REG_CR_PMV 4169
+#define _IA64_REG_CR_CMCV 4170
+#define _IA64_REG_CR_LRR0 4176
+#define _IA64_REG_CR_LRR1 4177
+
+/* Indirect Registers for getindreg() and setindreg() */
+
+#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */
+#define _IA64_REG_INDR_DBR 9001
+#define _IA64_REG_INDR_IBR 9002
+#define _IA64_REG_INDR_PKR 9003
+#define _IA64_REG_INDR_PMC 9004
+#define _IA64_REG_INDR_PMD 9005
+#define _IA64_REG_INDR_RR 9006
+
+#endif /* _ASM_IA64_IA64REGS_H */
diff --git a/arch/ia64/include/asm/intel_intrin.h b/arch/ia64/include/asm/intel_intrin.h
new file mode 100644
index 000000000000..53cec577558a
--- /dev/null
+++ b/arch/ia64/include/asm/intel_intrin.h
@@ -0,0 +1,161 @@
+#ifndef _ASM_IA64_INTEL_INTRIN_H
+#define _ASM_IA64_INTEL_INTRIN_H
+/*
+ * Intel Compiler Intrinsics
+ *
+ * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
+ * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
+ *
+ */
+#include <ia64intrin.h>
+
+#define ia64_barrier() __memory_barrier()
+
+#define ia64_stop() /* Nothing: as of now, a stop bit is generated for each intrinsic */
+
+#define ia64_native_getreg __getReg
+#define ia64_native_setreg __setReg
+
+#define ia64_hint __hint
+#define ia64_hint_pause __hint_pause
+
+#define ia64_mux1_brcst _m64_mux1_brcst
+#define ia64_mux1_mix _m64_mux1_mix
+#define ia64_mux1_shuf _m64_mux1_shuf
+#define ia64_mux1_alt _m64_mux1_alt
+#define ia64_mux1_rev _m64_mux1_rev
+
+#define ia64_mux1(x,v) _m_to_int64(_m64_mux1(_m_from_int64(x), (v)))
+#define ia64_popcnt _m64_popcnt
+#define ia64_getf_exp __getf_exp
+#define ia64_shrp _m64_shrp
+
+#define ia64_tpa __tpa
+#define ia64_invala __invala
+#define ia64_invala_gr __invala_gr
+#define ia64_invala_fr __invala_fr
+#define ia64_nop __nop
+#define ia64_sum __sum
+#define ia64_native_ssm __ssm
+#define ia64_rum __rum
+#define ia64_native_rsm __rsm
+#define ia64_native_fc __fc
+
+#define ia64_ldfs __ldfs
+#define ia64_ldfd __ldfd
+#define ia64_ldfe __ldfe
+#define ia64_ldf8 __ldf8
+#define ia64_ldf_fill __ldf_fill
+
+#define ia64_stfs __stfs
+#define ia64_stfd __stfd
+#define ia64_stfe __stfe
+#define ia64_stf8 __stf8
+#define ia64_stf_spill __stf_spill
+
+#define ia64_mf __mf
+#define ia64_mfa __mfa
+
+#define ia64_fetchadd4_acq __fetchadd4_acq
+#define ia64_fetchadd4_rel __fetchadd4_rel
+#define ia64_fetchadd8_acq __fetchadd8_acq
+#define ia64_fetchadd8_rel __fetchadd8_rel
+
+#define ia64_xchg1 _InterlockedExchange8
+#define ia64_xchg2 _InterlockedExchange16
+#define ia64_xchg4 _InterlockedExchange
+#define ia64_xchg8 _InterlockedExchange64
+
+#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
+#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
+#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
+#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
+#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
+#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
+#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
+#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
+
+#define __ia64_set_dbr(index, val) \
+ __setIndReg(_IA64_REG_INDR_DBR, index, val)
+#define ia64_set_ibr(index, val) \
+ __setIndReg(_IA64_REG_INDR_IBR, index, val)
+#define ia64_set_pkr(index, val) \
+ __setIndReg(_IA64_REG_INDR_PKR, index, val)
+#define ia64_set_pmc(index, val) \
+ __setIndReg(_IA64_REG_INDR_PMC, index, val)
+#define ia64_set_pmd(index, val) \
+ __setIndReg(_IA64_REG_INDR_PMD, index, val)
+#define ia64_native_set_rr(index, val) \
+ __setIndReg(_IA64_REG_INDR_RR, index, val)
+
+#define ia64_native_get_cpuid(index) \
+ __getIndReg(_IA64_REG_INDR_CPUID, index)
+#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
+#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
+#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
+#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
+#define ia64_native_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
+#define ia64_native_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
+
+#define ia64_srlz_d __dsrlz
+#define ia64_srlz_i __isrlz
+
+#define ia64_dv_serialize_data()
+#define ia64_dv_serialize_instruction()
+
+#define ia64_st1_rel __st1_rel
+#define ia64_st2_rel __st2_rel
+#define ia64_st4_rel __st4_rel
+#define ia64_st8_rel __st8_rel
+
+/* FIXME: need st4.rel.nta intrinsic */
+#define ia64_st4_rel_nta __st4_rel
+
+#define ia64_ld1_acq __ld1_acq
+#define ia64_ld2_acq __ld2_acq
+#define ia64_ld4_acq __ld4_acq
+#define ia64_ld8_acq __ld8_acq
+
+#define ia64_sync_i __synci
+#define ia64_native_thash __thash
+#define ia64_native_ttag __ttag
+#define ia64_itcd __itcd
+#define ia64_itci __itci
+#define ia64_itrd __itrd
+#define ia64_itri __itri
+#define ia64_ptce __ptce
+#define ia64_ptcl __ptcl
+#define ia64_native_ptcg __ptcg
+#define ia64_native_ptcga __ptcga
+#define ia64_ptri __ptri
+#define ia64_ptrd __ptrd
+#define ia64_dep_mi _m64_dep_mi
+
+/* Values for lfhint in __lfetch and __lfetch_fault */
+
+#define ia64_lfhint_none __lfhint_none
+#define ia64_lfhint_nt1 __lfhint_nt1
+#define ia64_lfhint_nt2 __lfhint_nt2
+#define ia64_lfhint_nta __lfhint_nta
+
+#define ia64_lfetch __lfetch
+#define ia64_lfetch_excl __lfetch_excl
+#define ia64_lfetch_fault __lfetch_fault
+#define ia64_lfetch_fault_excl __lfetch_fault_excl
+
+#define ia64_native_intrin_local_irq_restore(x) \
+do { \
+ if ((x) != 0) { \
+ ia64_native_ssm(IA64_PSR_I); \
+ ia64_srlz_d(); \
+ } else { \
+ ia64_native_rsm(IA64_PSR_I); \
+ } \
+} while (0)
+
+#define __builtin_trap() __break(0)
+
+#endif /* _ASM_IA64_INTEL_INTRIN_H */
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
new file mode 100644
index 000000000000..47d686dba1eb
--- /dev/null
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -0,0 +1,241 @@
+#ifndef _ASM_IA64_INTRINSICS_H
+#define _ASM_IA64_INTRINSICS_H
+
+/*
+ * Compiler-dependent intrinsics.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifndef __ASSEMBLY__
+
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+
+#define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
+
+#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4) \
+do { \
+ ia64_native_set_rr(0x0000000000000000UL, (val0)); \
+ ia64_native_set_rr(0x2000000000000000UL, (val1)); \
+ ia64_native_set_rr(0x4000000000000000UL, (val2)); \
+ ia64_native_set_rr(0x6000000000000000UL, (val3)); \
+ ia64_native_set_rr(0x8000000000000000UL, (val4)); \
+} while (0)
+
+/*
+ * Force an unresolved reference if someone tries to use
+ * ia64_fetch_and_add() with a bad value.
+ */
+extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
+extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
+
+#define IA64_FETCHADD(tmp,v,n,sz,sem) \
+({ \
+ switch (sz) { \
+ case 4: \
+ tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \
+ break; \
+ \
+ case 8: \
+ tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \
+ break; \
+ \
+ default: \
+ __bad_size_for_ia64_fetch_and_add(); \
+ } \
+})
+
+#define ia64_fetchadd(i,v,sem) \
+({ \
+ __u64 _tmp; \
+ volatile __typeof__(*(v)) *_v = (v); \
+ /* Can't use a switch () here: gcc isn't always smart enough for that... */ \
+ if ((i) == -16) \
+ IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem); \
+ else if ((i) == -8) \
+ IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem); \
+ else if ((i) == -4) \
+ IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem); \
+ else if ((i) == -1) \
+ IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem); \
+ else if ((i) == 1) \
+ IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem); \
+ else if ((i) == 4) \
+ IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem); \
+ else if ((i) == 8) \
+ IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem); \
+ else if ((i) == 16) \
+ IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem); \
+ else \
+ _tmp = __bad_increment_for_ia64_fetch_and_add(); \
+ (__typeof__(*(v))) (_tmp); /* return old value */ \
+})
+
+#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
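As a usage sketch (the counter and function are illustrative), note that only the increments enumerated in ia64_fetchadd() link successfully:

    static long nusers;

    void example(void)                                /* hypothetical */
    {
        long now = ia64_fetch_and_add(1, &nusers);    /* returns the new value */
        long was = ia64_fetchadd(-4, &nusers, rel);   /* returns the old value */
        /* ia64_fetch_and_add(3, &nusers) would leave an unresolved reference
         * to __bad_increment_for_ia64_fetch_and_add() at link time. */
        (void)now; (void)was;
    }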
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalid xchg().
+ */
+extern void ia64_xchg_called_with_bad_pointer (void);
+
+#define __xchg(x,ptr,size) \
+({ \
+ unsigned long __xchg_result; \
+ \
+ switch (size) { \
+ case 1: \
+ __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
+ break; \
+ \
+ case 2: \
+ __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
+ break; \
+ \
+ case 4: \
+ __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
+ break; \
+ \
+ case 8: \
+ __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
+ break; \
+ default: \
+ ia64_xchg_called_with_bad_pointer(); \
+ } \
+ __xchg_result; \
+})
+
+#define xchg(ptr,x) \
+ ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
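A one-line usage sketch (the lock word is illustrative): xchg() atomically stores the new value and hands back the previous contents, with the operand size selected from the pointee type:

    unsigned long lock_word = 0;
    unsigned long prev = xchg(&lock_word, 1UL);   /* prev == 0 on the first call */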
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer (void);
+
+#define ia64_cmpxchg(sem,ptr,old,new,size) \
+({ \
+ __u64 _o_, _r_; \
+ \
+ switch (size) { \
+ case 1: _o_ = (__u8 ) (long) (old); break; \
+ case 2: _o_ = (__u16) (long) (old); break; \
+ case 4: _o_ = (__u32) (long) (old); break; \
+ case 8: _o_ = (__u64) (long) (old); break; \
+ default: break; \
+ } \
+ switch (size) { \
+ case 1: \
+ _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
+ break; \
+ \
+ case 2: \
+ _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
+ break; \
+ \
+ case 4: \
+ _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
+ break; \
+ \
+ case 8: \
+ _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
+ break; \
+ \
+ default: \
+ _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
+ break; \
+ } \
+ (__typeof__(old)) _r_; \
+})
+
+#define cmpxchg_acq(ptr, o, n) \
+ ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr, o, n) \
+ ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+
+/* for compatibility with other platforms: */
+#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+
+#define cmpxchg_local cmpxchg
+#define cmpxchg64_local cmpxchg64
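The canonical pattern built on these macros is a retry loop; a sketch with an illustrative flag word, using the success test described above (the returned value equals the old value):

    static unsigned long flags_word;

    void set_bit0(void)                           /* hypothetical */
    {
        unsigned long old, new;
        do {
            old = flags_word;
            new = old | 1UL;                      /* the desired update */
        } while (cmpxchg_acq(&flags_word, old, new) != old);
    }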
+
+#ifdef CONFIG_IA64_DEBUG_CMPXCHG
+# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
+# define CMPXCHG_BUGCHECK(v) \
+ do { \
+ if (_cmpxchg_bugcheck_count-- <= 0) { \
+ void *ip; \
+ extern int printk(const char *fmt, ...); \
+ ip = (void *) ia64_getreg(_IA64_REG_IP); \
+ printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
+ break; \
+ } \
+ } while (0)
+#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
+# define CMPXCHG_BUGCHECK_DECL
+# define CMPXCHG_BUGCHECK(v)
+#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef __KERNEL__
+#include <asm/paravirt_privop.h>
+#endif
+
+#ifndef __ASSEMBLY__
+#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
+#define IA64_INTRINSIC_API(name) pv_cpu_ops.name
+#define IA64_INTRINSIC_MACRO(name) paravirt_ ## name
+#else
+#define IA64_INTRINSIC_API(name) ia64_native_ ## name
+#define IA64_INTRINSIC_MACRO(name) ia64_native_ ## name
+#endif
+
+/************************************************/
+/* Instructions paravirtualized for correctness */
+/************************************************/
+/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */
+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
+ * is not currently used (though it may be in a long-format VHPT system!)
+ */
+#define ia64_fc IA64_INTRINSIC_API(fc)
+#define ia64_thash IA64_INTRINSIC_API(thash)
+#define ia64_get_cpuid IA64_INTRINSIC_API(get_cpuid)
+#define ia64_get_pmd IA64_INTRINSIC_API(get_pmd)
+
+
+/************************************************/
+/* Instructions paravirtualized for performance */
+/************************************************/
+#define ia64_ssm IA64_INTRINSIC_MACRO(ssm)
+#define ia64_rsm IA64_INTRINSIC_MACRO(rsm)
+#define ia64_getreg IA64_INTRINSIC_API(getreg)
+#define ia64_setreg IA64_INTRINSIC_API(setreg)
+#define ia64_set_rr IA64_INTRINSIC_API(set_rr)
+#define ia64_get_rr IA64_INTRINSIC_API(get_rr)
+#define ia64_ptcga IA64_INTRINSIC_API(ptcga)
+#define ia64_get_psr_i IA64_INTRINSIC_API(get_psr_i)
+#define ia64_intrin_local_irq_restore \
+ IA64_INTRINSIC_API(intrin_local_irq_restore)
+#define ia64_set_rr0_to_rr4 IA64_INTRINSIC_API(set_rr0_to_rr4)
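Reading the two macro families together: in a native build, ia64_getreg(_IA64_REG_IP) resolves to ia64_native_getreg(_IA64_REG_IP), a compiler intrinsic; with CONFIG_PARAVIRT in the kernel it resolves to pv_cpu_ops.getreg(_IA64_REG_IP), an indirect call that the paravirt glue can replace. For example:

    unsigned long ip = ia64_getreg(_IA64_REG_IP);
    /* native:   ia64_native_getreg(_IA64_REG_IP)   (intrinsic)         */
    /* paravirt: pv_cpu_ops.getreg(_IA64_REG_IP)    (indirect via ops)  */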
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_INTRINSICS_H */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
new file mode 100644
index 000000000000..260a85ac9d6a
--- /dev/null
+++ b/arch/ia64/include/asm/io.h
@@ -0,0 +1,459 @@
+#ifndef _ASM_IA64_IO_H
+#define _ASM_IA64_IO_H
+
+/*
+ * This file contains the definitions for the emulated IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated to
+ * (a) handle it all in a way that makes gcc able to optimize it as
+ * well as possible and (b) avoid writing the same thing over and
+ * over again with slight variations and possibly making a mistake
+ * somewhere.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+/* We don't use IO slowdowns on the ia64, but.. */
+#define __SLOW_DOWN_IO do { } while (0)
+#define SLOW_DOWN_IO do { } while (0)
+
+#define __IA64_UNCACHED_OFFSET RGN_BASE(RGN_UNCACHED)
+
+/*
+ * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
+ * large machines may have multiple other I/O spaces so we can't place any a priori limit
+ * on IO_SPACE_LIMIT. These additional spaces are described in ACPI.
+ */
+#define IO_SPACE_LIMIT 0xffffffffffffffffUL
+
+#define MAX_IO_SPACES_BITS 8
+#define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS)
+#define IO_SPACE_BITS 24
+#define IO_SPACE_SIZE (1UL << IO_SPACE_BITS)
+
+#define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS)
+#define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS)
+#define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1))
+
+#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff))
+
+struct io_space {
+ unsigned long mmio_base; /* base in MMIO space */
+ int sparse;
+};
+
+extern struct io_space io_space[];
+extern unsigned int num_io_spaces;
+
+# ifdef __KERNEL__
+
+/*
+ * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
+ * 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap)
+ * 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port)
+ *
+ * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
+ * code that uses bare port numbers without the prerequisite pci_iomap().
+ */
+#define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
+#define PIO_MASK (PIO_OFFSET - 1)
+#define PIO_RESERVED __IA64_UNCACHED_OFFSET
+#define HAVE_ARCH_PIO_SIZE
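Worked numbers for the cookie layout above: with MAX_IO_SPACES_BITS == 8 and IO_SPACE_BITS == 24, PIO_OFFSET is 1UL << 32, and a port in a sparse space, say 0x1f8, encodes as:

    IO_SPACE_SPARSE_ENCODING(0x1f8)
        = ((0x1f8 >> 2) << 12) | (0x1f8 & 0xfff)
        = (0x7e << 12) | 0x1f8
        = 0x7e1f8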
+
+#include <asm/intrinsics.h>
+#include <asm/machvec.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm-generic/iomap.h>
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ */
+static inline unsigned long
+virt_to_phys (volatile void *address)
+{
+ return (unsigned long) address - PAGE_OFFSET;
+}
+
+static inline void*
+phys_to_virt (unsigned long address)
+{
+ return (void *) (address + PAGE_OFFSET);
+}
+
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
+extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
+extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
+
+/*
+ * The following two macros are deprecated and scheduled for removal.
+ * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
+ */
+#define bus_to_virt phys_to_virt
+#define virt_to_bus virt_to_phys
+#define page_to_bus page_to_phys
+
+# endif /* __KERNEL__ */
+
+/*
+ * Memory fence w/accept. This should never be used in code that is
+ * not IA-64 specific.
+ */
+#define __ia64_mf_a() ia64_mfa()
+
+/**
+ * ___ia64_mmiowb - I/O write barrier
+ *
+ * Ensure ordering of I/O space writes. This will make sure that writes
+ * following the barrier will arrive after all previous writes. For most
+ * ia64 platforms, this is a simple 'mf.a' instruction.
+ *
+ * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ */
+static inline void ___ia64_mmiowb(void)
+{
+ ia64_mfa();
+}
+
+static inline void*
+__ia64_mk_io_addr (unsigned long port)
+{
+ struct io_space *space;
+ unsigned long offset;
+
+ space = &io_space[IO_SPACE_NR(port)];
+ port = IO_SPACE_PORT(port);
+ if (space->sparse)
+ offset = IO_SPACE_SPARSE_ENCODING(port);
+ else
+ offset = port;
+
+ return (void *) (space->mmio_base | offset);
+}
+
+#define __ia64_inb ___ia64_inb
+#define __ia64_inw ___ia64_inw
+#define __ia64_inl ___ia64_inl
+#define __ia64_outb ___ia64_outb
+#define __ia64_outw ___ia64_outw
+#define __ia64_outl ___ia64_outl
+#define __ia64_readb ___ia64_readb
+#define __ia64_readw ___ia64_readw
+#define __ia64_readl ___ia64_readl
+#define __ia64_readq ___ia64_readq
+#define __ia64_readb_relaxed ___ia64_readb
+#define __ia64_readw_relaxed ___ia64_readw
+#define __ia64_readl_relaxed ___ia64_readl
+#define __ia64_readq_relaxed ___ia64_readq
+#define __ia64_writeb ___ia64_writeb
+#define __ia64_writew ___ia64_writew
+#define __ia64_writel ___ia64_writel
+#define __ia64_writeq ___ia64_writeq
+#define __ia64_mmiowb ___ia64_mmiowb
+
+/*
+ * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
+ * that the access has completed before executing other I/O accesses. Since we're doing
+ * the accesses through an uncachable (UC) translation, the CPU will execute them in
+ * program order. However, we still need to tell the compiler not to shuffle them around
+ * during optimization, which is why we use "volatile" pointers.
+ */
+
+static inline unsigned int
+___ia64_inb (unsigned long port)
+{
+ volatile unsigned char *addr = __ia64_mk_io_addr(port);
+ unsigned char ret;
+
+ ret = *addr;
+ __ia64_mf_a();
+ return ret;
+}
+
+static inline unsigned int
+___ia64_inw (unsigned long port)
+{
+ volatile unsigned short *addr = __ia64_mk_io_addr(port);
+ unsigned short ret;
+
+ ret = *addr;
+ __ia64_mf_a();
+ return ret;
+}
+
+static inline unsigned int
+___ia64_inl (unsigned long port)
+{
+ volatile unsigned int *addr = __ia64_mk_io_addr(port);
+ unsigned int ret;
+
+ ret = *addr;
+ __ia64_mf_a();
+ return ret;
+}
+
+static inline void
+___ia64_outb (unsigned char val, unsigned long port)
+{
+ volatile unsigned char *addr = __ia64_mk_io_addr(port);
+
+ *addr = val;
+ __ia64_mf_a();
+}
+
+static inline void
+___ia64_outw (unsigned short val, unsigned long port)
+{
+ volatile unsigned short *addr = __ia64_mk_io_addr(port);
+
+ *addr = val;
+ __ia64_mf_a();
+}
+
+static inline void
+___ia64_outl (unsigned int val, unsigned long port)
+{
+ volatile unsigned int *addr = __ia64_mk_io_addr(port);
+
+ *addr = val;
+ __ia64_mf_a();
+}
+
+static inline void
+__insb (unsigned long port, void *dst, unsigned long count)
+{
+ unsigned char *dp = dst;
+
+ while (count--)
+ *dp++ = platform_inb(port);
+}
+
+static inline void
+__insw (unsigned long port, void *dst, unsigned long count)
+{
+ unsigned short *dp = dst;
+
+ while (count--)
+ *dp++ = platform_inw(port);
+}
+
+static inline void
+__insl (unsigned long port, void *dst, unsigned long count)
+{
+ unsigned int *dp = dst;
+
+ while (count--)
+ *dp++ = platform_inl(port);
+}
+
+static inline void
+__outsb (unsigned long port, const void *src, unsigned long count)
+{
+ const unsigned char *sp = src;
+
+ while (count--)
+ platform_outb(*sp++, port);
+}
+
+static inline void
+__outsw (unsigned long port, const void *src, unsigned long count)
+{
+ const unsigned short *sp = src;
+
+ while (count--)
+ platform_outw(*sp++, port);
+}
+
+static inline void
+__outsl (unsigned long port, const void *src, unsigned long count)
+{
+ const unsigned int *sp = src;
+
+ while (count--)
+ platform_outl(*sp++, port);
+}
+
+/*
+ * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
+ * specification regarding legacy I/O support. Thus, we have to make these operations
+ * platform dependent...
+ */
+#define __inb platform_inb
+#define __inw platform_inw
+#define __inl platform_inl
+#define __outb platform_outb
+#define __outw platform_outw
+#define __outl platform_outl
+#define __mmiowb platform_mmiowb
+
+#define inb(p) __inb(p)
+#define inw(p) __inw(p)
+#define inl(p) __inl(p)
+#define insb(p,d,c) __insb(p,d,c)
+#define insw(p,d,c) __insw(p,d,c)
+#define insl(p,d,c) __insl(p,d,c)
+#define outb(v,p) __outb(v,p)
+#define outw(v,p) __outw(v,p)
+#define outl(v,p) __outl(v,p)
+#define outsb(p,s,c) __outsb(p,s,c)
+#define outsw(p,s,c) __outsw(p,s,c)
+#define outsl(p,s,c) __outsl(p,s,c)
+#define mmiowb() __mmiowb()
+
+/*
+ * The addresses passed to these functions are already ioremap()ped.
+ *
+ * We need these to be machine vectors since some platforms don't provide
+ * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
+ * a good idea). Writes are ok though for all existing ia64 platforms (and
+ * hopefully it'll stay that way).
+ */
+static inline unsigned char
+___ia64_readb (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short
+___ia64_readw (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned int
+___ia64_readl (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned int __force *) addr;
+}
+
+static inline unsigned long
+___ia64_readq (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned long __force *) addr;
+}
+
+static inline void
+__writeb (unsigned char val, volatile void __iomem *addr)
+{
+ *(volatile unsigned char __force *) addr = val;
+}
+
+static inline void
+__writew (unsigned short val, volatile void __iomem *addr)
+{
+ *(volatile unsigned short __force *) addr = val;
+}
+
+static inline void
+__writel (unsigned int val, volatile void __iomem *addr)
+{
+ *(volatile unsigned int __force *) addr = val;
+}
+
+static inline void
+__writeq (unsigned long val, volatile void __iomem *addr)
+{
+ *(volatile unsigned long __force *) addr = val;
+}
+
+#define __readb platform_readb
+#define __readw platform_readw
+#define __readl platform_readl
+#define __readq platform_readq
+#define __readb_relaxed platform_readb_relaxed
+#define __readw_relaxed platform_readw_relaxed
+#define __readl_relaxed platform_readl_relaxed
+#define __readq_relaxed platform_readq_relaxed
+
+#define readb(a) __readb((a))
+#define readw(a) __readw((a))
+#define readl(a) __readl((a))
+#define readq(a) __readq((a))
+#define readb_relaxed(a) __readb_relaxed((a))
+#define readw_relaxed(a) __readw_relaxed((a))
+#define readl_relaxed(a) __readl_relaxed((a))
+#define readq_relaxed(a) __readq_relaxed((a))
+#define __raw_readb readb
+#define __raw_readw readw
+#define __raw_readl readl
+#define __raw_readq readq
+#define __raw_readb_relaxed readb_relaxed
+#define __raw_readw_relaxed readw_relaxed
+#define __raw_readl_relaxed readl_relaxed
+#define __raw_readq_relaxed readq_relaxed
+#define writeb(v,a) __writeb((v), (a))
+#define writew(v,a) __writew((v), (a))
+#define writel(v,a) __writel((v), (a))
+#define writeq(v,a) __writeq((v), (a))
+#define __raw_writeb writeb
+#define __raw_writew writew
+#define __raw_writel writel
+#define __raw_writeq writeq
+
+#ifndef inb_p
+# define inb_p inb
+#endif
+#ifndef inw_p
+# define inw_p inw
+#endif
+#ifndef inl_p
+# define inl_p inl
+#endif
+
+#ifndef outb_p
+# define outb_p outb
+#endif
+#ifndef outw_p
+# define outw_p outw
+#endif
+#ifndef outl_p
+# define outl_p outl
+#endif
+
+# ifdef __KERNEL__
+
+extern void __iomem * ioremap(unsigned long offset, unsigned long size);
+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
+extern void iounmap (volatile void __iomem *addr);
+
+/*
+ * String version of IO memory access ops:
+ */
+extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
+extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
+extern void memset_io(volatile void __iomem *s, int c, long n);
+
+# endif /* __KERNEL__ */
+
+/*
+ * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that
+ * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
+ * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
+ * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing
+ * over BIO-level virtual merging.
+ */
+extern unsigned long ia64_max_iommu_merge_mask;
+#if 1
+#define BIO_VMERGE_BOUNDARY 0
+#else
+/*
+ * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
+ * replaced by dma_merge_mask() or something of that sort. Note: the only way
+ * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
+ * expanded into:
+ *
+ * (addr & ((ia64_max_iommu_merge_mask + 1) - 1)) == (addr & ia64_max_iommu_merge_mask)
+ *
+ * which is precisely what we want.
+ */
+#define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1)
+#endif
+
+#endif /* _ASM_IA64_IO_H */
diff --git a/arch/ia64/include/asm/ioctl.h b/arch/ia64/include/asm/ioctl.h
new file mode 100644
index 000000000000..b279fe06dfe5
--- /dev/null
+++ b/arch/ia64/include/asm/ioctl.h
@@ -0,0 +1 @@
+#include <asm-generic/ioctl.h>
diff --git a/arch/ia64/include/asm/ioctls.h b/arch/ia64/include/asm/ioctls.h
new file mode 100644
index 000000000000..f41b636a0bf6
--- /dev/null
+++ b/arch/ia64/include/asm/ioctls.h
@@ -0,0 +1,93 @@
+#ifndef _ASM_IA64_IOCTLS_H
+#define _ASM_IA64_IOCTLS_H
+
+/*
+ * Based on <asm-i386/ioctls.h>
+ *
+ * Modified 1998, 1999, 2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm/ioctl.h>
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+#define TCGETA 0x5405
+#define TCSETA 0x5406
+#define TCSETAW 0x5407
+#define TCSETAF 0x5408
+#define TCSBRK 0x5409
+#define TCXONC 0x540A
+#define TCFLSH 0x540B
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+#define TIOCGPGRP 0x540F
+#define TIOCSPGRP 0x5410
+#define TIOCOUTQ 0x5411
+#define TIOCSTI 0x5412
+#define TIOCGWINSZ 0x5413
+#define TIOCSWINSZ 0x5414
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define FIONREAD 0x541B
+#define TIOCINQ FIONREAD
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+#define FIONBIO 0x5421
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TCGETS2 _IOR('T',0x2A, struct termios2)
+#define TCSETS2 _IOW('T',0x2B, struct termios2)
+#define TCSETSW2 _IOW('T',0x2C, struct termios2)
+#define TCSETSF2 _IOW('T',0x2D, struct termios2)
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
+#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
+#define FIOQSIZE 0x5460
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif /* _ASM_IA64_IOCTLS_H */
diff --git a/arch/ia64/include/asm/iosapic.h b/arch/ia64/include/asm/iosapic.h
new file mode 100644
index 000000000000..b9c102e15f22
--- /dev/null
+++ b/arch/ia64/include/asm/iosapic.h
@@ -0,0 +1,126 @@
+#ifndef __ASM_IA64_IOSAPIC_H
+#define __ASM_IA64_IOSAPIC_H
+
+#define IOSAPIC_REG_SELECT 0x0
+#define IOSAPIC_WINDOW 0x10
+#define IOSAPIC_EOI 0x40
+
+#define IOSAPIC_VERSION 0x1
+
+/*
+ * Redirection table entry
+ */
+#define IOSAPIC_RTE_LOW(i) (0x10 + (i)*2)
+#define IOSAPIC_RTE_HIGH(i) (0x11 + (i)*2)
+
+#define IOSAPIC_DEST_SHIFT 16
+
+/*
+ * Delivery mode
+ */
+#define IOSAPIC_DELIVERY_SHIFT 8
+#define IOSAPIC_FIXED 0x0
+#define IOSAPIC_LOWEST_PRIORITY 0x1
+#define IOSAPIC_PMI 0x2
+#define IOSAPIC_NMI 0x4
+#define IOSAPIC_INIT 0x5
+#define IOSAPIC_EXTINT 0x7
+
+/*
+ * Interrupt polarity
+ */
+#define IOSAPIC_POLARITY_SHIFT 13
+#define IOSAPIC_POL_HIGH 0
+#define IOSAPIC_POL_LOW 1
+
+/*
+ * Trigger mode
+ */
+#define IOSAPIC_TRIGGER_SHIFT 15
+#define IOSAPIC_EDGE 0
+#define IOSAPIC_LEVEL 1
+
+/*
+ * Mask bit
+ */
+
+#define IOSAPIC_MASK_SHIFT 16
+#define IOSAPIC_MASK (1<<IOSAPIC_MASK_SHIFT)
+
+#define IOSAPIC_VECTOR_MASK 0xffffff00
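A hedged sketch of how these fields compose into the low 32 bits of an RTE (the vector value is illustrative; the real programming lives in arch/ia64/kernel/iosapic.c):

    u32 low32 = 0x31 |                                              /* vector (illustrative) */
                (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT) |
                (IOSAPIC_POL_LOW << IOSAPIC_POLARITY_SHIFT) |
                (IOSAPIC_LEVEL << IOSAPIC_TRIGGER_SHIFT) |
                IOSAPIC_MASK;                                       /* start out masked */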
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_IOSAPIC
+
+#define NR_IOSAPICS 256
+
+#ifdef CONFIG_PARAVIRT_GUEST
+#include <asm/paravirt.h>
+#else
+#define iosapic_pcat_compat_init ia64_native_iosapic_pcat_compat_init
+#define __iosapic_read __ia64_native_iosapic_read
+#define __iosapic_write __ia64_native_iosapic_write
+#define iosapic_get_irq_chip ia64_native_iosapic_get_irq_chip
+#endif
+
+extern void __init ia64_native_iosapic_pcat_compat_init(void);
+extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);
+
+static inline unsigned int
+__ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
+{
+ writel(reg, iosapic + IOSAPIC_REG_SELECT);
+ return readl(iosapic + IOSAPIC_WINDOW);
+}
+
+static inline void
+__ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+{
+ writel(reg, iosapic + IOSAPIC_REG_SELECT);
+ writel(val, iosapic + IOSAPIC_WINDOW);
+}
+
+static inline void iosapic_eoi(char __iomem *iosapic, u32 vector)
+{
+ writel(vector, iosapic + IOSAPIC_EOI);
+}
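Usage sketch for the select/window accessors above; the version-register field decode is an assumption here, mirroring the x86 I/O APIC layout:

    /* addr is the ioremap()ped IOSAPIC base */
    u32 ver  = __iosapic_read(addr, IOSAPIC_VERSION);
    int nrte = ((ver >> 16) & 0xff) + 1;    /* assumed: max redirection entries field */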
+
+extern void __init iosapic_system_init (int pcat_compat);
+extern int __devinit iosapic_init (unsigned long address,
+ unsigned int gsi_base);
+#ifdef CONFIG_HOTPLUG
+extern int iosapic_remove (unsigned int gsi_base);
+#else
+#define iosapic_remove(gsi_base) (-EINVAL)
+#endif /* CONFIG_HOTPLUG */
+extern int gsi_to_irq (unsigned int gsi);
+extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
+ unsigned long trigger);
+extern void iosapic_unregister_intr (unsigned int irq);
+extern void __devinit iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
+ unsigned long polarity,
+ unsigned long trigger);
+extern int __init iosapic_register_platform_intr (u32 int_type,
+ unsigned int gsi,
+ int pmi_vector,
+ u16 eid, u16 id,
+ unsigned long polarity,
+ unsigned long trigger);
+
+#ifdef CONFIG_NUMA
+extern void __devinit map_iosapic_to_node (unsigned int, int);
+#endif
+#else
+#define iosapic_system_init(pcat_compat) do { } while (0)
+#define iosapic_init(address,gsi_base) (-EINVAL)
+#define iosapic_remove(gsi_base) (-ENODEV)
+#define iosapic_register_intr(gsi,polarity,trigger) (gsi)
+#define iosapic_unregister_intr(irq) do { } while (0)
+#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger) do { } while (0)
+#define iosapic_register_platform_intr(type,gsi,pmi,eid,id, \
+ polarity,trigger) (gsi)
+#endif
+
+# endif /* !__ASSEMBLY__ */
+#endif /* __ASM_IA64_IOSAPIC_H */
diff --git a/arch/ia64/include/asm/ipcbuf.h b/arch/ia64/include/asm/ipcbuf.h
new file mode 100644
index 000000000000..079899ae7d32
--- /dev/null
+++ b/arch/ia64/include/asm/ipcbuf.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_IA64_IPCBUF_H
+#define _ASM_IA64_IPCBUF_H
+
+/*
+ * The ipc64_perm structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 32-bit seq
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid_t uid;
+ __kernel_gid_t gid;
+ __kernel_uid_t cuid;
+ __kernel_gid_t cgid;
+ __kernel_mode_t mode;
+ unsigned short seq;
+ unsigned short __pad1;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _ASM_IA64_IPCBUF_H */
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
new file mode 100644
index 000000000000..3627116fb0e2
--- /dev/null
+++ b/arch/ia64/include/asm/irq.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_IA64_IRQ_H
+#define _ASM_IA64_IRQ_H
+
+/*
+ * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * 11/24/98 S.Eranian updated TIMER_IRQ and irq_canonicalize
+ * 01/20/99 S.Eranian added keyboard interrupt
+ * 02/29/00 D.Mosberger moved most things into hw_irq.h
+ */
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <asm/nr-irqs.h>
+
+static __inline__ int
+irq_canonicalize (int irq)
+{
+ /*
+ * We do the legacy thing here of pretending that irqs < 16
+ * are 8259 irqs. This really shouldn't be necessary at all,
+ * but we keep it here as serial.c still uses it...
+ */
+ return ((irq == 2) ? 9 : irq);
+}
+
+extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
+bool is_affinity_mask_valid(cpumask_t cpumask);
+
+#define is_affinity_mask_valid is_affinity_mask_valid
+
+#endif /* _ASM_IA64_IRQ_H */
diff --git a/arch/ia64/include/asm/irq_regs.h b/arch/ia64/include/asm/irq_regs.h
new file mode 100644
index 000000000000..3dd9c0b70270
--- /dev/null
+++ b/arch/ia64/include/asm/irq_regs.h
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
diff --git a/arch/ia64/include/asm/kdebug.h b/arch/ia64/include/asm/kdebug.h
new file mode 100644
index 000000000000..d11a69855036
--- /dev/null
+++ b/arch/ia64/include/asm/kdebug.h
@@ -0,0 +1,57 @@
+#ifndef _IA64_KDEBUG_H
+#define _IA64_KDEBUG_H 1
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> adopted from
+ * include/asm-x86_64/kdebug.h
+ *
+ * 2005-Oct Keith Owens <kaos@sgi.com>. Expand notify_die to cover more
+ * events.
+ */
+
+enum die_val {
+ DIE_BREAK = 1,
+ DIE_FAULT,
+ DIE_OOPS,
+ DIE_MACHINE_HALT,
+ DIE_MACHINE_RESTART,
+ DIE_MCA_MONARCH_ENTER,
+ DIE_MCA_MONARCH_PROCESS,
+ DIE_MCA_MONARCH_LEAVE,
+ DIE_MCA_SLAVE_ENTER,
+ DIE_MCA_SLAVE_PROCESS,
+ DIE_MCA_SLAVE_LEAVE,
+ DIE_MCA_RENDZVOUS_ENTER,
+ DIE_MCA_RENDZVOUS_PROCESS,
+ DIE_MCA_RENDZVOUS_LEAVE,
+ DIE_MCA_NEW_TIMEOUT,
+ DIE_INIT_ENTER,
+ DIE_INIT_MONARCH_ENTER,
+ DIE_INIT_MONARCH_PROCESS,
+ DIE_INIT_MONARCH_LEAVE,
+ DIE_INIT_SLAVE_ENTER,
+ DIE_INIT_SLAVE_PROCESS,
+ DIE_INIT_SLAVE_LEAVE,
+ DIE_KDEBUG_ENTER,
+ DIE_KDEBUG_LEAVE,
+ DIE_KDUMP_ENTER,
+ DIE_KDUMP_LEAVE,
+};
+
+#endif
diff --git a/arch/ia64/include/asm/kexec.h b/arch/ia64/include/asm/kexec.h
new file mode 100644
index 000000000000..541be835fc5a
--- /dev/null
+++ b/arch/ia64/include/asm/kexec.h
@@ -0,0 +1,44 @@
+#ifndef _ASM_IA64_KEXEC_H
+#define _ASM_IA64_KEXEC_H
+
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+
+#define KEXEC_CONTROL_CODE_SIZE (8192 + 8192 + 4096)
+
+/* The native architecture */
+#define KEXEC_ARCH KEXEC_ARCH_IA_64
+
+#define kexec_flush_icache_page(page) do { \
+ unsigned long page_addr = (unsigned long)page_address(page); \
+ flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
+ } while(0)
+
+extern struct kimage *ia64_kimage;
+extern const unsigned int relocate_new_kernel_size;
+extern void relocate_new_kernel(unsigned long, unsigned long,
+ struct ia64_boot_param *, unsigned long);
+static inline void
+crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
+{
+}
+extern struct resource efi_memmap_res;
+extern struct resource boot_param_res;
+extern void kdump_smp_send_stop(void);
+extern void kdump_smp_send_init(void);
+extern void kexec_disable_iosapic(void);
+extern void crash_save_this_cpu(void);
+struct rsvd_region;
+extern unsigned long kdump_find_rsvd_region(unsigned long size,
+ struct rsvd_region *rsvd_regions, int n);
+extern void kdump_cpu_freeze(struct unw_frame_info *info, void *arg);
+extern int kdump_status[];
+extern atomic_t kdump_cpu_freezed;
+extern atomic_t kdump_in_progress;
+
+#endif /* _ASM_IA64_KEXEC_H */
diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h
new file mode 100644
index 000000000000..5d1658aa2b3b
--- /dev/null
+++ b/arch/ia64/include/asm/kmap_types.h
@@ -0,0 +1,30 @@
+#ifndef _ASM_IA64_KMAP_TYPES_H
+#define _ASM_IA64_KMAP_TYPES_H
+
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+# define D(n) __KM_FENCE_##n ,
+#else
+# define D(n)
+#endif
+
+enum km_type {
+D(0) KM_BOUNCE_READ,
+D(1) KM_SKB_SUNRPC_DATA,
+D(2) KM_SKB_DATA_SOFTIRQ,
+D(3) KM_USER0,
+D(4) KM_USER1,
+D(5) KM_BIO_SRC_IRQ,
+D(6) KM_BIO_DST_IRQ,
+D(7) KM_PTE0,
+D(8) KM_PTE1,
+D(9) KM_IRQ0,
+D(10) KM_IRQ1,
+D(11) KM_SOFTIRQ0,
+D(12) KM_SOFTIRQ1,
+D(13) KM_TYPE_NR
+};
+
+#undef D
+
+#endif /* _ASM_IA64_KMAP_TYPES_H */
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
new file mode 100644
index 000000000000..dbf83fb28db3
--- /dev/null
+++ b/arch/ia64/include/asm/kprobes.h
@@ -0,0 +1,132 @@
+#ifndef _ASM_KPROBES_H
+#define _ASM_KPROBES_H
+/*
+ * Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> adapted from i386
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+#include <asm/break.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 2 /* last half is for kprobe-booster */
+#define BREAK_INST (long)(__IA64_BREAK_KPROBE << 6)
+#define NOP_M_INST (long)(1<<27)
+#define BRL_INST(i1, i2) ((long)((0xcL << 37) | /* brl */ \
+ (0x1L << 12) | /* many */ \
+ (((i1) & 1) << 36) | ((i2) << 13))) /* imm */
+
+typedef union cmp_inst {
+ struct {
+ unsigned long long qp : 6;
+ unsigned long long p1 : 6;
+ unsigned long long c : 1;
+ unsigned long long r2 : 7;
+ unsigned long long r3 : 7;
+ unsigned long long p2 : 6;
+ unsigned long long ta : 1;
+ unsigned long long x2 : 2;
+ unsigned long long tb : 1;
+ unsigned long long opcode : 4;
+ unsigned long long reserved : 23;
+ }f;
+ unsigned long long l;
+} cmp_inst_t;
+
+struct kprobe;
+
+typedef struct _bundle {
+ struct {
+ unsigned long long template : 5;
+ unsigned long long slot0 : 41;
+ unsigned long long slot1_p0 : 64-46;
+ } quad0;
+ struct {
+ unsigned long long slot1_p1 : 41 - (64-46);
+ unsigned long long slot2 : 41;
+ } quad1;
+} __attribute__((__aligned__(16))) bundle_t;
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+#define MAX_PARAM_RSE_SIZE (0x60+0x60/0x3f)
+/* per-cpu kprobe control block */
+#define ARCH_PREV_KPROBE_SZ 2
+struct kprobe_ctlblk {
+ unsigned long kprobe_status;
+ struct pt_regs jprobe_saved_regs;
+ unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
+ unsigned long *bsp;
+ unsigned long cfm;
+ atomic_t prev_kprobe_index;
+ struct prev_kprobe prev_kprobe[ARCH_PREV_KPROBE_SZ];
+};
+
+#define kretprobe_blacklist_size 0
+
+#define SLOT0_OPCODE_SHIFT (37)
+#define SLOT1_p1_OPCODE_SHIFT (37 - (64-46))
+#define SLOT2_OPCODE_SHIFT (37)
+
+#define INDIRECT_CALL_OPCODE (1)
+#define IP_RELATIVE_CALL_OPCODE (5)
+#define IP_RELATIVE_BRANCH_OPCODE (4)
+#define IP_RELATIVE_PREDICT_OPCODE (7)
+#define LONG_BRANCH_OPCODE (0xC)
+#define LONG_CALL_OPCODE (0xD)
+#define flush_insn_slot(p) do { } while (0)
+
+typedef struct kprobe_opcode {
+ bundle_t bundle;
+} kprobe_opcode_t;
+
+struct fnptr {
+ unsigned long ip;
+ unsigned long gp;
+};
+
+/* Architecture specific copy of original instruction*/
+struct arch_specific_insn {
+ /* copy of the instruction to be emulated */
+ kprobe_opcode_t *insn;
+ #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
+ #define INST_FLAG_FIX_BRANCH_REG 2
+ #define INST_FLAG_BREAK_INST 4
+ #define INST_FLAG_BOOSTABLE 8
+ unsigned long inst_flag;
+ unsigned short target_br_reg;
+ unsigned short slot;
+};
+
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+extern void invalidate_stacked_regs(void);
+extern void flush_register_stack(void);
+extern void arch_remove_kprobe(struct kprobe *p);
+
+#endif /* _ASM_KPROBES_H */
diff --git a/arch/ia64/include/asm/kregs.h b/arch/ia64/include/asm/kregs.h
new file mode 100644
index 000000000000..aefcdfee7f23
--- /dev/null
+++ b/arch/ia64/include/asm/kregs.h
@@ -0,0 +1,165 @@
+#ifndef _ASM_IA64_KREGS_H
+#define _ASM_IA64_KREGS_H
+
+/*
+ * Copyright (C) 2001-2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * This file defines the kernel register usage convention used by Linux/ia64.
+ */
+
+/*
+ * Kernel registers:
+ */
+#define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */
+#define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */
+#define IA64_KR_PER_CPU_DATA 3 /* ar.k3: physical per-CPU base */
+#define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */
+#define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */
+#define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */
+#define IA64_KR_PT_BASE 7 /* ar.k7: page table base address (physical) */
+
+#define _IA64_KR_PASTE(x,y) x##y
+#define _IA64_KR_PREFIX(n) _IA64_KR_PASTE(ar.k, n)
+#define IA64_KR(n) _IA64_KR_PREFIX(IA64_KR_##n)
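The token pasting above lets C and assembly name kernel registers symbolically; for example, IA64_KR(CURRENT) expands step by step:

    IA64_KR(CURRENT)
        -> _IA64_KR_PREFIX(IA64_KR_CURRENT)
        -> _IA64_KR_PASTE(ar.k, 6)
        -> ar.k6                            /* the "current" task pointer */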
+
+/*
+ * Translation registers:
+ */
+#define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code & data) */
+#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
+#define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */
+
+#define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/
+#define IA64_TR_ALLOC_MAX 32 /* Max number for dynamic use*/
+
+/* Processor status register bits: */
+#define IA64_PSR_BE_BIT 1
+#define IA64_PSR_UP_BIT 2
+#define IA64_PSR_AC_BIT 3
+#define IA64_PSR_MFL_BIT 4
+#define IA64_PSR_MFH_BIT 5
+#define IA64_PSR_IC_BIT 13
+#define IA64_PSR_I_BIT 14
+#define IA64_PSR_PK_BIT 15
+#define IA64_PSR_DT_BIT 17
+#define IA64_PSR_DFL_BIT 18
+#define IA64_PSR_DFH_BIT 19
+#define IA64_PSR_SP_BIT 20
+#define IA64_PSR_PP_BIT 21
+#define IA64_PSR_DI_BIT 22
+#define IA64_PSR_SI_BIT 23
+#define IA64_PSR_DB_BIT 24
+#define IA64_PSR_LP_BIT 25
+#define IA64_PSR_TB_BIT 26
+#define IA64_PSR_RT_BIT 27
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL0_BIT 32
+#define IA64_PSR_CPL1_BIT 33
+#define IA64_PSR_IS_BIT 34
+#define IA64_PSR_MC_BIT 35
+#define IA64_PSR_IT_BIT 36
+#define IA64_PSR_ID_BIT 37
+#define IA64_PSR_DA_BIT 38
+#define IA64_PSR_DD_BIT 39
+#define IA64_PSR_SS_BIT 40
+#define IA64_PSR_RI_BIT 41
+#define IA64_PSR_ED_BIT 43
+#define IA64_PSR_BN_BIT 44
+#define IA64_PSR_IA_BIT 45
+
+/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
+ execve(). Only list flags here that need to be cleared/set for BOTH clone2() and
+ execve(). */
+#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
+ IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
+ IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
+#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH | IA64_PSR_SP)
+
+#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
+#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
+#define IA64_PSR_AC (__IA64_UL(1) << IA64_PSR_AC_BIT)
+#define IA64_PSR_MFL (__IA64_UL(1) << IA64_PSR_MFL_BIT)
+#define IA64_PSR_MFH (__IA64_UL(1) << IA64_PSR_MFH_BIT)
+#define IA64_PSR_IC (__IA64_UL(1) << IA64_PSR_IC_BIT)
+#define IA64_PSR_I (__IA64_UL(1) << IA64_PSR_I_BIT)
+#define IA64_PSR_PK (__IA64_UL(1) << IA64_PSR_PK_BIT)
+#define IA64_PSR_DT (__IA64_UL(1) << IA64_PSR_DT_BIT)
+#define IA64_PSR_DFL (__IA64_UL(1) << IA64_PSR_DFL_BIT)
+#define IA64_PSR_DFH (__IA64_UL(1) << IA64_PSR_DFH_BIT)
+#define IA64_PSR_SP (__IA64_UL(1) << IA64_PSR_SP_BIT)
+#define IA64_PSR_PP (__IA64_UL(1) << IA64_PSR_PP_BIT)
+#define IA64_PSR_DI (__IA64_UL(1) << IA64_PSR_DI_BIT)
+#define IA64_PSR_SI (__IA64_UL(1) << IA64_PSR_SI_BIT)
+#define IA64_PSR_DB (__IA64_UL(1) << IA64_PSR_DB_BIT)
+#define IA64_PSR_LP (__IA64_UL(1) << IA64_PSR_LP_BIT)
+#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT)
+#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT)
+/* The following are not affected by save_flags()/restore_flags(): */
+#define IA64_PSR_CPL (__IA64_UL(3) << IA64_PSR_CPL0_BIT)
+#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT)
+#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT)
+#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT)
+#define IA64_PSR_ID (__IA64_UL(1) << IA64_PSR_ID_BIT)
+#define IA64_PSR_DA (__IA64_UL(1) << IA64_PSR_DA_BIT)
+#define IA64_PSR_DD (__IA64_UL(1) << IA64_PSR_DD_BIT)
+#define IA64_PSR_SS (__IA64_UL(1) << IA64_PSR_SS_BIT)
+#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT)
+#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT)
+#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT)
+#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
+
+/* User mask bits: */
+#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
+
+/* Default Control Register */
+#define IA64_DCR_PP_BIT 0 /* privileged performance monitor default */
+#define IA64_DCR_BE_BIT 1 /* big-endian default */
+#define IA64_DCR_LC_BIT 2 /* ia32 lock-check enable */
+#define IA64_DCR_DM_BIT 8 /* defer TLB miss faults */
+#define IA64_DCR_DP_BIT 9 /* defer page-not-present faults */
+#define IA64_DCR_DK_BIT 10 /* defer key miss faults */
+#define IA64_DCR_DX_BIT 11 /* defer key permission faults */
+#define IA64_DCR_DR_BIT 12 /* defer access right faults */
+#define IA64_DCR_DA_BIT 13 /* defer access bit faults */
+#define IA64_DCR_DD_BIT 14 /* defer debug faults */
+
+#define IA64_DCR_PP (__IA64_UL(1) << IA64_DCR_PP_BIT)
+#define IA64_DCR_BE (__IA64_UL(1) << IA64_DCR_BE_BIT)
+#define IA64_DCR_LC (__IA64_UL(1) << IA64_DCR_LC_BIT)
+#define IA64_DCR_DM (__IA64_UL(1) << IA64_DCR_DM_BIT)
+#define IA64_DCR_DP (__IA64_UL(1) << IA64_DCR_DP_BIT)
+#define IA64_DCR_DK (__IA64_UL(1) << IA64_DCR_DK_BIT)
+#define IA64_DCR_DX (__IA64_UL(1) << IA64_DCR_DX_BIT)
+#define IA64_DCR_DR (__IA64_UL(1) << IA64_DCR_DR_BIT)
+#define IA64_DCR_DA (__IA64_UL(1) << IA64_DCR_DA_BIT)
+#define IA64_DCR_DD (__IA64_UL(1) << IA64_DCR_DD_BIT)
+
+/* Interrupt Status Register */
+#define IA64_ISR_X_BIT 32 /* execute access */
+#define IA64_ISR_W_BIT 33 /* write access */
+#define IA64_ISR_R_BIT 34 /* read access */
+#define IA64_ISR_NA_BIT 35 /* non-access */
+#define IA64_ISR_SP_BIT 36 /* speculative load exception */
+#define IA64_ISR_RS_BIT 37 /* mandatory register-stack exception */
+#define IA64_ISR_IR_BIT 38 /* invalid register frame exception */
+#define IA64_ISR_CODE_MASK 0xf
+
+#define IA64_ISR_X (__IA64_UL(1) << IA64_ISR_X_BIT)
+#define IA64_ISR_W (__IA64_UL(1) << IA64_ISR_W_BIT)
+#define IA64_ISR_R (__IA64_UL(1) << IA64_ISR_R_BIT)
+#define IA64_ISR_NA (__IA64_UL(1) << IA64_ISR_NA_BIT)
+#define IA64_ISR_SP (__IA64_UL(1) << IA64_ISR_SP_BIT)
+#define IA64_ISR_RS (__IA64_UL(1) << IA64_ISR_RS_BIT)
+#define IA64_ISR_IR (__IA64_UL(1) << IA64_ISR_IR_BIT)
+
+/* ISR code field for non-access instructions */
+#define IA64_ISR_CODE_TPA 0
+#define IA64_ISR_CODE_FC 1
+#define IA64_ISR_CODE_PROBE 2
+#define IA64_ISR_CODE_TAK 3
+#define IA64_ISR_CODE_LFETCH 4
+#define IA64_ISR_CODE_PROBEF 5
+
+#endif /* _ASM_IA64_KREGS_H */
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
new file mode 100644
index 000000000000..f38472ac2267
--- /dev/null
+++ b/arch/ia64/include/asm/kvm.h
@@ -0,0 +1,211 @@
+#ifndef __ASM_IA64_KVM_H
+#define __ASM_IA64_KVM_H
+
+/*
+ * kvm structure definitions for ia64
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <asm/types.h>
+
+#include <linux/ioctl.h>
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+#define KVM_IOAPIC_NUM_PINS 48
+
+struct kvm_ioapic_state {
+ __u64 base_address;
+ __u32 ioregsel;
+ __u32 id;
+ __u32 irr;
+ __u32 pad;
+ union {
+ __u64 bits;
+ struct {
+ __u8 vector;
+ __u8 delivery_mode:3;
+ __u8 dest_mode:1;
+ __u8 delivery_status:1;
+ __u8 polarity:1;
+ __u8 remote_irr:1;
+ __u8 trig_mode:1;
+ __u8 mask:1;
+ __u8 reserve:7;
+ __u8 reserved[4];
+ __u8 dest_id;
+ } fields;
+ } redirtbl[KVM_IOAPIC_NUM_PINS];
+};
+
+#define KVM_IRQCHIP_PIC_MASTER 0
+#define KVM_IRQCHIP_PIC_SLAVE 1
+#define KVM_IRQCHIP_IOAPIC 2
+
+#define KVM_CONTEXT_SIZE (8*1024)
+
+struct kvm_fpreg {
+ union {
+ unsigned long bits[2];
+ long double __dummy; /* force 16-byte alignment */
+ } u;
+};
+
+union context {
+ /* 8K size */
+ char dummy[KVM_CONTEXT_SIZE];
+ struct {
+ unsigned long psr;
+ unsigned long pr;
+ unsigned long caller_unat;
+ unsigned long pad;
+ unsigned long gr[32];
+ unsigned long ar[128];
+ unsigned long br[8];
+ unsigned long cr[128];
+ unsigned long rr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long pkr[8];
+ struct kvm_fpreg fr[128];
+ };
+};
+
+struct thash_data {
+ union {
+ struct {
+ unsigned long p : 1; /* 0 */
+ unsigned long rv1 : 1; /* 1 */
+ unsigned long ma : 3; /* 2-4 */
+ unsigned long a : 1; /* 5 */
+ unsigned long d : 1; /* 6 */
+ unsigned long pl : 2; /* 7-8 */
+ unsigned long ar : 3; /* 9-11 */
+ unsigned long ppn : 38; /* 12-49 */
+ unsigned long rv2 : 2; /* 50-51 */
+ unsigned long ed : 1; /* 52 */
+ unsigned long ig1 : 11; /* 53-63 */
+ };
+ struct {
+ unsigned long __rv1 : 53; /* 0-52 */
+ unsigned long contiguous : 1; /* 53 */
+ unsigned long tc : 1; /* 54 TR or TC */
+ unsigned long cl : 1;
+ /* 55 I side or D side cache line */
+ unsigned long len : 4; /* 56-59 */
+ unsigned long io : 1; /* 60 entry is for I/O or not */
+ unsigned long nomap : 1;
+ /* 61 entry can't be inserted into machine TLB. */
+ unsigned long checked : 1;
+ /* 62 for VTLB/VHPT sanity check */
+ unsigned long invalid : 1;
+ /* 63 invalid entry */
+ };
+ unsigned long page_flags;
+ }; /* same for VHPT and TLB */
+
+ union {
+ struct {
+ unsigned long rv3 : 2;
+ unsigned long ps : 6;
+ unsigned long key : 24;
+ unsigned long rv4 : 32;
+ };
+ unsigned long itir;
+ };
+ union {
+ struct {
+ unsigned long ig2 : 12;
+ unsigned long vpn : 49;
+ unsigned long vrn : 3;
+ };
+ unsigned long ifa;
+ unsigned long vadr;
+ struct {
+ unsigned long tag : 63;
+ unsigned long ti : 1;
+ };
+ unsigned long etag;
+ };
+ union {
+ struct thash_data *next;
+ unsigned long rid;
+ unsigned long gpaddr;
+ };
+};
+
+#define NITRS 8
+#define NDTRS 8
+
+struct saved_vpd {
+ unsigned long vhpi;
+ unsigned long vgr[16];
+ unsigned long vbgr[16];
+ unsigned long vnat;
+ unsigned long vbnat;
+ unsigned long vcpuid[5];
+ unsigned long vpsr;
+ unsigned long vpr;
+ unsigned long vcr[128];
+};
+
+struct kvm_regs {
+ char *saved_guest;
+ char *saved_stack;
+ struct saved_vpd vpd;
+ /*Arch-regs*/
+ int mp_state;
+ unsigned long vmm_rr;
+ /* TR and TC. */
+ struct thash_data itrs[NITRS];
+ struct thash_data dtrs[NDTRS];
+ /* Bit is set if there is a tr/tc for the region. */
+ unsigned char itr_regions;
+ unsigned char dtr_regions;
+ unsigned char tc_regions;
+
+ char irq_check;
+ unsigned long saved_itc;
+ unsigned long itc_check;
+ unsigned long timer_check;
+ unsigned long timer_pending;
+ unsigned long last_itc;
+
+ unsigned long vrr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long insvc[4]; /* Interrupt in service. */
+ unsigned long xtp;
+
+ unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_saved_rr0; /* from kvm_arch */
+ unsigned long metaphysical_saved_rr4; /* from kvm_arch */
+ unsigned long fp_psr; /* used for lazy float register */
+ unsigned long saved_gp;
+ /* for physical emulation */
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+#endif
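One invariant worth keeping in mind for kvm.h: union context is sized by the 8K dummy[] member, so the anonymous register-file struct must fit inside KVM_CONTEXT_SIZE. A compile-time check could look like the hypothetical sketch below (assuming the usual BUILD_BUG_ON from <linux/kernel.h>; no such check is in this commit):

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <asm/kvm.h>

/* Hypothetical sanity check: the union must stay exactly 8K. */
static inline void context_size_check(void)
{
	BUILD_BUG_ON(sizeof(union context) != KVM_CONTEXT_SIZE);
}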
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
new file mode 100644
index 000000000000..1efe513a9941
--- /dev/null
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -0,0 +1,527 @@
+/*
+ * kvm_host.h: used by the kvm module; holds the ia64-specific sections.
+ *
+ * Copyright (C) 2007, Intel Corporation.
+ *
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#ifndef __ASM_KVM_HOST_H
+#define __ASM_KVM_HOST_H
+
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+
+#define KVM_MAX_VCPUS 4
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* define exit reasons from vmm to kvm*/
+#define EXIT_REASON_VM_PANIC 0
+#define EXIT_REASON_MMIO_INSTRUCTION 1
+#define EXIT_REASON_PAL_CALL 2
+#define EXIT_REASON_SAL_CALL 3
+#define EXIT_REASON_SWITCH_RR6 4
+#define EXIT_REASON_VM_DESTROY 5
+#define EXIT_REASON_EXTERNAL_INTERRUPT 6
+#define EXIT_REASON_IPI 7
+#define EXIT_REASON_PTC_G 8
+
+/*Define vmm address space and vm data space.*/
+#define KVM_VMM_SIZE (16UL<<20)
+#define KVM_VMM_SHIFT 24
+#define KVM_VMM_BASE 0xD000000000000000UL
+#define VMM_SIZE (8UL<<20)
+
+/*
+ * Define vm_buffer, used by PAL Services, base address.
+ * Note: vm_buffer is in the VMM-BLOCK; its size must be < 8M
+ */
+#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
+#define KVM_VM_BUFFER_SIZE (8UL<<20)
+
+/*Define Virtual machine data layout.*/
+#define KVM_VM_DATA_SHIFT 24
+#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE)
+
+
+#define KVM_P2M_BASE KVM_VM_DATA_BASE
+#define KVM_P2M_OFS 0
+#define KVM_P2M_SIZE (8UL << 20)
+
+#define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE)
+#define KVM_VHPT_OFS KVM_P2M_SIZE
+#define KVM_VHPT_BLOCK_SIZE (2UL << 20)
+#define VHPT_SHIFT 18
+#define VHPT_SIZE (1UL << VHPT_SHIFT)
+#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5))
+
+#define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_BLOCK_SIZE (1UL<<20)
+#define VTLB_SHIFT 17
+#define VTLB_SIZE (1UL<<VTLB_SHIFT)
+#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
+
+#define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_BLOCK_SIZE (2UL<<20)
+#define VPD_SHIFT 16
+#define VPD_SIZE (1UL<<VPD_SHIFT)
+
+#define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_BLOCK_SIZE (2UL<<20)
+#define VCPU_SHIFT 18
+#define VCPU_SIZE (1UL<<VCPU_SHIFT)
+#define MAX_VCPU_NUM (KVM_VCPU_BLOCK_SIZE/VCPU_SIZE)
+
+#define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_BLOCK_SIZE (1UL<<19)
+
+#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19)
+
+/* Get vpd, vhpt, tlb, vcpu base addresses. */
+#define VPD_ADDR(n) (KVM_VPD_BASE+(n)*VPD_SIZE)
+#define VHPT_ADDR(n) (KVM_VHPT_BASE+(n)*VHPT_SIZE)
+#define VTLB_ADDR(n) (KVM_VTLB_BASE+(n)*VTLB_SIZE)
+#define VCPU_ADDR(n) (KVM_VCPU_BASE+(n)*VCPU_SIZE)
+
+/*IO section definitions*/
+#define IOREQ_READ 1
+#define IOREQ_WRITE 0
+
+#define STATE_IOREQ_NONE 0
+#define STATE_IOREQ_READY 1
+#define STATE_IOREQ_INPROCESS 2
+#define STATE_IORESP_READY 3
+
+/*Guest Physical address layout.*/
+#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */
+#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */
+#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */
+#define GPFN_PIB (3UL << 60) /* PIB base */
+#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */
+#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */
+#define GPFN_GFW (6UL << 60) /* Guest Firmware */
+#define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */
+
+#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */
+#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */
+#define INVALID_MFN (~0UL)
+#define MEM_G (1UL << 30)
+#define MEM_M (1UL << 20)
+#define MMIO_START (3 * MEM_G)
+#define MMIO_SIZE (512 * MEM_M)
+#define VGA_IO_START 0xA0000UL
+#define VGA_IO_SIZE 0x20000
+#define LEGACY_IO_START (MMIO_START + MMIO_SIZE)
+#define LEGACY_IO_SIZE (64 * MEM_M)
+#define IO_SAPIC_START 0xfec00000UL
+#define IO_SAPIC_SIZE 0x100000
+#define PIB_START 0xfee00000UL
+#define PIB_SIZE 0x200000
+#define GFW_START (4 * MEM_G - 16 * MEM_M)
+#define GFW_SIZE (16 * MEM_M)
+
+/*Deliver mode, defined for ioapic.c*/
+#define dest_Fixed IOSAPIC_FIXED
+#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
+
+#define NMI_VECTOR 2
+#define ExtINT_VECTOR 0
+#define NULL_VECTOR (-1)
+#define IA64_SPURIOUS_INT_VECTOR 0x0f
+
+#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
+
+/*
+ * Delivery mode
+ */
+#define SAPIC_DELIV_SHIFT 8
+#define SAPIC_FIXED 0x0
+#define SAPIC_LOWEST_PRIORITY 0x1
+#define SAPIC_PMI 0x2
+#define SAPIC_NMI 0x4
+#define SAPIC_INIT 0x5
+#define SAPIC_EXTINT 0x7
+
+/*
+ * vcpu->requests bit members for arch
+ */
+#define KVM_REQ_PTC_G 32
+#define KVM_REQ_RESUME 33
+
+#define KVM_PAGES_PER_HPAGE 1
+
+struct kvm;
+struct kvm_vcpu;
+struct kvm_guest_debug{
+};
+
+struct kvm_mmio_req {
+ uint64_t addr; /* physical address */
+ uint64_t size; /* size in bytes */
+ uint64_t data; /* data (or paddr of data) */
+ uint8_t state:4;
+ uint8_t dir:1; /* 1=read, 0=write */
+};
+
+/*Pal data struct */
+struct kvm_pal_call{
+ /*In area*/
+ uint64_t gr28;
+ uint64_t gr29;
+ uint64_t gr30;
+ uint64_t gr31;
+ /*Out area*/
+ struct ia64_pal_retval ret;
+};
+
+/* Sal data structure */
+struct kvm_sal_call{
+ /*In area*/
+ uint64_t in0;
+ uint64_t in1;
+ uint64_t in2;
+ uint64_t in3;
+ uint64_t in4;
+ uint64_t in5;
+ uint64_t in6;
+ uint64_t in7;
+ struct sal_ret_values ret;
+};
+
+/*Guest change rr6*/
+struct kvm_switch_rr6 {
+ uint64_t old_rr;
+ uint64_t new_rr;
+};
+
+union ia64_ipi_a{
+ unsigned long val;
+ struct {
+ unsigned long rv : 3;
+ unsigned long ir : 1;
+ unsigned long eid : 8;
+ unsigned long id : 8;
+ unsigned long ib_base : 44;
+ };
+};
+
+union ia64_ipi_d {
+ unsigned long val;
+ struct {
+ unsigned long vector : 8;
+ unsigned long dm : 3;
+ unsigned long ig : 53;
+ };
+};
+
+/*ipi check exit data*/
+struct kvm_ipi_data{
+ union ia64_ipi_a addr;
+ union ia64_ipi_d data;
+};
+
+/*global purge data*/
+struct kvm_ptc_g {
+ unsigned long vaddr;
+ unsigned long rr;
+ unsigned long ps;
+ struct kvm_vcpu *vcpu;
+};
+
+/*Exit control data */
+struct exit_ctl_data{
+ uint32_t exit_reason;
+ uint32_t vm_status;
+ union {
+ struct kvm_mmio_req ioreq;
+ struct kvm_pal_call pal_data;
+ struct kvm_sal_call sal_data;
+ struct kvm_switch_rr6 rr_data;
+ struct kvm_ipi_data ipi_data;
+ struct kvm_ptc_g ptc_g_data;
+ } u;
+};
+
+union pte_flags {
+ unsigned long val;
+ struct {
+ unsigned long p : 1; /*0 */
+ unsigned long : 1; /* 1 */
+ unsigned long ma : 3; /* 2-4 */
+ unsigned long a : 1; /* 5 */
+ unsigned long d : 1; /* 6 */
+ unsigned long pl : 2; /* 7-8 */
+ unsigned long ar : 3; /* 9-11 */
+ unsigned long ppn : 38; /* 12-49 */
+ unsigned long : 2; /* 50-51 */
+ unsigned long ed : 1; /* 52 */
+ };
+};
+
+union ia64_pta {
+ unsigned long val;
+ struct {
+ unsigned long ve : 1;
+ unsigned long reserved0 : 1;
+ unsigned long size : 6;
+ unsigned long vf : 1;
+ unsigned long reserved1 : 6;
+ unsigned long base : 49;
+ };
+};
+
+struct thash_cb {
+ /* THASH base information */
+ struct thash_data *hash; /* hash table pointer */
+ union ia64_pta pta;
+ int num;
+};
+
+struct kvm_vcpu_stat {
+};
+
+struct kvm_vcpu_arch {
+ int launched;
+ int last_exit;
+ int last_run_cpu;
+ int vmm_tr_slot;
+ int vm_tr_slot;
+
+#define KVM_MP_STATE_RUNNABLE 0
+#define KVM_MP_STATE_UNINITIALIZED 1
+#define KVM_MP_STATE_INIT_RECEIVED 2
+#define KVM_MP_STATE_HALTED 3
+ int mp_state;
+
+#define MAX_PTC_G_NUM 3
+ int ptc_g_count;
+ struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
+
+ /*halt timer to wake up sleepy vcpus*/
+ struct hrtimer hlt_timer;
+ long ht_active;
+
+ struct kvm_lapic *apic; /* kernel irqchip context */
+ struct vpd *vpd;
+
+ /* Exit data for vmm_transition*/
+ struct exit_ctl_data exit_data;
+
+ cpumask_t cache_coherent_map;
+
+ unsigned long vmm_rr;
+ unsigned long host_rr6;
+ unsigned long psbits[8];
+ unsigned long cr_iipa;
+ unsigned long cr_isr;
+ unsigned long vsa_base;
+ unsigned long dirty_log_lock_pa;
+ unsigned long __gp;
+ /* TR and TC. */
+ struct thash_data itrs[NITRS];
+ struct thash_data dtrs[NDTRS];
+ /* Bit is set if there is a tr/tc for the region. */
+ unsigned char itr_regions;
+ unsigned char dtr_regions;
+ unsigned char tc_regions;
+ /* purge all */
+ unsigned long ptce_base;
+ unsigned long ptce_count[2];
+ unsigned long ptce_stride[2];
+ /* itc/itm */
+ unsigned long last_itc;
+ long itc_offset;
+ unsigned long itc_check;
+ unsigned long timer_check;
+ unsigned long timer_pending;
+
+ unsigned long vrr[8];
+ unsigned long ibr[8];
+ unsigned long dbr[8];
+ unsigned long insvc[4]; /* Interrupt in service. */
+ unsigned long xtp;
+
+ unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
+ unsigned long metaphysical_saved_rr0; /* from kvm_arch */
+ unsigned long metaphysical_saved_rr4; /* from kvm_arch */
+ unsigned long fp_psr; /* used for lazy float register */
+ unsigned long saved_gp;
+ /* for physical emulation */
+ int mode_flags;
+ struct thash_cb vtlb;
+ struct thash_cb vhpt;
+ char irq_check;
+ char irq_new_pending;
+
+ unsigned long opcode;
+ unsigned long cause;
+ union context host;
+ union context guest;
+};
+
+struct kvm_vm_stat {
+ u64 remote_tlb_flush;
+};
+
+struct kvm_sal_data {
+ unsigned long boot_ip;
+ unsigned long boot_gp;
+};
+
+struct kvm_arch {
+ unsigned long vm_base;
+ unsigned long metaphysical_rr0;
+ unsigned long metaphysical_rr4;
+ unsigned long vmm_init_rr;
+ unsigned long vhpt_base;
+ unsigned long vtlb_base;
+ unsigned long vpd_base;
+ spinlock_t dirty_log_lock;
+ struct kvm_ioapic *vioapic;
+ struct kvm_vm_stat stat;
+ struct kvm_sal_data rdv_sal_data;
+};
+
+union cpuid3_t {
+ u64 value;
+ struct {
+ u64 number : 8;
+ u64 revision : 8;
+ u64 model : 8;
+ u64 family : 8;
+ u64 archrev : 8;
+ u64 rv : 24;
+ };
+};
+
+struct kvm_pt_regs {
+ /* The following registers are saved by SAVE_MIN: */
+ unsigned long b6; /* scratch */
+ unsigned long b7; /* scratch */
+
+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+ unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+ unsigned long r8; /* scratch (return value register 0) */
+ unsigned long r9; /* scratch (return value register 1) */
+ unsigned long r10; /* scratch (return value register 2) */
+ unsigned long r11; /* scratch (return value register 3) */
+
+ unsigned long cr_ipsr; /* interrupted task's psr */
+ unsigned long cr_iip; /* interrupted task's instruction pointer */
+ unsigned long cr_ifs; /* interrupted task's function state */
+
+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+ unsigned long ar_pfs; /* prev function state */
+ unsigned long ar_rsc; /* RSE configuration */
+ /* The following two are valid only if cr_ipsr.cpl > 0: */
+ unsigned long ar_rnat; /* RSE NaT */
+ unsigned long ar_bspstore; /* RSE bspstore */
+
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+ unsigned long b0; /* return pointer (bp) */
+ unsigned long loadrs; /* size of dirty partition << 16 */
+
+ unsigned long r1; /* the gp pointer */
+ unsigned long r12; /* interrupted task's memory stack pointer */
+ unsigned long r13; /* thread pointer */
+
+ unsigned long ar_fpsr; /* floating point status (preserved) */
+ unsigned long r15; /* scratch */
+
+ /* The remaining registers are NOT saved for system calls. */
+ unsigned long r14; /* scratch */
+ unsigned long r2; /* scratch */
+ unsigned long r3; /* scratch */
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+ unsigned long ar_ccv; /* compare/exchange value (scratch) */
+
+ /*
+ * Floating point registers that the kernel considers scratch:
+ */
+ struct ia64_fpreg f6; /* scratch */
+ struct ia64_fpreg f7; /* scratch */
+ struct ia64_fpreg f8; /* scratch */
+ struct ia64_fpreg f9; /* scratch */
+ struct ia64_fpreg f10; /* scratch */
+ struct ia64_fpreg f11; /* scratch */
+
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+ unsigned long eml_unat; /* used for emulating instruction */
+ unsigned long pad0; /* alignment pad */
+};
+
+static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
+{
+ return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+}
+
+typedef int kvm_vmm_entry(void);
+typedef void kvm_tramp_entry(union context *host, union context *guest);
+
+struct kvm_vmm_info{
+ struct module *module;
+ kvm_vmm_entry *vmm_entry;
+ kvm_tramp_entry *tramp_entry;
+ unsigned long vmm_ivt;
+};
+
+int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+void kvm_sal_emul(struct kvm_vcpu *vcpu);
+
+static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
+
+#endif
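vcpu_regs() relies on the vcpu structure and its kernel stack sharing one allocation: the saved register file sits just below IA64_STK_OFFSET from the vcpu base. A hypothetical caller (the function below is illustrative, not from this commit) would read guest registers through it:

#include <linux/kvm_host.h>

/* Hypothetical: fetch the guest's first return-value register. */
static inline unsigned long guest_r8(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	return regs->r8;	/* scratch (return value register 0) */
}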
diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h
new file mode 100644
index 000000000000..0d6d8ca07b8c
--- /dev/null
+++ b/arch/ia64/include/asm/kvm_para.h
@@ -0,0 +1,27 @@
+#ifndef __IA64_KVM_PARA_H
+#define __IA64_KVM_PARA_H
+
+/*
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif
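Returning 0 here means ia64 advertises no paravirtual features. A feature probe built on top of it (a sketch mirroring the shape of the generic kvm_para_has_feature() helper) therefore always comes out false:

#include <asm/kvm_para.h>

/* Hypothetical probe: with the stub above this is always 0. */
static inline int para_has_feature(unsigned int feature)
{
	return (kvm_arch_para_features() >> feature) & 1;
}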
diff --git a/arch/ia64/include/asm/libata-portmap.h b/arch/ia64/include/asm/libata-portmap.h
new file mode 100644
index 000000000000..0e00c9a9f410
--- /dev/null
+++ b/arch/ia64/include/asm/libata-portmap.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_IA64_LIBATA_PORTMAP_H
+#define __ASM_IA64_LIBATA_PORTMAP_H
+
+#define ATA_PRIMARY_CMD 0x1F0
+#define ATA_PRIMARY_CTL 0x3F6
+#define ATA_PRIMARY_IRQ(dev) isa_irq_to_vector(14)
+
+#define ATA_SECONDARY_CMD 0x170
+#define ATA_SECONDARY_CTL 0x376
+#define ATA_SECONDARY_IRQ(dev) isa_irq_to_vector(15)
+
+#endif
diff --git a/arch/ia64/include/asm/linkage.h b/arch/ia64/include/asm/linkage.h
new file mode 100644
index 000000000000..ef22a45c1890
--- /dev/null
+++ b/arch/ia64/include/asm/linkage.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifndef __ASSEMBLY__
+
+#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
+
+#else
+
+#include <asm/asmmacro.h>
+
+#endif
+
+#endif
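On ia64, asmlinkage expands to the syscall_linkage attribute, which marks all input registers as live at function exit so an interrupted system call can be restarted with its original arguments. A hypothetical declaration using it:

#include <linux/linkage.h>

/* Hypothetical syscall prototype -- illustration only. */
asmlinkage long sys_example(int fd, unsigned long arg);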
diff --git a/arch/ia64/include/asm/local.h b/arch/ia64/include/asm/local.h
new file mode 100644
index 000000000000..c11c530f74d0
--- /dev/null
+++ b/arch/ia64/include/asm/local.h
@@ -0,0 +1 @@
+#include <asm-generic/local.h>
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
new file mode 100644
index 000000000000..2b850ccafef5
--- /dev/null
+++ b/arch/ia64/include/asm/machvec.h
@@ -0,0 +1,460 @@
+/*
+ * Machine vector for IA-64.
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_MACHVEC_H
+#define _ASM_IA64_MACHVEC_H
+
+#include <linux/types.h>
+
+/* forward declarations: */
+struct device;
+struct pt_regs;
+struct scatterlist;
+struct page;
+struct mm_struct;
+struct pci_bus;
+struct task_struct;
+struct pci_dev;
+struct msi_desc;
+struct dma_attrs;
+
+typedef void ia64_mv_setup_t (char **);
+typedef void ia64_mv_cpu_init_t (void);
+typedef void ia64_mv_irq_init_t (void);
+typedef void ia64_mv_send_ipi_t (int, int, int, int);
+typedef void ia64_mv_timer_interrupt_t (int, void *);
+typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
+typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
+typedef u8 ia64_mv_irq_to_vector (int);
+typedef unsigned int ia64_mv_local_vector_to_irq (u8);
+typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
+typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
+ u8 size);
+typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
+ u8 size);
+typedef void ia64_mv_migrate_t(struct task_struct * task);
+typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *);
+typedef void ia64_mv_kernel_launch_event_t(void);
+
+/* DMA-mapping interface: */
+typedef void ia64_mv_dma_init (void);
+typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
+typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
+typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
+typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
+typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
+typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
+typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
+typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
+typedef int ia64_mv_dma_supported (struct device *, u64);
+
+typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
+typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+
+/*
+ * WARNING: The legacy I/O space is _architected_. Platforms are
+ * expected to follow this architected model (see Section 10.7 in the
+ * IA-64 Architecture Software Developer's Manual). Unfortunately,
+ * some broken machines do not follow that model, which is why we have
+ * to make the inX/outX operations part of the machine vector.
+ * Platform designers should follow the architected model whenever
+ * possible.
+ */
+typedef unsigned int ia64_mv_inb_t (unsigned long);
+typedef unsigned int ia64_mv_inw_t (unsigned long);
+typedef unsigned int ia64_mv_inl_t (unsigned long);
+typedef void ia64_mv_outb_t (unsigned char, unsigned long);
+typedef void ia64_mv_outw_t (unsigned short, unsigned long);
+typedef void ia64_mv_outl_t (unsigned int, unsigned long);
+typedef void ia64_mv_mmiowb_t (void);
+typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
+typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
+
+typedef int ia64_mv_setup_msi_irq_t (struct pci_dev *pdev, struct msi_desc *);
+typedef void ia64_mv_teardown_msi_irq_t (unsigned int irq);
+
+static inline void
+machvec_noop (void)
+{
+}
+
+static inline void
+machvec_noop_mm (struct mm_struct *mm)
+{
+}
+
+static inline void
+machvec_noop_task (struct task_struct *task)
+{
+}
+
+static inline void
+machvec_noop_bus (struct pci_bus *bus)
+{
+}
+
+extern void machvec_setup (char **);
+extern void machvec_timer_interrupt (int, void *);
+extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
+extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
+extern void machvec_tlb_migrate_finish (struct mm_struct *);
+
+# if defined (CONFIG_IA64_HP_SIM)
+# include <asm/machvec_hpsim.h>
+# elif defined (CONFIG_IA64_DIG)
+# include <asm/machvec_dig.h>
+# elif defined (CONFIG_IA64_HP_ZX1)
+# include <asm/machvec_hpzx1.h>
+# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
+# include <asm/machvec_hpzx1_swiotlb.h>
+# elif defined (CONFIG_IA64_SGI_SN2)
+# include <asm/machvec_sn2.h>
+# elif defined (CONFIG_IA64_SGI_UV)
+# include <asm/machvec_uv.h>
+# elif defined (CONFIG_IA64_GENERIC)
+
+# ifdef MACHVEC_PLATFORM_HEADER
+# include MACHVEC_PLATFORM_HEADER
+# else
+# define platform_name ia64_mv.name
+# define platform_setup ia64_mv.setup
+# define platform_cpu_init ia64_mv.cpu_init
+# define platform_irq_init ia64_mv.irq_init
+# define platform_send_ipi ia64_mv.send_ipi
+# define platform_timer_interrupt ia64_mv.timer_interrupt
+# define platform_global_tlb_purge ia64_mv.global_tlb_purge
+# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
+# define platform_dma_init ia64_mv.dma_init
+# define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent
+# define platform_dma_free_coherent ia64_mv.dma_free_coherent
+# define platform_dma_map_single_attrs ia64_mv.dma_map_single_attrs
+# define platform_dma_unmap_single_attrs ia64_mv.dma_unmap_single_attrs
+# define platform_dma_map_sg_attrs ia64_mv.dma_map_sg_attrs
+# define platform_dma_unmap_sg_attrs ia64_mv.dma_unmap_sg_attrs
+# define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
+# define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
+# define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
+# define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
+# define platform_dma_mapping_error ia64_mv.dma_mapping_error
+# define platform_dma_supported ia64_mv.dma_supported
+# define platform_irq_to_vector ia64_mv.irq_to_vector
+# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
+# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
+# define platform_pci_legacy_read ia64_mv.pci_legacy_read
+# define platform_pci_legacy_write ia64_mv.pci_legacy_write
+# define platform_inb ia64_mv.inb
+# define platform_inw ia64_mv.inw
+# define platform_inl ia64_mv.inl
+# define platform_outb ia64_mv.outb
+# define platform_outw ia64_mv.outw
+# define platform_outl ia64_mv.outl
+# define platform_mmiowb ia64_mv.mmiowb
+# define platform_readb ia64_mv.readb
+# define platform_readw ia64_mv.readw
+# define platform_readl ia64_mv.readl
+# define platform_readq ia64_mv.readq
+# define platform_readb_relaxed ia64_mv.readb_relaxed
+# define platform_readw_relaxed ia64_mv.readw_relaxed
+# define platform_readl_relaxed ia64_mv.readl_relaxed
+# define platform_readq_relaxed ia64_mv.readq_relaxed
+# define platform_migrate ia64_mv.migrate
+# define platform_setup_msi_irq ia64_mv.setup_msi_irq
+# define platform_teardown_msi_irq ia64_mv.teardown_msi_irq
+# define platform_pci_fixup_bus ia64_mv.pci_fixup_bus
+# define platform_kernel_launch_event ia64_mv.kernel_launch_event
+# endif
+
+/* __attribute__((__aligned__(16))) is required to make the size of the
+ * structure a multiple of 16 bytes.
+ * This will fill up the holes created because of section 3.3.1 in the
+ * Software Conventions guide.
+ */
+struct ia64_machine_vector {
+ const char *name;
+ ia64_mv_setup_t *setup;
+ ia64_mv_cpu_init_t *cpu_init;
+ ia64_mv_irq_init_t *irq_init;
+ ia64_mv_send_ipi_t *send_ipi;
+ ia64_mv_timer_interrupt_t *timer_interrupt;
+ ia64_mv_global_tlb_purge_t *global_tlb_purge;
+ ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
+ ia64_mv_dma_init *dma_init;
+ ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
+ ia64_mv_dma_free_coherent *dma_free_coherent;
+ ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
+ ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
+ ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
+ ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
+ ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
+ ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
+ ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
+ ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
+ ia64_mv_dma_mapping_error *dma_mapping_error;
+ ia64_mv_dma_supported *dma_supported;
+ ia64_mv_irq_to_vector *irq_to_vector;
+ ia64_mv_local_vector_to_irq *local_vector_to_irq;
+ ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
+ ia64_mv_pci_legacy_read_t *pci_legacy_read;
+ ia64_mv_pci_legacy_write_t *pci_legacy_write;
+ ia64_mv_inb_t *inb;
+ ia64_mv_inw_t *inw;
+ ia64_mv_inl_t *inl;
+ ia64_mv_outb_t *outb;
+ ia64_mv_outw_t *outw;
+ ia64_mv_outl_t *outl;
+ ia64_mv_mmiowb_t *mmiowb;
+ ia64_mv_readb_t *readb;
+ ia64_mv_readw_t *readw;
+ ia64_mv_readl_t *readl;
+ ia64_mv_readq_t *readq;
+ ia64_mv_readb_relaxed_t *readb_relaxed;
+ ia64_mv_readw_relaxed_t *readw_relaxed;
+ ia64_mv_readl_relaxed_t *readl_relaxed;
+ ia64_mv_readq_relaxed_t *readq_relaxed;
+ ia64_mv_migrate_t *migrate;
+ ia64_mv_setup_msi_irq_t *setup_msi_irq;
+ ia64_mv_teardown_msi_irq_t *teardown_msi_irq;
+ ia64_mv_pci_fixup_bus_t *pci_fixup_bus;
+ ia64_mv_kernel_launch_event_t *kernel_launch_event;
+} __attribute__((__aligned__(16))); /* see the alignment comment above */
+
+#define MACHVEC_INIT(name) \
+{ \
+ #name, \
+ platform_setup, \
+ platform_cpu_init, \
+ platform_irq_init, \
+ platform_send_ipi, \
+ platform_timer_interrupt, \
+ platform_global_tlb_purge, \
+ platform_tlb_migrate_finish, \
+ platform_dma_init, \
+ platform_dma_alloc_coherent, \
+ platform_dma_free_coherent, \
+ platform_dma_map_single_attrs, \
+ platform_dma_unmap_single_attrs, \
+ platform_dma_map_sg_attrs, \
+ platform_dma_unmap_sg_attrs, \
+ platform_dma_sync_single_for_cpu, \
+ platform_dma_sync_sg_for_cpu, \
+ platform_dma_sync_single_for_device, \
+ platform_dma_sync_sg_for_device, \
+ platform_dma_mapping_error, \
+ platform_dma_supported, \
+ platform_irq_to_vector, \
+ platform_local_vector_to_irq, \
+ platform_pci_get_legacy_mem, \
+ platform_pci_legacy_read, \
+ platform_pci_legacy_write, \
+ platform_inb, \
+ platform_inw, \
+ platform_inl, \
+ platform_outb, \
+ platform_outw, \
+ platform_outl, \
+ platform_mmiowb, \
+ platform_readb, \
+ platform_readw, \
+ platform_readl, \
+ platform_readq, \
+ platform_readb_relaxed, \
+ platform_readw_relaxed, \
+ platform_readl_relaxed, \
+ platform_readq_relaxed, \
+ platform_migrate, \
+ platform_setup_msi_irq, \
+ platform_teardown_msi_irq, \
+ platform_pci_fixup_bus, \
+ platform_kernel_launch_event \
+}
+
+extern struct ia64_machine_vector ia64_mv;
+extern void machvec_init (const char *name);
+extern void machvec_init_from_cmdline(const char *cmdline);
+
+# else
+# error Unknown configuration. Update arch/ia64/include/asm/machvec.h.
+# endif /* CONFIG_IA64_GENERIC */
+
+/*
+ * Declare default routines which aren't declared anywhere else:
+ */
+extern ia64_mv_dma_init swiotlb_init;
+extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent;
+extern ia64_mv_dma_free_coherent swiotlb_free_coherent;
+extern ia64_mv_dma_map_single swiotlb_map_single;
+extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs;
+extern ia64_mv_dma_unmap_single swiotlb_unmap_single;
+extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs;
+extern ia64_mv_dma_map_sg swiotlb_map_sg;
+extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg;
+extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs;
+extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
+extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu;
+extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
+extern ia64_mv_dma_sync_sg_for_device swiotlb_sync_sg_for_device;
+extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error;
+extern ia64_mv_dma_supported swiotlb_dma_supported;
+
+/*
+ * Define default versions so we can extend machvec for new platforms without having
+ * to update the machvec files for all existing platforms.
+ */
+#ifndef platform_setup
+# define platform_setup machvec_setup
+#endif
+#ifndef platform_cpu_init
+# define platform_cpu_init machvec_noop
+#endif
+#ifndef platform_irq_init
+# define platform_irq_init machvec_noop
+#endif
+
+#ifndef platform_send_ipi
+# define platform_send_ipi ia64_send_ipi /* default to architected version */
+#endif
+#ifndef platform_timer_interrupt
+# define platform_timer_interrupt machvec_timer_interrupt
+#endif
+#ifndef platform_global_tlb_purge
+# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */
+#endif
+#ifndef platform_tlb_migrate_finish
+# define platform_tlb_migrate_finish machvec_noop_mm
+#endif
+#ifndef platform_kernel_launch_event
+# define platform_kernel_launch_event machvec_noop
+#endif
+#ifndef platform_dma_init
+# define platform_dma_init swiotlb_init
+#endif
+#ifndef platform_dma_alloc_coherent
+# define platform_dma_alloc_coherent swiotlb_alloc_coherent
+#endif
+#ifndef platform_dma_free_coherent
+# define platform_dma_free_coherent swiotlb_free_coherent
+#endif
+#ifndef platform_dma_map_single_attrs
+# define platform_dma_map_single_attrs swiotlb_map_single_attrs
+#endif
+#ifndef platform_dma_unmap_single_attrs
+# define platform_dma_unmap_single_attrs swiotlb_unmap_single_attrs
+#endif
+#ifndef platform_dma_map_sg_attrs
+# define platform_dma_map_sg_attrs swiotlb_map_sg_attrs
+#endif
+#ifndef platform_dma_unmap_sg_attrs
+# define platform_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs
+#endif
+#ifndef platform_dma_sync_single_for_cpu
+# define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu
+#endif
+#ifndef platform_dma_sync_sg_for_cpu
+# define platform_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu
+#endif
+#ifndef platform_dma_sync_single_for_device
+# define platform_dma_sync_single_for_device swiotlb_sync_single_for_device
+#endif
+#ifndef platform_dma_sync_sg_for_device
+# define platform_dma_sync_sg_for_device swiotlb_sync_sg_for_device
+#endif
+#ifndef platform_dma_mapping_error
+# define platform_dma_mapping_error swiotlb_dma_mapping_error
+#endif
+#ifndef platform_dma_supported
+# define platform_dma_supported swiotlb_dma_supported
+#endif
+#ifndef platform_irq_to_vector
+# define platform_irq_to_vector __ia64_irq_to_vector
+#endif
+#ifndef platform_local_vector_to_irq
+# define platform_local_vector_to_irq __ia64_local_vector_to_irq
+#endif
+#ifndef platform_pci_get_legacy_mem
+# define platform_pci_get_legacy_mem ia64_pci_get_legacy_mem
+#endif
+#ifndef platform_pci_legacy_read
+# define platform_pci_legacy_read ia64_pci_legacy_read
+extern int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size);
+#endif
+#ifndef platform_pci_legacy_write
+# define platform_pci_legacy_write ia64_pci_legacy_write
+extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size);
+#endif
+#ifndef platform_inb
+# define platform_inb __ia64_inb
+#endif
+#ifndef platform_inw
+# define platform_inw __ia64_inw
+#endif
+#ifndef platform_inl
+# define platform_inl __ia64_inl
+#endif
+#ifndef platform_outb
+# define platform_outb __ia64_outb
+#endif
+#ifndef platform_outw
+# define platform_outw __ia64_outw
+#endif
+#ifndef platform_outl
+# define platform_outl __ia64_outl
+#endif
+#ifndef platform_mmiowb
+# define platform_mmiowb __ia64_mmiowb
+#endif
+#ifndef platform_readb
+# define platform_readb __ia64_readb
+#endif
+#ifndef platform_readw
+# define platform_readw __ia64_readw
+#endif
+#ifndef platform_readl
+# define platform_readl __ia64_readl
+#endif
+#ifndef platform_readq
+# define platform_readq __ia64_readq
+#endif
+#ifndef platform_readb_relaxed
+# define platform_readb_relaxed __ia64_readb_relaxed
+#endif
+#ifndef platform_readw_relaxed
+# define platform_readw_relaxed __ia64_readw_relaxed
+#endif
+#ifndef platform_readl_relaxed
+# define platform_readl_relaxed __ia64_readl_relaxed
+#endif
+#ifndef platform_readq_relaxed
+# define platform_readq_relaxed __ia64_readq_relaxed
+#endif
+#ifndef platform_migrate
+# define platform_migrate machvec_noop_task
+#endif
+#ifndef platform_setup_msi_irq
+# define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
+#endif
+#ifndef platform_teardown_msi_irq
+# define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
+#endif
+#ifndef platform_pci_fixup_bus
+# define platform_pci_fixup_bus machvec_noop_bus
+#endif
+
+#endif /* _ASM_IA64_MACHVEC_H */
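The payoff of this indirection: callers always spell an operation platform_xxx. On CONFIG_IA64_GENERIC the name resolves through the global ia64_mv at run time; on a platform-specific build it binds to a concrete function at compile time. A small hypothetical caller:

#include <asm/machvec.h>

/* Hypothetical: legacy I/O port read through the machine vector. */
static inline unsigned int port_read_byte(unsigned long port)
{
	return platform_inb(port);	/* ia64_mv.inb(port) on generic */
}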
diff --git a/arch/ia64/include/asm/machvec_dig.h b/arch/ia64/include/asm/machvec_dig.h
new file mode 100644
index 000000000000..8a0752f40987
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_dig.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_IA64_MACHVEC_DIG_h
+#define _ASM_IA64_MACHVEC_DIG_h
+
+extern ia64_mv_setup_t dig_setup;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "dig"
+#define platform_setup dig_setup
+
+#endif /* _ASM_IA64_MACHVEC_DIG_h */
diff --git a/arch/ia64/include/asm/machvec_hpsim.h b/arch/ia64/include/asm/machvec_hpsim.h
new file mode 100644
index 000000000000..cf72fc87fdfe
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_hpsim.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_IA64_MACHVEC_HPSIM_h
+#define _ASM_IA64_MACHVEC_HPSIM_h
+
+extern ia64_mv_setup_t hpsim_setup;
+extern ia64_mv_irq_init_t hpsim_irq_init;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "hpsim"
+#define platform_setup hpsim_setup
+#define platform_irq_init hpsim_irq_init
+
+#endif /* _ASM_IA64_MACHVEC_HPSIM_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
new file mode 100644
index 000000000000..2f57f5144b9f
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_IA64_MACHVEC_HPZX1_h
+#define _ASM_IA64_MACHVEC_HPZX1_h
+
+extern ia64_mv_setup_t dig_setup;
+extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
+extern ia64_mv_dma_free_coherent sba_free_coherent;
+extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
+extern ia64_mv_dma_supported sba_dma_supported;
+extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "hpzx1"
+#define platform_setup dig_setup
+#define platform_dma_init machvec_noop
+#define platform_dma_alloc_coherent sba_alloc_coherent
+#define platform_dma_free_coherent sba_free_coherent
+#define platform_dma_map_single_attrs sba_map_single_attrs
+#define platform_dma_unmap_single_attrs sba_unmap_single_attrs
+#define platform_dma_map_sg_attrs sba_map_sg_attrs
+#define platform_dma_unmap_sg_attrs sba_unmap_sg_attrs
+#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
+#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
+#define platform_dma_sync_single_for_device machvec_dma_sync_single
+#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
+#define platform_dma_supported sba_dma_supported
+#define platform_dma_mapping_error sba_dma_mapping_error
+
+#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
new file mode 100644
index 000000000000..a842cdda827b
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
+#define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
+
+extern ia64_mv_setup_t dig_setup;
+extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent;
+extern ia64_mv_dma_free_coherent hwsw_free_coherent;
+extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs;
+extern ia64_mv_dma_supported hwsw_dma_supported;
+extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error;
+extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu;
+extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu;
+extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device;
+extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "hpzx1_swiotlb"
+
+#define platform_setup dig_setup
+#define platform_dma_init machvec_noop
+#define platform_dma_alloc_coherent hwsw_alloc_coherent
+#define platform_dma_free_coherent hwsw_free_coherent
+#define platform_dma_map_single_attrs hwsw_map_single_attrs
+#define platform_dma_unmap_single_attrs hwsw_unmap_single_attrs
+#define platform_dma_map_sg_attrs hwsw_map_sg_attrs
+#define platform_dma_unmap_sg_attrs hwsw_unmap_sg_attrs
+#define platform_dma_supported hwsw_dma_supported
+#define platform_dma_mapping_error hwsw_dma_mapping_error
+#define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device hwsw_sync_single_for_device
+#define platform_dma_sync_sg_for_device hwsw_sync_sg_for_device
+
+#endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
new file mode 100644
index 000000000000..7f21249fba3f
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -0,0 +1,33 @@
+#include <asm/machvec.h>
+
+extern ia64_mv_send_ipi_t ia64_send_ipi;
+extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
+extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
+extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
+extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
+extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;
+
+extern ia64_mv_inb_t __ia64_inb;
+extern ia64_mv_inw_t __ia64_inw;
+extern ia64_mv_inl_t __ia64_inl;
+extern ia64_mv_outb_t __ia64_outb;
+extern ia64_mv_outw_t __ia64_outw;
+extern ia64_mv_outl_t __ia64_outl;
+extern ia64_mv_mmiowb_t __ia64_mmiowb;
+extern ia64_mv_readb_t __ia64_readb;
+extern ia64_mv_readw_t __ia64_readw;
+extern ia64_mv_readl_t __ia64_readl;
+extern ia64_mv_readq_t __ia64_readq;
+extern ia64_mv_readb_t __ia64_readb_relaxed;
+extern ia64_mv_readw_t __ia64_readw_relaxed;
+extern ia64_mv_readl_t __ia64_readl_relaxed;
+extern ia64_mv_readq_t __ia64_readq_relaxed;
+
+#define MACHVEC_HELPER(name) \
+ struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
+ = MACHVEC_INIT(name);
+
+#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name)
+
+MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
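A platform instantiates its vector by defining MACHVEC_PLATFORM_NAME (and, for a generic build, MACHVEC_PLATFORM_HEADER) before including this file. The in-tree dig platform follows roughly this shape (a sketch; exact file contents may differ):

/* e.g. arch/ia64/dig/machvec.c (sketch) */
#define MACHVEC_PLATFORM_NAME		dig
#define MACHVEC_PLATFORM_HEADER		<asm/machvec_dig.h>
#include <asm/machvec_init.h>

/* which expands to:
 *   struct ia64_machine_vector machvec_dig
 *       __attribute__((unused, __section__(".machvec"))) = MACHVEC_INIT(dig);
 */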
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
new file mode 100644
index 000000000000..781308ea7b88
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#ifndef _ASM_IA64_MACHVEC_SN2_H
+#define _ASM_IA64_MACHVEC_SN2_H
+
+extern ia64_mv_setup_t sn_setup;
+extern ia64_mv_cpu_init_t sn_cpu_init;
+extern ia64_mv_irq_init_t sn_irq_init;
+extern ia64_mv_send_ipi_t sn2_send_IPI;
+extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
+extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
+extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
+extern ia64_mv_irq_to_vector sn_irq_to_vector;
+extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
+extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
+extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
+extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
+extern ia64_mv_inb_t __sn_inb;
+extern ia64_mv_inw_t __sn_inw;
+extern ia64_mv_inl_t __sn_inl;
+extern ia64_mv_outb_t __sn_outb;
+extern ia64_mv_outw_t __sn_outw;
+extern ia64_mv_outl_t __sn_outl;
+extern ia64_mv_mmiowb_t __sn_mmiowb;
+extern ia64_mv_readb_t __sn_readb;
+extern ia64_mv_readw_t __sn_readw;
+extern ia64_mv_readl_t __sn_readl;
+extern ia64_mv_readq_t __sn_readq;
+extern ia64_mv_readb_t __sn_readb_relaxed;
+extern ia64_mv_readw_t __sn_readw_relaxed;
+extern ia64_mv_readl_t __sn_readl_relaxed;
+extern ia64_mv_readq_t __sn_readq_relaxed;
+extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent;
+extern ia64_mv_dma_free_coherent sn_dma_free_coherent;
+extern ia64_mv_dma_map_single_attrs sn_dma_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs sn_dma_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs;
+extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
+extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu;
+extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
+extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
+extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
+extern ia64_mv_dma_supported sn_dma_supported;
+extern ia64_mv_migrate_t sn_migrate;
+extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
+extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
+extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
+extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
+
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "sn2"
+#define platform_setup sn_setup
+#define platform_cpu_init sn_cpu_init
+#define platform_irq_init sn_irq_init
+#define platform_send_ipi sn2_send_IPI
+#define platform_timer_interrupt sn_timer_interrupt
+#define platform_global_tlb_purge sn2_global_tlb_purge
+#define platform_tlb_migrate_finish sn_tlb_migrate_finish
+#define platform_pci_fixup sn_pci_fixup
+#define platform_inb __sn_inb
+#define platform_inw __sn_inw
+#define platform_inl __sn_inl
+#define platform_outb __sn_outb
+#define platform_outw __sn_outw
+#define platform_outl __sn_outl
+#define platform_mmiowb __sn_mmiowb
+#define platform_readb __sn_readb
+#define platform_readw __sn_readw
+#define platform_readl __sn_readl
+#define platform_readq __sn_readq
+#define platform_readb_relaxed __sn_readb_relaxed
+#define platform_readw_relaxed __sn_readw_relaxed
+#define platform_readl_relaxed __sn_readl_relaxed
+#define platform_readq_relaxed __sn_readq_relaxed
+#define platform_irq_to_vector sn_irq_to_vector
+#define platform_local_vector_to_irq sn_local_vector_to_irq
+#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
+#define platform_pci_legacy_read sn_pci_legacy_read
+#define platform_pci_legacy_write sn_pci_legacy_write
+#define platform_dma_init machvec_noop
+#define platform_dma_alloc_coherent sn_dma_alloc_coherent
+#define platform_dma_free_coherent sn_dma_free_coherent
+#define platform_dma_map_single_attrs sn_dma_map_single_attrs
+#define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs
+#define platform_dma_map_sg_attrs sn_dma_map_sg_attrs
+#define platform_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs
+#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
+#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
+#define platform_dma_mapping_error sn_dma_mapping_error
+#define platform_dma_supported sn_dma_supported
+#define platform_migrate sn_migrate
+#define platform_kernel_launch_event sn_kernel_launch_event
+#ifdef CONFIG_PCI_MSI
+#define platform_setup_msi_irq sn_setup_msi_irq
+#define platform_teardown_msi_irq sn_teardown_msi_irq
+#else
+#define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
+#define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
+#endif
+#define platform_pci_fixup_bus sn_pci_fixup_bus
+
+#include <asm/sn/io.h>
+
+#endif /* _ASM_IA64_MACHVEC_SN2_H */
diff --git a/arch/ia64/include/asm/machvec_uv.h b/arch/ia64/include/asm/machvec_uv.h
new file mode 100644
index 000000000000..2931447f3813
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_uv.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV Core Functions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_MACHVEC_UV_H
+#define _ASM_IA64_MACHVEC_UV_H
+
+extern ia64_mv_setup_t uv_setup;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "uv"
+#define platform_setup uv_setup
+
+#endif /* _ASM_IA64_MACHVEC_UV_H */
diff --git a/arch/ia64/include/asm/mc146818rtc.h b/arch/ia64/include/asm/mc146818rtc.h
new file mode 100644
index 000000000000..407787a237ba
--- /dev/null
+++ b/arch/ia64/include/asm/mc146818rtc.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_IA64_MC146818RTC_H
+#define _ASM_IA64_MC146818RTC_H
+
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+
+/* empty include file to satisfy the include in genrtc.c */
+
+#endif /* _ASM_IA64_MC146818RTC_H */
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
new file mode 100644
index 000000000000..18a4321349a3
--- /dev/null
+++ b/arch/ia64/include/asm/mca.h
@@ -0,0 +1,179 @@
+/*
+ * File: mca.h
+ * Purpose: Machine check handling specific defines
+ *
+ * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Russ Anderson <rja@sgi.com>
+ */
+
+#ifndef _ASM_IA64_MCA_H
+#define _ASM_IA64_MCA_H
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#include <asm/param.h>
+#include <asm/sal.h>
+#include <asm/processor.h>
+#include <asm/mca_asm.h>
+
+#define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */
+
+typedef struct ia64_fptr {
+ unsigned long fp;
+ unsigned long gp;
+} ia64_fptr_t;
+
+typedef union cmcv_reg_u {
+ u64 cmcv_regval;
+ struct {
+ u64 cmcr_vector : 8;
+ u64 cmcr_reserved1 : 4;
+ u64 cmcr_ignored1 : 1;
+ u64 cmcr_reserved2 : 3;
+ u64 cmcr_mask : 1;
+ u64 cmcr_ignored2 : 47;
+ } cmcv_reg_s;
+
+} cmcv_reg_t;
+
+#define cmcv_mask cmcv_reg_s.cmcr_mask
+#define cmcv_vector cmcv_reg_s.cmcr_vector
+
+enum {
+ IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0,
+ IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1,
+ IA64_MCA_RENDEZ_CHECKIN_INIT = 0x2,
+ IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA = 0x3,
+};
+
+/* Information maintained by the MC infrastructure */
+typedef struct ia64_mc_info_s {
+ u64 imi_mca_handler;
+ size_t imi_mca_handler_size;
+ u64 imi_monarch_init_handler;
+ size_t imi_monarch_init_handler_size;
+ u64 imi_slave_init_handler;
+ size_t imi_slave_init_handler_size;
+ u8 imi_rendez_checkin[NR_CPUS];
+
+} ia64_mc_info_t;
+
+/* Handover state from SAL to OS and vice versa, for both MCA and INIT events.
+ * Besides the handover state, it also contains some saved registers from the
+ * time of the event.
+ * Note: mca_asm.S depends on the precise layout of this structure.
+ */
+
+struct ia64_sal_os_state {
+
+ /* SAL to OS */
+ u64 os_gp; /* GP of the os registered with the SAL, physical */
+ u64 pal_proc; /* PAL_PROC entry point, physical */
+ u64 sal_proc; /* SAL_PROC entry point, physical */
+ u64 rv_rc; /* MCA - Rendezvous state, INIT - reason code */
+ u64 proc_state_param; /* from R18 */
+ u64 monarch; /* 1 for a monarch event, 0 for a slave */
+
+ /* common */
+ u64 sal_ra; /* Return address in SAL, physical */
+ u64 sal_gp; /* GP of the SAL - physical */
+ pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */
+ /* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK).
+ * Note: if the MCA/INIT recovery code wants to resume to a new context
+ * then it must change these values to reflect the new kernel stack.
+ */
+ u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */
+ u64 prev_IA64_KR_CURRENT_STACK;
+ struct task_struct *prev_task; /* previous task, NULL if it is not useful */
+ /* Some interrupt registers are not saved in minstate, pt_regs or
+ * switch_stack. Because MCA/INIT can occur when interrupts are
+ * disabled, we need to save the additional interrupt registers over
+ * MCA/INIT and resume.
+ */
+ u64 isr;
+ u64 ifa;
+ u64 itir;
+ u64 iipa;
+ u64 iim;
+ u64 iha;
+
+ /* OS to SAL */
+ u64 os_status; /* OS status to SAL, enum below */
+ u64 context; /* 0 if return to same context
+ 1 if return to new context */
+};
+
+enum {
+ IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */
+	IA64_MCA_WARM_BOOT	= -1,	/* Warm boot of the system needed from SAL */
+	IA64_MCA_COLD_BOOT	= -2,	/* Cold boot of the system needed from SAL */
+ IA64_MCA_HALT = -3 /* System to be halted by SAL */
+};
+
+enum {
+ IA64_INIT_RESUME = 0x0, /* Resume after return from INIT */
+	IA64_INIT_WARM_BOOT	= -1,	/* Warm boot of the system needed from SAL */
+};
+
+enum {
+ IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */
+ IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */
+};
+
+/* Per-CPU MCA state that is too big for normal per-CPU variables. */
+
+struct ia64_mca_cpu {
+ u64 mca_stack[KERNEL_STACK_SIZE/8];
+ u64 init_stack[KERNEL_STACK_SIZE/8];
+};
+
+/* Array of physical addresses of each CPU's MCA area. */
+extern unsigned long __per_cpu_mca[NR_CPUS];
+
+extern int cpe_vector;
+extern int ia64_cpe_irq;
+extern void ia64_mca_init(void);
+extern void ia64_mca_cpu_init(void *);
+extern void ia64_os_mca_dispatch(void);
+extern void ia64_os_mca_dispatch_end(void);
+extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
+extern void ia64_init_handler(struct pt_regs *,
+ struct switch_stack *,
+ struct ia64_sal_os_state *);
+extern void ia64_monarch_init_handler(void);
+extern void ia64_slave_init_handler(void);
+extern void ia64_mca_cmc_vector_setup(void);
+extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
+extern void ia64_unreg_MCA_extension(void);
+extern u64 ia64_get_rnat(u64 *);
+extern void ia64_mca_printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+struct ia64_mca_notify_die {
+ struct ia64_sal_os_state *sos;
+ int *monarch_cpu;
+ int *data;
+};
+
+DECLARE_PER_CPU(u64, ia64_mca_pal_base);
+
+#else /* __ASSEMBLY__ */
+
+#define IA64_MCA_CORRECTED 0x0 /* Error has been corrected by OS_MCA */
+#define IA64_MCA_WARM_BOOT	-1	/* Warm boot of the system needed from SAL */
+#define IA64_MCA_COLD_BOOT	-2	/* Cold boot of the system needed from SAL */
+#define IA64_MCA_HALT -3 /* System to be halted by SAL */
+
+#define IA64_INIT_RESUME 0x0 /* Resume after return from INIT */
+#define IA64_INIT_WARM_BOOT	-1	/* Warm boot of the system needed from SAL */
+
+#define IA64_MCA_SAME_CONTEXT 0x0 /* SAL to return to same context */
+#define IA64_MCA_NEW_CONTEXT -1 /* SAL to return to new context */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_MCA_H */
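
As a rough userspace sketch of how the cmcv_reg_u union above packs the CMC vector register, the following compiles standalone; it assumes gcc's little-endian bitfield allocation (as on ia64), and the vector value is hypothetical:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Userspace replica of cmcv_reg_u from mca.h. */
    typedef union cmcv_reg_u {
            uint64_t cmcv_regval;
            struct {
                    uint64_t cmcr_vector    :  8;   /* bits  7:0 */
                    uint64_t cmcr_reserved1 :  4;
                    uint64_t cmcr_ignored1  :  1;
                    uint64_t cmcr_reserved2 :  3;
                    uint64_t cmcr_mask      :  1;   /* bit   16  */
                    uint64_t cmcr_ignored2  : 47;
            } cmcv_reg_s;
    } cmcv_reg_t;

    int main(void)
    {
            cmcv_reg_t cmcv = { .cmcv_regval = 0 };

            cmcv.cmcv_reg_s.cmcr_vector = 0x52;     /* hypothetical interrupt vector */
            cmcv.cmcv_reg_s.cmcr_mask   = 1;        /* CMC interrupt masked */

            /* prints 0x0000000000010052: vector in bits 7:0, mask in bit 16 */
            printf("raw CMCV = 0x%016" PRIx64 "\n", cmcv.cmcv_regval);
            return 0;
    }
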
diff --git a/arch/ia64/include/asm/mca_asm.h b/arch/ia64/include/asm/mca_asm.h
new file mode 100644
index 000000000000..dd2a5b134390
--- /dev/null
+++ b/arch/ia64/include/asm/mca_asm.h
@@ -0,0 +1,242 @@
+/*
+ * File: mca_asm.h
+ * Purpose: Machine check handling specific defines
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) 2000 Hewlett-Packard Co.
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2002 Intel Corp.
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2005 Silicon Graphics, Inc
+ * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
+ */
+#ifndef _ASM_IA64_MCA_ASM_H
+#define _ASM_IA64_MCA_ASM_H
+
+#define PSR_IC 13
+#define PSR_I 14
+#define PSR_DT 17
+#define PSR_RT 27
+#define PSR_MC 35
+#define PSR_IT 36
+#define PSR_BN 44
+
+/*
+ * This macro converts an instruction virtual address to a physical address.
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ * 1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define INST_VA_TO_PA(addr) \
+ dep addr = 0, addr, 61, 3
+/*
+ * This macro converts a data virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ * 1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define DATA_VA_TO_PA(addr) \
+ tpa addr = addr
+/*
+ * This macro converts a data physical address to a virtual address
+ * Right now for simulation purposes the virtual addresses are
+ * direct mapped to physical addresses.
+ * 1. Put 0x7 in bits 61 thru 63.
+ */
+#define DATA_PA_TO_VA(addr,temp) \
+ mov temp = 0x7 ;; \
+ dep addr = temp, addr, 61, 3
+
+#define GET_THIS_PADDR(reg, var) \
+ mov reg = IA64_KR(PER_CPU_DATA);; \
+ addl reg = THIS_CPU(var), reg
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in physical mode with all the address
+ * translations turned off.
+ * 1. Save the current psr
+ * 2. Make sure that all the upper 32 bits are off
+ *
+ * 3. Clear the interrupt enable and interrupt state collection bits
+ * in the psr before updating the ipsr and iip.
+ *
+ * 4. Turn off the instruction, data and rse translation bits of the psr
+ * and store the new value into ipsr
+ * Also make sure that the interrupts are disabled.
+ * Ensure that we are in little endian mode.
+ * [psr.{rt, it, dt, i, be} = 0]
+ *
+ * 5. Get the physical address corresponding to the virtual address
+ * of the next instruction bundle and put it in iip.
+ *    (Using magic numbers 24 and 40 in the deposit instruction since
+ *    the IA64_SDK code directly maps the lower 24 bits of a virtual
+ *    address to a physical address).
+ *
+ * 6. Do an rfi to move the values from ipsr to psr and iip to ip.
+ */
+#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
+ mov old_psr = psr; \
+ ;; \
+ dep old_psr = 0, old_psr, 32, 32; \
+ \
+ mov ar.rsc = 0 ; \
+ ;; \
+ srlz.d; \
+ mov temp2 = ar.bspstore; \
+ ;; \
+ DATA_VA_TO_PA(temp2); \
+ ;; \
+ mov temp1 = ar.rnat; \
+ ;; \
+ mov ar.bspstore = temp2; \
+ ;; \
+ mov ar.rnat = temp1; \
+ mov temp1 = psr; \
+ mov temp2 = psr; \
+ ;; \
+ \
+ dep temp2 = 0, temp2, PSR_IC, 2; \
+ ;; \
+ mov psr.l = temp2; \
+ ;; \
+ srlz.d; \
+ dep temp1 = 0, temp1, 32, 32; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_IT, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_DT, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_RT, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_I, 1; \
+ ;; \
+ dep temp1 = 0, temp1, PSR_IC, 1; \
+ ;; \
+ dep temp1 = -1, temp1, PSR_MC, 1; \
+ ;; \
+ mov cr.ipsr = temp1; \
+ ;; \
+ LOAD_PHYSICAL(p0, temp2, start_addr); \
+ ;; \
+ mov cr.iip = temp2; \
+ mov cr.ifs = r0; \
+ DATA_VA_TO_PA(sp); \
+ DATA_VA_TO_PA(gp); \
+ ;; \
+ srlz.i; \
+ ;; \
+ nop 1; \
+ nop 2; \
+ nop 1; \
+ nop 2; \
+ rfi; \
+ ;;
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in virtual mode with all the address
+ * translations turned on.
+ * 1. Get the old saved psr
+ *
+ * 2. Clear the interrupt state collection bit in the current psr.
+ *
+ * 3. Set the instruction translation bit back in the old psr
+ * Note we have to do this since we are right now saving only the
+ *	lower 32 bits of the old psr. (Also the old psr has the data and
+ * rse translation bits on)
+ *
+ * 4. Set ipsr to this old_psr with "it" bit set and "bn" = 1.
+ *
+ * 5. Reset the current thread pointer (r13).
+ *
+ * 6. Set iip to the virtual address of the next instruction bundle.
+ *
+ * 7. Do an rfi to move ipsr to psr and iip to ip.
+ */
+
+#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
+ mov temp2 = psr; \
+ ;; \
+ mov old_psr = temp2; \
+ ;; \
+ dep temp2 = 0, temp2, PSR_IC, 2; \
+ ;; \
+ mov psr.l = temp2; \
+ mov ar.rsc = 0; \
+ ;; \
+ srlz.d; \
+ mov r13 = ar.k6; \
+ mov temp2 = ar.bspstore; \
+ ;; \
+ DATA_PA_TO_VA(temp2,temp1); \
+ ;; \
+ mov temp1 = ar.rnat; \
+ ;; \
+ mov ar.bspstore = temp2; \
+ ;; \
+ mov ar.rnat = temp1; \
+ ;; \
+ mov temp1 = old_psr; \
+ ;; \
+ mov temp2 = 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_IC, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_IT, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_DT, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_RT, 1; \
+ ;; \
+ dep temp1 = temp2, temp1, PSR_BN, 1; \
+ ;; \
+ \
+ mov cr.ipsr = temp1; \
+ movl temp2 = start_addr; \
+ ;; \
+ mov cr.iip = temp2; \
+ movl gp = __gp \
+ ;; \
+ DATA_PA_TO_VA(sp, temp1); \
+ srlz.i; \
+ ;; \
+ nop 1; \
+ nop 2; \
+ nop 1; \
+ rfi \
+ ;;
+
+/*
+ * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
+ * stacks, except that the SAL/OS state and a switch_stack are stored near the
+ * top of the MCA/INIT stack. To support concurrent entry to MCA or INIT, as
+ * well as MCA over INIT, each event needs its own SAL/OS state. All entries
+ * are 16 byte aligned.
+ *
+ * +---------------------------+
+ * | pt_regs |
+ * +---------------------------+
+ * | switch_stack |
+ * +---------------------------+
+ * | SAL/OS state |
+ * +---------------------------+
+ * | 16 byte scratch area |
+ * +---------------------------+ <-------- SP at start of C MCA handler
+ * | ..... |
+ * +---------------------------+
+ * | RBS for MCA/INIT handler |
+ * +---------------------------+
+ * | struct task for MCA/INIT |
+ * +---------------------------+ <-------- Bottom of MCA/INIT stack
+ */
+
+#define ALIGN16(x) ((x)&~15)
+#define MCA_PT_REGS_OFFSET ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
+#define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
+#define MCA_SOS_OFFSET ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
+#define MCA_SP_OFFSET ALIGN16(MCA_SOS_OFFSET-16)
+
+#endif /* _ASM_IA64_MCA_ASM_H */
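
The offset macros above carve the pt_regs, switch_stack and SAL/OS state frames off the top of the MCA stack, each 16-byte aligned, exactly as in the layout diagram. A compilable sketch of the arithmetic; the frame sizes here are assumed illustrative values, not the real asm-offsets constants:

    #include <stdio.h>

    #define KERNEL_STACK_SIZE       32768   /* assumed: 32KB kernel stacks */
    #define IA64_PT_REGS_SIZE       400     /* assumed, for illustration */
    #define IA64_SWITCH_STACK_SIZE  560     /* assumed, for illustration */
    #define IA64_SAL_OS_STATE_SIZE  136     /* assumed, for illustration */

    #define ALIGN16(x)              ((x) & ~15)
    #define MCA_PT_REGS_OFFSET      ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
    #define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
    #define MCA_SOS_OFFSET          ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
    #define MCA_SP_OFFSET           ALIGN16(MCA_SOS_OFFSET-16)

    int main(void)
    {
            /* Each frame is taken off the top of the stack, 16-byte aligned;
             * the final 16 bytes above sp are the scratch area. */
            printf("pt_regs      @ %d\n", MCA_PT_REGS_OFFSET);
            printf("switch_stack @ %d\n", MCA_SWITCH_STACK_OFFSET);
            printf("SAL/OS state @ %d\n", MCA_SOS_OFFSET);
            printf("initial sp   @ %d\n", MCA_SP_OFFSET);
            return 0;
    }
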
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
new file mode 100644
index 000000000000..7245a5781594
--- /dev/null
+++ b/arch/ia64/include/asm/meminit.h
@@ -0,0 +1,75 @@
+#ifndef meminit_h
+#define meminit_h
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+
+/*
+ * Entries defined so far:
+ * - boot param structure itself
+ * - memory map
+ * - initrd (optional)
+ * - command line string
+ * - kernel code & data
+ * - crash dumping code reserved region
+ * - Kernel memory map built from EFI memory map
+ * - ELF core header
+ *
+ * More could be added if necessary
+ */
+#define IA64_MAX_RSVD_REGIONS 8
+
+struct rsvd_region {
+ unsigned long start; /* virtual address of beginning of element */
+ unsigned long end; /* virtual address of end of element + 1 */
+};
+
+extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
+extern int num_rsvd_regions;
+
+extern void find_memory (void);
+extern void reserve_memory (void);
+extern void find_initrd (void);
+extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
+extern int filter_memory (unsigned long start, unsigned long end, void *arg);
+extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
+extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
+
+extern unsigned long vmcore_find_descriptor_size(unsigned long address);
+extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
+
+/*
+ * For rounding an address to the next IA64_GRANULE_SIZE or order
+ */
+#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
+#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
+#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
+
+#ifdef CONFIG_NUMA
+ extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
+#else
+# define call_pernode_memory(start, len, func) (*func)(start, len, 0)
+#endif
+
+#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
+
+extern int register_active_ranges(u64 start, u64 len, int nid);
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
+ extern unsigned long vmalloc_end;
+ extern struct page *vmem_map;
+ extern int find_largest_hole (u64 start, u64 end, void *arg);
+ extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
+ extern int vmemmap_find_next_valid_pfn(int, int);
+#else
+static inline int vmemmap_find_next_valid_pfn(int node, int i)
+{
+ return i + 1;
+}
+#endif
+#endif /* meminit_h */
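
GRANULEROUNDDOWN/GRANULEROUNDUP are the usual power-of-two masking idioms. A standalone sketch, assuming the common 16MB IA64_GRANULE_SIZE:

    #include <stdio.h>

    #define IA64_GRANULE_SIZE       (16UL << 20)    /* assumed: 16MB granules */

    #define GRANULEROUNDDOWN(n)     ((n) & ~(IA64_GRANULE_SIZE-1))
    #define GRANULEROUNDUP(n)       (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))

    int main(void)
    {
            unsigned long addr = 0x4123456UL;

            /* Round an arbitrary address down/up to a granule boundary. */
            printf("down: 0x%lx\n", GRANULEROUNDDOWN(addr));  /* 0x4000000 */
            printf("up:   0x%lx\n", GRANULEROUNDUP(addr));    /* 0x5000000 */
            return 0;
    }
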
diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h
new file mode 100644
index 000000000000..c73b87832a1e
--- /dev/null
+++ b/arch/ia64/include/asm/mman.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_IA64_MMAN_H
+#define _ASM_IA64_MMAN_H
+
+/*
+ * Based on <asm-i386/mman.h>.
+ *
+ * Modified 1998-2000, 2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm-generic/mman.h>
+
+#define MAP_GROWSDOWN 0x00100 /* stack-like segment */
+#define MAP_GROWSUP 0x00200 /* register stack-like segment */
+#define MAP_DENYWRITE 0x00800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */
+#define MAP_LOCKED 0x02000 /* pages are locked */
+#define MAP_NORESERVE 0x04000 /* don't check for reservations */
+#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+#define arch_mmap_check ia64_mmap_check
+int ia64_mmap_check(unsigned long addr, unsigned long len,
+ unsigned long flags);
+#endif
+#endif
+
+#endif /* _ASM_IA64_MMAN_H */
diff --git a/arch/ia64/include/asm/mmu.h b/arch/ia64/include/asm/mmu.h
new file mode 100644
index 000000000000..611432ba579c
--- /dev/null
+++ b/arch/ia64/include/asm/mmu.h
@@ -0,0 +1,13 @@
+#ifndef __MMU_H
+#define __MMU_H
+
+/*
+ * Type for a context number. We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
+ */
+typedef volatile unsigned long mm_context_t;
+
+typedef unsigned long nv_mm_context_t;
+
+#endif
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
new file mode 100644
index 000000000000..040bc87db930
--- /dev/null
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -0,0 +1,198 @@
+#ifndef _ASM_IA64_MMU_CONTEXT_H
+#define _ASM_IA64_MMU_CONTEXT_H
+
+/*
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * Routines to manage the allocation of task context numbers. Task context
+ * numbers are used to reduce or eliminate the need to perform TLB flushes
+ * due to context switches. Context numbers are implemented using IA-64
+ * region ids. Since the IA-64 TLB does not consider the region number when
+ * performing a TLB lookup, we need to assign a unique region id to each
+ * region in a process. We use the least significant three bits in a region
+ * id for this purpose.
+ */
+
+#define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */
+
+#define ia64_rid(ctx,addr) (((ctx) << 3) | (addr >> 61))
+
+# include <asm/page.h>
+# ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+#include <asm/processor.h>
+#include <asm-generic/mm_hooks.h>
+
+struct ia64_ctx {
+ spinlock_t lock;
+ unsigned int next; /* next context number to use */
+ unsigned int limit; /* available free range */
+ unsigned int max_ctx; /* max. context value supported by all CPUs */
+ /* call wrap_mmu_context when next >= max */
+ unsigned long *bitmap; /* bitmap size is max_ctx+1 */
+ unsigned long *flushmap;/* pending rid to be flushed */
+};
+
+extern struct ia64_ctx ia64_ctx;
+DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
+
+extern void mmu_context_init (void);
+extern void wrap_mmu_context (struct mm_struct *mm);
+
+static inline void
+enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/*
+ * When the context counter wraps around all TLBs need to be flushed because
+ * an old context number might have been reused. This is signalled by the
+ * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
+ * below. Called by activate_mm(). <efocht@ess.nec.de>
+ */
+static inline void
+delayed_tlb_flush (void)
+{
+ extern void local_flush_tlb_all (void);
+ unsigned long flags;
+
+ if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
+ spin_lock_irqsave(&ia64_ctx.lock, flags);
+ if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+ local_flush_tlb_all();
+ __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+ }
+ spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+ }
+}
+
+static inline nv_mm_context_t
+get_mmu_context (struct mm_struct *mm)
+{
+ unsigned long flags;
+ nv_mm_context_t context = mm->context;
+
+ if (likely(context))
+ goto out;
+
+ spin_lock_irqsave(&ia64_ctx.lock, flags);
+ /* re-check, now that we've got the lock: */
+ context = mm->context;
+ if (context == 0) {
+ cpus_clear(mm->cpu_vm_mask);
+ if (ia64_ctx.next >= ia64_ctx.limit) {
+ ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+ ia64_ctx.max_ctx, ia64_ctx.next);
+ ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+ ia64_ctx.max_ctx, ia64_ctx.next);
+ if (ia64_ctx.next >= ia64_ctx.max_ctx)
+ wrap_mmu_context(mm);
+ }
+ mm->context = context = ia64_ctx.next++;
+ __set_bit(context, ia64_ctx.bitmap);
+ }
+ spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+out:
+ /*
+ * Ensure we're not starting to use "context" before any old
+ * uses of it are gone from our TLB.
+ */
+ delayed_tlb_flush();
+
+ return context;
+}
+
+/*
+ * Initialize context number to some sane value. MM is guaranteed to be a
+ * brand-new address-space, so no TLB flushing is needed, ever.
+ */
+static inline int
+init_new_context (struct task_struct *p, struct mm_struct *mm)
+{
+ mm->context = 0;
+ return 0;
+}
+
+static inline void
+destroy_context (struct mm_struct *mm)
+{
+ /* Nothing to do. */
+}
+
+static inline void
+reload_context (nv_mm_context_t context)
+{
+ unsigned long rid;
+ unsigned long rid_incr = 0;
+ unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
+
+ old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
+ rid = context << 3; /* make space for encoding the region number */
+ rid_incr = 1 << 8;
+
+ /* encode the region id, preferred page size, and VHPT enable bit: */
+ rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
+ rr1 = rr0 + 1*rid_incr;
+ rr2 = rr0 + 2*rid_incr;
+ rr3 = rr0 + 3*rid_incr;
+ rr4 = rr0 + 4*rid_incr;
+#ifdef CONFIG_HUGETLB_PAGE
+ rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
+
+# if RGN_HPAGE != 4
+# error "reload_context assumes RGN_HPAGE is 4"
+# endif
+#endif
+
+ ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+}
+
+/*
+ * Must be called with preemption off
+ */
+static inline void
+activate_context (struct mm_struct *mm)
+{
+ nv_mm_context_t context;
+
+ do {
+ context = get_mmu_context(mm);
+ if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+ cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+ reload_context(context);
+ /*
+ * in the unlikely event of a TLB-flush by another thread,
+ * redo the load.
+ */
+ } while (unlikely(context != mm->context));
+}
+
+#define deactivate_mm(tsk,mm) do { } while (0)
+
+/*
+ * Switch from address space PREV to address space NEXT.
+ */
+static inline void
+activate_mm (struct mm_struct *prev, struct mm_struct *next)
+{
+ /*
+ * We may get interrupts here, but that's OK because interrupt
+ * handlers cannot touch user-space.
+ */
+ ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
+ activate_context(next);
+}
+
+#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm)
+
+# endif /* ! __ASSEMBLY__ */
+#endif /* _ASM_IA64_MMU_CONTEXT_H */
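
The rid construction is the heart of this file: ia64_rid() widens the context number by three bits and puts the region number (address bits 63:61) in the low bits, so each of a process's eight regions gets a distinct region id. A userspace rendering (with an explicit cast added so the shift is well defined):

    #include <stdio.h>

    #define ia64_rid(ctx, addr)  (((ctx) << 3) | ((unsigned long)(addr) >> 61))

    int main(void)
    {
            unsigned long ctx = 42; /* a context number as handed out by get_mmu_context() */

            /* Two addresses in different regions map to different rids. */
            printf("rid(region 0) = %lu\n", ia64_rid(ctx, 0x0000000000001000UL)); /* 336 */
            printf("rid(region 2) = %lu\n", ia64_rid(ctx, 0x4000000000001000UL)); /* 338 */
            return 0;
    }
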
diff --git a/arch/ia64/include/asm/mmzone.h b/arch/ia64/include/asm/mmzone.h
new file mode 100644
index 000000000000..34efe88eb849
--- /dev/null
+++ b/arch/ia64/include/asm/mmzone.h
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000,2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2002 NEC Corp.
+ * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
+ */
+#ifndef _ASM_IA64_MMZONE_H
+#define _ASM_IA64_MMZONE_H
+
+#include <linux/numa.h>
+#include <asm/page.h>
+#include <asm/meminit.h>
+
+#ifdef CONFIG_NUMA
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+#ifdef CONFIG_NUMA
+ extern int paddr_to_nid(unsigned long);
+ int nid = paddr_to_nid(pfn << PAGE_SHIFT);
+ if (nid < 0)
+ return 0;
+ else
+ return nid;
+#else
+ return 0;
+#endif
+}
+
+#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+extern int early_pfn_to_nid(unsigned long pfn);
+#endif
+
+#ifdef CONFIG_IA64_DIG /* DIG systems are small */
+# define MAX_PHYSNODE_ID 8
+# define NR_NODE_MEMBLKS (MAX_NUMNODES * 8)
+#else /* sn2 is the biggest case, so we use that if !DIG */
+# define MAX_PHYSNODE_ID 2048
+# define NR_NODE_MEMBLKS (MAX_NUMNODES * 4)
+#endif
+
+#else /* CONFIG_NUMA */
+# define NR_NODE_MEMBLKS (MAX_NUMNODES * 4)
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_IA64_MMZONE_H */
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
new file mode 100644
index 000000000000..d2da61e4c49b
--- /dev/null
+++ b/arch/ia64/include/asm/module.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_IA64_MODULE_H
+#define _ASM_IA64_MODULE_H
+
+/*
+ * IA-64-specific support for kernel module loader.
+ *
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+struct elf64_shdr;			/* forward declaration */
+
+struct mod_arch_specific {
+ struct elf64_shdr *core_plt; /* core PLT section */
+ struct elf64_shdr *init_plt; /* init PLT section */
+ struct elf64_shdr *got; /* global offset table */
+ struct elf64_shdr *opd; /* official procedure descriptors */
+ struct elf64_shdr *unwind; /* unwind-table section */
+ unsigned long gp; /* global-pointer for module */
+
+ void *core_unw_table; /* core unwind-table cookie returned by unwinder */
+ void *init_unw_table; /* init unwind-table cookie returned by unwinder */
+ unsigned int next_got_entry; /* index of next available got entry */
+};
+
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Ehdr Elf64_Ehdr
+
+#define MODULE_PROC_FAMILY "ia64"
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY \
+ "gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)
+
+#define ARCH_SHF_SMALL SHF_IA_64_SHORT
+
+#endif /* _ASM_IA64_MODULE_H */
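
MODULE_ARCH_VERMAGIC relies on two-level stringization plus string-literal concatenation to bake the compiler version into the vermagic string. A minimal standalone rendering, with __stringify spelled out as in <linux/stringify.h>:

    #include <stdio.h>

    #define __stringify_1(x)        #x
    #define __stringify(x)          __stringify_1(x)

    #define MODULE_PROC_FAMILY      "ia64"
    #define MODULE_ARCH_VERMAGIC    MODULE_PROC_FAMILY \
            "gcc-" __stringify(__GNUC__) "." __stringify(__GNUC_MINOR__)

    int main(void)
    {
            /* e.g. "ia64gcc-4.3" when built with gcc 4.3 */
            printf("%s\n", MODULE_ARCH_VERMAGIC);
            return 0;
    }
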
diff --git a/arch/ia64/include/asm/msgbuf.h b/arch/ia64/include/asm/msgbuf.h
new file mode 100644
index 000000000000..6c64c0d2aae1
--- /dev/null
+++ b/arch/ia64/include/asm/msgbuf.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_IA64_MSGBUF_H
+#define _ASM_IA64_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _ASM_IA64_MSGBUF_H */
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
new file mode 100644
index 000000000000..bed73a643a56
--- /dev/null
+++ b/arch/ia64/include/asm/mutex.h
@@ -0,0 +1,92 @@
+/*
+ * ia64 implementation of the mutex fastpath.
+ *
+ * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
+ *
+ */
+
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+ fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+ return fail_fn(count);
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
+ * it needs to return 0.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ int ret = ia64_fetchadd4_rel(count, 1);
+ if (unlikely(ret < 0))
+ fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (cmpxchg_acq(count, 1, 0) == 1)
+ return 1;
+ return 0;
+}
+
+#endif
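
The fastpath protocol here is: count == 1 means unlocked, 0 means locked, and < 0 means locked with waiters, where fetchadd returns the value *before* the add. A userspace model of the two fastpaths; plain non-atomic ints are used, since only the value protocol is being illustrated:

    #include <stdio.h>

    static int count = 1;                   /* stand-in for the atomic_t */

    static int fetchadd(int *v, int inc)    /* models ia64_fetchadd4_acq/_rel */
    {
            int old = *v;                   /* fetchadd returns the old value */
            *v += inc;
            return old;
    }

    int main(void)
    {
            /* __mutex_fastpath_lock: old value != 1 -> contended, slow path */
            if (fetchadd(&count, -1) != 1)
                    printf("lock: slow path\n");
            else
                    printf("lock: fastpath, count now %d\n", count);

            /* __mutex_fastpath_unlock: old value < 0 -> waiters, slow path */
            if (fetchadd(&count, 1) < 0)
                    printf("unlock: slow path\n");
            else
                    printf("unlock: fastpath, count now %d\n", count);
            return 0;
    }
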
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
new file mode 100644
index 000000000000..c8efbf7b849e
--- /dev/null
+++ b/arch/ia64/include/asm/native/inst.h
@@ -0,0 +1,175 @@
+/******************************************************************************
+ * arch/ia64/include/asm/native/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define DO_SAVE_MIN IA64_NATIVE_DO_SAVE_MIN
+
+#define __paravirt_switch_to ia64_native_switch_to
+#define __paravirt_leave_syscall ia64_native_leave_syscall
+#define __paravirt_work_processed_syscall ia64_native_work_processed_syscall
+#define __paravirt_leave_kernel ia64_native_leave_kernel
+#define __paravirt_pending_syscall_end ia64_work_pending_syscall_end
+#define __paravirt_work_processed_syscall_target \
+ ia64_work_processed_syscall
+
+#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
+# define PARAVIRT_POISON 0xdeadbeefbaadf00d
+# define CLOBBER(clob) \
+ ;; \
+ movl clob = PARAVIRT_POISON; \
+ ;;
+#else
+# define CLOBBER(clob) /* nothing */
+#endif
+
+#define MOV_FROM_IFA(reg) \
+ mov reg = cr.ifa
+
+#define MOV_FROM_ITIR(reg) \
+ mov reg = cr.itir
+
+#define MOV_FROM_ISR(reg) \
+ mov reg = cr.isr
+
+#define MOV_FROM_IHA(reg) \
+ mov reg = cr.iha
+
+#define MOV_FROM_IPSR(pred, reg) \
+(pred) mov reg = cr.ipsr
+
+#define MOV_FROM_IIM(reg) \
+ mov reg = cr.iim
+
+#define MOV_FROM_IIP(reg) \
+ mov reg = cr.iip
+
+#define MOV_FROM_IVR(reg, clob) \
+ mov reg = cr.ivr \
+ CLOBBER(clob)
+
+#define MOV_FROM_PSR(pred, reg, clob) \
+(pred) mov reg = psr \
+ CLOBBER(clob)
+
+#define MOV_TO_IFA(reg, clob) \
+ mov cr.ifa = reg \
+ CLOBBER(clob)
+
+#define MOV_TO_ITIR(pred, reg, clob) \
+(pred) mov cr.itir = reg \
+ CLOBBER(clob)
+
+#define MOV_TO_IHA(pred, reg, clob) \
+(pred) mov cr.iha = reg \
+ CLOBBER(clob)
+
+#define MOV_TO_IPSR(pred, reg, clob) \
+(pred) mov cr.ipsr = reg \
+ CLOBBER(clob)
+
+#define MOV_TO_IFS(pred, reg, clob) \
+(pred) mov cr.ifs = reg \
+ CLOBBER(clob)
+
+#define MOV_TO_IIP(reg, clob) \
+ mov cr.iip = reg \
+ CLOBBER(clob)
+
+#define MOV_TO_KR(kr, reg, clob0, clob1) \
+ mov IA64_KR(kr) = reg \
+ CLOBBER(clob0) \
+ CLOBBER(clob1)
+
+#define ITC_I(pred, reg, clob) \
+(pred) itc.i reg \
+ CLOBBER(clob)
+
+#define ITC_D(pred, reg, clob) \
+(pred) itc.d reg \
+ CLOBBER(clob)
+
+#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
+(pred_i) itc.i reg; \
+(pred_d) itc.d reg \
+ CLOBBER(clob)
+
+#define THASH(pred, reg0, reg1, clob) \
+(pred) thash reg0 = reg1 \
+ CLOBBER(clob)
+
+#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
+ ssm psr.ic | PSR_DEFAULT_BITS \
+ CLOBBER(clob0) \
+ CLOBBER(clob1) \
+ ;; \
+	srlz.i /* guarantee that interruption collection is on */	\
+ ;;
+
+#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
+ ssm psr.ic \
+ CLOBBER(clob0) \
+ CLOBBER(clob1) \
+ ;; \
+ srlz.d
+
+#define RSM_PSR_IC(clob) \
+ rsm psr.ic \
+ CLOBBER(clob)
+
+#define SSM_PSR_I(pred, pred_clob, clob) \
+(pred) ssm psr.i \
+ CLOBBER(clob)
+
+#define RSM_PSR_I(pred, clob0, clob1) \
+(pred) rsm psr.i \
+ CLOBBER(clob0) \
+ CLOBBER(clob1)
+
+#define RSM_PSR_I_IC(clob0, clob1, clob2) \
+ rsm psr.i | psr.ic \
+ CLOBBER(clob0) \
+ CLOBBER(clob1) \
+ CLOBBER(clob2)
+
+#define RSM_PSR_DT \
+ rsm psr.dt
+
+#define SSM_PSR_DT_AND_SRLZ_I \
+ ssm psr.dt \
+ ;; \
+ srlz.i
+
+#define BSW_0(clob0, clob1, clob2) \
+ bsw.0 \
+ CLOBBER(clob0) \
+ CLOBBER(clob1) \
+ CLOBBER(clob2)
+
+#define BSW_1(clob0, clob1) \
+ bsw.1 \
+ CLOBBER(clob0) \
+ CLOBBER(clob1)
+
+#define COVER \
+ cover
+
+#define RFI \
+ rfi
diff --git a/arch/ia64/include/asm/native/irq.h b/arch/ia64/include/asm/native/irq.h
new file mode 100644
index 000000000000..887a228e2edb
--- /dev/null
+++ b/arch/ia64/include/asm/native/irq.h
@@ -0,0 +1,33 @@
+/******************************************************************************
+ * arch/ia64/include/asm/native/irq.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_IA64_NATIVE_IRQ_H
+#define _ASM_IA64_NATIVE_IRQ_H
+
+#define NR_VECTORS 256
+
+#if (NR_VECTORS + 32 * NR_CPUS) < 1024
+#define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
+#else
+#define IA64_NATIVE_NR_IRQS 1024
+#endif
+
+#endif /* _ASM_IA64_NATIVE_IRQ_H */
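
The preprocessor arithmetic above reserves one irq per vector plus 32 per possible CPU, clamped at 1024. A sketch with an assumed NR_CPUS:

    #include <stdio.h>

    #define NR_VECTORS      256
    #define NR_CPUS         64      /* assumed config value */

    #if (NR_VECTORS + 32 * NR_CPUS) < 1024
    #define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
    #else
    #define IA64_NATIVE_NR_IRQS 1024
    #endif

    int main(void)
    {
            /* 256 + 32*64 = 2304 here, so the 1024 clamp applies. */
            printf("NR_IRQS = %d\n", IA64_NATIVE_NR_IRQS);
            return 0;
    }
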
diff --git a/arch/ia64/include/asm/nodedata.h b/arch/ia64/include/asm/nodedata.h
new file mode 100644
index 000000000000..2fb337b0e9b7
--- /dev/null
+++ b/arch/ia64/include/asm/nodedata.h
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2002 NEC Corp.
+ * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
+ */
+#ifndef _ASM_IA64_NODEDATA_H
+#define _ASM_IA64_NODEDATA_H
+
+#include <linux/numa.h>
+
+#include <asm/percpu.h>
+#include <asm/mmzone.h>
+
+#ifdef CONFIG_NUMA
+
+/*
+ * Node Data. One of these structures is located on each node of a NUMA system.
+ */
+
+struct pglist_data;
+struct ia64_node_data {
+ short active_cpu_count;
+ short node;
+ struct pglist_data *pg_data_ptrs[MAX_NUMNODES];
+};
+
+
+/*
+ * Return a pointer to the node_data structure for the executing cpu.
+ */
+#define local_node_data (local_cpu_data->node_data)
+
+/*
+ * Given a node id, return a pointer to the pg_data_t for the node.
+ *
+ * NODE_DATA - should be used in all code not related to system
+ * initialization. It uses pernode data structures to minimize
+ *	offnode memory references. However, these structures are not
+ * present during boot. This macro can be used once cpu_init
+ * completes.
+ */
+#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
+
+/*
+ * LOCAL_DATA_ADDR - Calculates the address of another node's
+ *	"local_node_data" at hot-plug time. The local_node_data
+ *	is pointed to by per_cpu_page and is normally used only
+ *	for the executing cpu. However, when a new node is
+ *	hot-added, the addresses of the local data for the other
+ *	nodes are needed so that all of them can be updated.
+ */
+#define LOCAL_DATA_ADDR(pgdat) \
+ ((struct ia64_node_data *)((u64)(pgdat) + \
+ L1_CACHE_ALIGN(sizeof(struct pglist_data))))
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_IA64_NODEDATA_H */
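
LOCAL_DATA_ADDR exploits the fact that each node's ia64_node_data is allocated immediately after its cache-aligned pglist_data. A sketch of the arithmetic on a 64-bit host; the line size and struct size below are assumed, illustrative values:

    #include <stdio.h>

    #define L1_CACHE_BYTES          128     /* assumed ia64 line size */
    #define L1_CACHE_ALIGN(x)       (((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

    int main(void)
    {
            unsigned long pgdat = 0xe000000001000000UL;     /* hypothetical pgdat address */
            unsigned long pglist_data_size = 5000;          /* assumed sizeof(struct pglist_data) */

            /* ia64_node_data sits right after the cache-aligned pglist_data. */
            printf("node_data @ 0x%lx\n", pgdat + L1_CACHE_ALIGN(pglist_data_size));
            return 0;
    }
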
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
new file mode 100644
index 000000000000..3499ff57bf42
--- /dev/null
+++ b/arch/ia64/include/asm/numa.h
@@ -0,0 +1,82 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file contains NUMA specific prototypes and definitions.
+ *
+ * 2002/08/05 Erich Focht <efocht@ess.nec.de>
+ *
+ */
+#ifndef _ASM_IA64_NUMA_H
+#define _ASM_IA64_NUMA_H
+
+
+#ifdef CONFIG_NUMA
+
+#include <linux/cache.h>
+#include <linux/cpumask.h>
+#include <linux/numa.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+
+#include <asm/mmzone.h>
+
+#define NUMA_NO_NODE -1
+
+extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+extern pg_data_t *pgdat_list[MAX_NUMNODES];
+
+/* Stuff below this line could be architecture independent */
+
+extern int num_node_memblks; /* total number of memory chunks */
+
+/*
+ * List of node memory chunks. Filled when parsing SRAT table to
+ * obtain information about memory nodes.
+*/
+
+struct node_memblk_s {
+ unsigned long start_paddr;
+ unsigned long size;
+ int nid; /* which logical node contains this chunk? */
+ int bank; /* which mem bank on this node */
+};
+
+struct node_cpuid_s {
+ u16 phys_id; /* id << 8 | eid */
+ int nid; /* logical node containing this CPU */
+};
+
+extern struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
+extern struct node_cpuid_s node_cpuid[NR_CPUS];
+
+/*
+ * ACPI 2.0 SLIT (System Locality Information Table)
+ * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
+ *
+ * This is a matrix with "distances" between nodes; they should be
+ * proportional to the memory access latency ratios.
+ */
+
+extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
+#define node_distance(from,to) (numa_slit[(from) * num_online_nodes() + (to)])
+
+extern int paddr_to_nid(unsigned long paddr);
+
+#define local_nodeid (cpu_to_node_map[smp_processor_id()])
+
+extern void map_cpu_to_node(int cpu, int nid);
+extern void unmap_cpu_from_node(int cpu, int nid);
+
+
+#else /* !CONFIG_NUMA */
+#define map_cpu_to_node(cpu, nid) do{}while(0)
+#define unmap_cpu_from_node(cpu, nid) do{}while(0)
+
+#define paddr_to_nid(addr) 0
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _ASM_IA64_NUMA_H */
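
node_distance() indexes the flat SLIT byte matrix row-major. A self-contained sketch with a hand-built two-node table, where 10 is the ACPI value for local access:

    #include <stdio.h>

    #define MAX_NUMNODES    2

    static unsigned char numa_slit[MAX_NUMNODES * MAX_NUMNODES] = {
            10, 20,         /* node 0 -> {0, 1} */
            20, 10,         /* node 1 -> {0, 1} */
    };

    static int num_online_nodes(void) { return MAX_NUMNODES; }

    #define node_distance(from, to) (numa_slit[(from) * num_online_nodes() + (to)])

    int main(void)
    {
            printf("0->0: %d, 0->1: %d\n", node_distance(0, 0), node_distance(0, 1));
            return 0;
    }
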
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
new file mode 100644
index 000000000000..5f271bc712ee
--- /dev/null
+++ b/arch/ia64/include/asm/page.h
@@ -0,0 +1,223 @@
+#ifndef _ASM_IA64_PAGE_H
+#define _ASM_IA64_PAGE_H
+/*
+ * Pagetable related stuff.
+ *
+ * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/intrinsics.h>
+#include <asm/types.h>
+
+/*
+ * The top three bits of an IA64 address are its Region Number.
+ * Different regions are assigned to different purposes.
+ */
+#define RGN_SHIFT (61)
+#define RGN_BASE(r) (__IA64_UL_CONST(r)<<RGN_SHIFT)
+#define RGN_BITS (RGN_BASE(-1))
+
+#define RGN_KERNEL 7 /* Identity mapped region */
+#define RGN_UNCACHED 6 /* Identity mapped I/O region */
+#define RGN_GATE 5 /* Gate page, Kernel text, etc */
+#define RGN_HPAGE 4 /* For Huge TLB pages */
+
+/*
+ * PAGE_SHIFT determines the actual kernel page size.
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define PAGE_SHIFT 12
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define PAGE_SHIFT 13
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define PAGE_SHIFT 14
+#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
+# define PAGE_SHIFT 16
+#else
+# error Unsupported page size!
+#endif
+
+#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */
+#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
+
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define HPAGE_REGION_BASE RGN_BASE(RGN_HPAGE)
+# define HPAGE_SHIFT hpage_shift
+# define HPAGE_SHIFT_DEFAULT	28	/* check the ia64 SDM for architecturally supported sizes */
+# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
+# define HPAGE_MASK (~(HPAGE_SIZE - 1))
+
+# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifdef __ASSEMBLY__
+# define __pa(x) ((x) - PAGE_OFFSET)
+# define __va(x) ((x) + PAGE_OFFSET)
+#else /* !__ASSEMBLY__ */
+# define STRICT_MM_TYPECHECKS
+
+extern void clear_page (void *page);
+extern void copy_page (void *to, void *from);
+
+/*
+ * clear_user_page() and copy_user_page() can't be inline functions because
+ * flush_dcache_page() can't be defined until later...
+ */
+#define clear_user_page(addr, vaddr, page) \
+do { \
+ clear_page(addr); \
+ flush_dcache_page(page); \
+} while (0)
+
+#define copy_user_page(to, from, vaddr, page) \
+do { \
+ copy_page((to), (from)); \
+ flush_dcache_page(page); \
+} while (0)
+
+
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+({ \
+ struct page *page = alloc_page_vma( \
+ GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \
+ if (page) \
+ flush_dcache_page(page); \
+ page; \
+})
+
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+extern int ia64_pfn_valid (unsigned long pfn);
+#else
+# define ia64_pfn_valid(pfn) 1
+#endif
+
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+extern struct page *vmem_map;
+#ifdef CONFIG_DISCONTIGMEM
+# define page_to_pfn(page) ((unsigned long) (page - vmem_map))
+# define pfn_to_page(pfn) (vmem_map + (pfn))
+#else
+# include <asm-generic/memory_model.h>
+#endif
+#else
+# include <asm-generic/memory_model.h>
+#endif
+
+#ifdef CONFIG_FLATMEM
+# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+#elif defined(CONFIG_DISCONTIGMEM)
+extern unsigned long min_low_pfn;
+extern unsigned long max_low_pfn;
+# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
+#endif
+
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+typedef union ia64_va {
+ struct {
+ unsigned long off : 61; /* intra-region offset */
+ unsigned long reg : 3; /* region number */
+ } f;
+ unsigned long l;
+ void *p;
+} ia64_va;
+
+/*
+ * Note: These macros depend on the fact that PAGE_OFFSET has all
+ * region bits set to 1 and all other bits set to zero. They are
+ * expressed in this way to ensure they result in a single "dep"
+ * instruction.
+ */
+#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
+
+#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
+#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
+
+#ifdef CONFIG_HUGETLB_PAGE
+# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
+ | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
+# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+extern unsigned int hpage_shift;
+#endif
+
+static __inline__ int
+get_order (unsigned long size)
+{
+ long double d = size - 1;
+ long order;
+
+ order = ia64_getf_exp(d);
+ order = order - PAGE_SHIFT - 0xffff + 1;
+ if (order < 0)
+ order = 0;
+ return order;
+}
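
get_order() above avoids a loop by reading the binary exponent of (size - 1) converted to long double: getf.exp returns the register-format biased exponent, and subtracting the 0xffff bias and PAGE_SHIFT (plus one) yields the order. A portable model using frexp(), whose exponent is floor(log2(x)) + 1, assuming 16KB pages:

    #include <math.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14   /* assumed: 16KB pages */

    static int get_order(unsigned long size)
    {
            int exp;
            long order;

            frexp((double)(size - 1), &exp);    /* 2^(exp-1) <= size-1 < 2^exp */
            order = exp - PAGE_SHIFT;
            return order < 0 ? 0 : order;
    }

    int main(void)
    {
            printf("get_order(1)     = %d\n", get_order(1));       /* 0 */
            printf("get_order(16384) = %d\n", get_order(16384));   /* 0 */
            printf("get_order(16385) = %d\n", get_order(16385));   /* 1 */
            printf("get_order(65536) = %d\n", get_order(65536));   /* 2 */
            return 0;
    }
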
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef STRICT_MM_TYPECHECKS
+ /*
+ * These are used to make use of C type-checking..
+ */
+ typedef struct { unsigned long pte; } pte_t;
+ typedef struct { unsigned long pmd; } pmd_t;
+#ifdef CONFIG_PGTABLE_4
+ typedef struct { unsigned long pud; } pud_t;
+#endif
+ typedef struct { unsigned long pgd; } pgd_t;
+ typedef struct { unsigned long pgprot; } pgprot_t;
+ typedef struct page *pgtable_t;
+
+# define pte_val(x) ((x).pte)
+# define pmd_val(x) ((x).pmd)
+#ifdef CONFIG_PGTABLE_4
+# define pud_val(x) ((x).pud)
+#endif
+# define pgd_val(x) ((x).pgd)
+# define pgprot_val(x) ((x).pgprot)
+
+# define __pte(x) ((pte_t) { (x) } )
+# define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else /* !STRICT_MM_TYPECHECKS */
+ /*
+ * .. while these make it easier on the compiler
+ */
+# ifndef __ASSEMBLY__
+ typedef unsigned long pte_t;
+ typedef unsigned long pmd_t;
+ typedef unsigned long pgd_t;
+ typedef unsigned long pgprot_t;
+ typedef struct page *pgtable_t;
+# endif
+
+# define pte_val(x) (x)
+# define pmd_val(x) (x)
+# define pgd_val(x) (x)
+# define pgprot_val(x) (x)
+
+# define __pte(x) (x)
+# define __pgd(x) (x)
+# define __pgprot(x) (x)
+#endif /* !STRICT_MM_TYPECHECKS */
+
+#define PAGE_OFFSET RGN_BASE(RGN_KERNEL)
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
+ (((current->personality & READ_IMPLIES_EXEC) != 0) \
+ ? VM_EXEC : 0))
+
+#endif /* _ASM_IA64_PAGE_H */
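
The __pa()/__va() definitions above deserve a standalone illustration: writing the 3-bit reg field rewrites only the top three address bits, which gcc compiles to a single dep instruction. A userspace sketch that returns unsigned long from __va for easy printing, using GNU statement expressions as in the original:

    #include <stdio.h>

    typedef union ia64_va {
            struct {
                    unsigned long off : 61; /* intra-region offset */
                    unsigned long reg :  3; /* region number */
            } f;
            unsigned long l;
    } ia64_va;

    #define __pa(x) ({ ia64_va _v; _v.l = (long)(x); _v.f.reg = 0;  _v.l; })
    #define __va(x) ({ ia64_va _v; _v.l = (long)(x); _v.f.reg = -1; _v.l; })

    int main(void)
    {
            unsigned long kaddr = 0xe000000004000000UL; /* region-7 identity address */

            printf("__pa = 0x%016lx\n", __pa(kaddr));        /* 0x0000000004000000 */
            printf("__va = 0x%016lx\n", __va(__pa(kaddr)));  /* back to region 7 */
            return 0;
    }
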
diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h
new file mode 100644
index 000000000000..67b02901ead4
--- /dev/null
+++ b/arch/ia64/include/asm/pal.h
@@ -0,0 +1,1827 @@
+#ifndef _ASM_IA64_PAL_H
+#define _ASM_IA64_PAL_H
+
+/*
+ * Processor Abstraction Layer definitions.
+ *
+ * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0
+ * chapter 11 IA-64 Processor Abstraction Layer
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ * Copyright (C) 2008 Silicon Graphics, Inc. (SGI)
+ *
+ * 99/10/01 davidm Make sure we pass zero for reserved parameters.
+ * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6.
+ * 00/03/23 cfleck Modified processor min-state save area to match updated PAL & SAL info
+ * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added
+ * 00/05/25 eranian Support for stack calls, and static physical calls
+ * 00/06/18 eranian Support for stacked physical calls
+ * 06/10/26 rja Support for Intel Itanium Architecture Software Developer's
+ * Manual Rev 2.2 (Jan 2006)
+ */
+
+/*
+ * Note that some of these calls use a static-register only calling
+ * convention which has nothing to do with the regular calling
+ * convention.
+ */
+#define PAL_CACHE_FLUSH 1 /* flush i/d cache */
+#define PAL_CACHE_INFO 2 /* get detailed i/d cache info */
+#define PAL_CACHE_INIT 3 /* initialize i/d cache */
+#define PAL_CACHE_SUMMARY 4 /* get summary of cache hierarchy */
+#define PAL_MEM_ATTRIB 5 /* list supported memory attributes */
+#define PAL_PTCE_INFO 6 /* purge TLB info */
+#define PAL_VM_INFO 7 /* return supported virtual memory features */
+#define PAL_VM_SUMMARY 8 /* return summary on supported vm features */
+#define PAL_BUS_GET_FEATURES 9 /* return processor bus interface features settings */
+#define PAL_BUS_SET_FEATURES 10 /* set processor bus features */
+#define PAL_DEBUG_INFO 11 /* get number of debug registers */
+#define PAL_FIXED_ADDR		12	/* get fixed component of processor's directed address */
+#define PAL_FREQ_BASE 13 /* base frequency of the platform */
+#define PAL_FREQ_RATIOS 14 /* ratio of processor, bus and ITC frequency */
+#define PAL_PERF_MON_INFO 15 /* return performance monitor info */
+#define PAL_PLATFORM_ADDR 16 /* set processor interrupt block and IO port space addr */
+#define PAL_PROC_GET_FEATURES 17 /* get configurable processor features & settings */
+#define PAL_PROC_SET_FEATURES 18 /* enable/disable configurable processor features */
+#define PAL_RSE_INFO 19 /* return rse information */
+#define PAL_VERSION 20 /* return version of PAL code */
+#define PAL_MC_CLEAR_LOG 21 /* clear all processor log info */
+#define PAL_MC_DRAIN 22 /* drain operations which could result in an MCA */
+#define PAL_MC_EXPECTED 23 /* set/reset expected MCA indicator */
+#define PAL_MC_DYNAMIC_STATE 24 /* get processor dynamic state */
+#define PAL_MC_ERROR_INFO 25 /* get processor MCA info and static state */
+#define PAL_MC_RESUME 26 /* Return to interrupted process */
+#define PAL_MC_REGISTER_MEM 27 /* Register memory for PAL to use during MCAs and inits */
+#define PAL_HALT 28 /* enter the low power HALT state */
+#define PAL_HALT_LIGHT 29 /* enter the low power light halt state*/
+#define PAL_COPY_INFO 30 /* returns info needed to relocate PAL */
+#define PAL_CACHE_LINE_INIT 31 /* init tags & data of cache line */
+#define PAL_PMI_ENTRYPOINT 32 /* register PMI memory entry points with the processor */
+#define PAL_ENTER_IA_32_ENV 33 /* enter IA-32 system environment */
+#define PAL_VM_PAGE_SIZE 34 /* return vm TC and page walker page sizes */
+
+#define PAL_MEM_FOR_TEST 37 /* get amount of memory needed for late processor test */
+#define PAL_CACHE_PROT_INFO 38 /* get i/d cache protection info */
+#define PAL_REGISTER_INFO 39 /* return AR and CR register information*/
+#define PAL_SHUTDOWN 40 /* enter processor shutdown state */
+#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */
+#define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */
+#define PAL_CACHE_SHARED_INFO	43	/* returns information on caches shared by a logical processor */
+#define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */
+#define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */
+#define PAL_VP_INFO 50 /* Information about virtual processor features */
+#define PAL_MC_HW_TRACKING 51 /* Hardware tracking status */
+
+#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
+#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
+#define PAL_TEST_PROC 258 /* perform late processor self-test */
+#define PAL_CACHE_READ 259 /* read tag & data of cacheline for diagnostic testing */
+#define PAL_CACHE_WRITE 260 /* write tag & data of cacheline for diagnostic testing */
+#define PAL_VM_TR_READ 261 /* read contents of translation register */
+#define PAL_GET_PSTATE 262 /* get the current P-state */
+#define PAL_SET_PSTATE 263 /* set the P-state */
+#define PAL_BRAND_INFO 274 /* Processor branding information */
+
+#define PAL_GET_PSTATE_TYPE_LASTSET 0
+#define PAL_GET_PSTATE_TYPE_AVGANDRESET 1
+#define PAL_GET_PSTATE_TYPE_AVGNORESET 2
+#define PAL_GET_PSTATE_TYPE_INSTANT 3
+
+#define PAL_MC_ERROR_INJECT 276 /* Injects processor error or returns injection capabilities */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/fpu.h>
+
+/*
+ * Data types needed to pass information into PAL procedures and
+ * interpret information returned by them.
+ */
+
+/* Return status from the PAL procedure */
+typedef s64 pal_status_t;
+
+#define PAL_STATUS_SUCCESS 0 /* No error */
+#define PAL_STATUS_UNIMPLEMENTED (-1) /* Unimplemented procedure */
+#define PAL_STATUS_EINVAL (-2) /* Invalid argument */
+#define PAL_STATUS_ERROR (-3) /* Error */
+#define PAL_STATUS_CACHE_INIT_FAIL (-4) /* Could not initialize the
+ * specified level and type of
+					 * cache without side effects
+ * and "restrict" was 1
+ */
+#define PAL_STATUS_REQUIRES_MEMORY (-9) /* Call requires PAL memory buffer */
+
+/* Processor cache level in the hierarchy */
+typedef u64 pal_cache_level_t;
+#define PAL_CACHE_LEVEL_L0 0 /* L0 */
+#define PAL_CACHE_LEVEL_L1 1 /* L1 */
+#define PAL_CACHE_LEVEL_L2 2 /* L2 */
+
+
+/* Processor cache type at a particular level in the hierarchy */
+
+typedef u64 pal_cache_type_t;
+#define PAL_CACHE_TYPE_INSTRUCTION 1 /* Instruction cache */
+#define PAL_CACHE_TYPE_DATA 2 /* Data or unified cache */
+#define PAL_CACHE_TYPE_INSTRUCTION_DATA 3 /* Both Data & Instruction */
+
+
+#define PAL_CACHE_FLUSH_INVALIDATE 1 /* Invalidate clean lines */
+#define PAL_CACHE_FLUSH_CHK_INTRS 2 /* check for interrupts/mc while flushing */
+
+/* Processor cache line size in bytes */
+typedef int pal_cache_line_size_t;
+
+/* Processor cache line state */
+typedef u64 pal_cache_line_state_t;
+#define PAL_CACHE_LINE_STATE_INVALID 0 /* Invalid */
+#define PAL_CACHE_LINE_STATE_SHARED 1 /* Shared */
+#define PAL_CACHE_LINE_STATE_EXCLUSIVE 2 /* Exclusive */
+#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */
+
+typedef struct pal_freq_ratio {
+	u32 den, num;	/* denominator & numerator */
+} itc_ratio, proc_ratio;
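
PAL_FREQ_RATIOS reports the processor, bus and ITC frequencies as num/den ratios of the platform base frequency obtained from PAL_FREQ_BASE. A sketch of the scaling, using hypothetical values on a 64-bit host:

    #include <stdio.h>

    typedef struct pal_freq_ratio {
            unsigned int den, num;          /* denominator & numerator */
    } itc_ratio, proc_ratio;

    int main(void)
    {
            unsigned long base_freq = 200000000UL;          /* assumed 200MHz platform base */
            itc_ratio itc = { .den = 2, .num = 13 };        /* hypothetical 13/2 ratio */

            printf("ITC runs at %lu Hz\n", base_freq * itc.num / itc.den);
            return 0;
    }
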
+
+typedef union pal_cache_config_info_1_s {
+ struct {
+ u64 u : 1, /* 0 Unified cache ? */
+ at : 2, /* 2-1 Cache mem attr*/
+ reserved : 5, /* 7-3 Reserved */
+ associativity : 8, /* 16-8 Associativity*/
+ line_size : 8, /* 23-17 Line size */
+ stride : 8, /* 31-24 Stride */
+ store_latency : 8, /*39-32 Store latency*/
+ load_latency : 8, /* 47-40 Load latency*/
+ store_hints : 8, /* 55-48 Store hints*/
+ load_hints : 8; /* 63-56 Load hints */
+ } pcci1_bits;
+ u64 pcci1_data;
+} pal_cache_config_info_1_t;
+
+typedef union pal_cache_config_info_2_s {
+ struct {
+ u32 cache_size; /*cache size in bytes*/
+
+
+ u32 alias_boundary : 8, /* 39-32 aliased addr
+ * separation for max
+ * performance.
+ */
+ tag_ls_bit : 8, /* 47-40 LSb of addr*/
+ tag_ms_bit : 8, /* 55-48 MSb of addr*/
+ reserved : 8; /* 63-56 Reserved */
+ } pcci2_bits;
+ u64 pcci2_data;
+} pal_cache_config_info_2_t;
+
+
+typedef struct pal_cache_config_info_s {
+ pal_status_t pcci_status;
+ pal_cache_config_info_1_t pcci_info_1;
+ pal_cache_config_info_2_t pcci_info_2;
+ u64 pcci_reserved;
+} pal_cache_config_info_t;
+
+#define pcci_ld_hints pcci_info_1.pcci1_bits.load_hints
+#define pcci_st_hints pcci_info_1.pcci1_bits.store_hints
+#define pcci_ld_latency pcci_info_1.pcci1_bits.load_latency
+#define pcci_st_latency pcci_info_1.pcci1_bits.store_latency
+#define pcci_stride pcci_info_1.pcci1_bits.stride
+#define pcci_line_size pcci_info_1.pcci1_bits.line_size
+#define pcci_assoc pcci_info_1.pcci1_bits.associativity
+#define pcci_cache_attr pcci_info_1.pcci1_bits.at
+#define pcci_unified pcci_info_1.pcci1_bits.u
+#define pcci_tag_msb pcci_info_2.pcci2_bits.tag_ms_bit
+#define pcci_tag_lsb pcci_info_2.pcci2_bits.tag_ls_bit
+#define pcci_alias_boundary pcci_info_2.pcci2_bits.alias_boundary
+#define pcci_cache_size pcci_info_2.pcci2_bits.cache_size
+
+
+
+/* Possible values for cache attributes */
+
+#define PAL_CACHE_ATTR_WT 0 /* Write through cache */
+#define PAL_CACHE_ATTR_WB 1 /* Write back cache */
+#define PAL_CACHE_ATTR_WT_OR_WB 2 /* Either write thru or write
+ * back depending on TLB
+ * memory attributes
+ */
+
+
+/* Possible values for cache hints */
+
+#define PAL_CACHE_HINT_TEMP_1 0 /* Temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_1 1 /* Non-temporal level 1 */
+#define PAL_CACHE_HINT_NTEMP_ALL 3 /* Non-temporal all levels */
+
+/* Processor cache protection information */
+typedef union pal_cache_protection_element_u {
+ u32 pcpi_data;
+ struct {
+ u32 data_bits : 8, /* # data bits covered by
+ * each unit of protection
+ */
+
+ tagprot_lsb : 6, /* Least -do- */
+ tagprot_msb : 6, /* Most Sig. tag address
+ * bit that this
+ * protection covers.
+ */
+ prot_bits : 6, /* # of protection bits */
+ method : 4, /* Protection method */
+ t_d : 2; /* Indicates which part
+ * of the cache this
+ * protection encoding
+					 * applies to.
+ */
+ } pcp_info;
+} pal_cache_protection_element_t;
+
+#define pcpi_cache_prot_part pcp_info.t_d
+#define pcpi_prot_method pcp_info.method
+#define pcpi_prot_bits pcp_info.prot_bits
+#define pcpi_tagprot_msb pcp_info.tagprot_msb
+#define pcpi_tagprot_lsb pcp_info.tagprot_lsb
+#define pcpi_data_bits pcp_info.data_bits
+
+/* Processor cache part encodings */
+#define PAL_CACHE_PROT_PART_DATA 0 /* Data protection */
+#define PAL_CACHE_PROT_PART_TAG 1 /* Tag protection */
+#define PAL_CACHE_PROT_PART_TAG_DATA 2 /* Tag+data protection (tag is
+ * more significant )
+ */
+#define PAL_CACHE_PROT_PART_DATA_TAG 3 /* Data+tag protection (data is
+ * more significant )
+ */
+#define PAL_CACHE_PROT_PART_MAX 6
+
+
+typedef struct pal_cache_protection_info_s {
+ pal_status_t pcpi_status;
+ pal_cache_protection_element_t pcp_info[PAL_CACHE_PROT_PART_MAX];
+} pal_cache_protection_info_t;
+
+
+/* Processor cache protection method encodings */
+#define PAL_CACHE_PROT_METHOD_NONE 0 /* No protection */
+#define PAL_CACHE_PROT_METHOD_ODD_PARITY 1 /* Odd parity */
+#define PAL_CACHE_PROT_METHOD_EVEN_PARITY 2 /* Even parity */
+#define PAL_CACHE_PROT_METHOD_ECC 3 /* ECC protection */
+
+
+/* Processor cache line identification in the hierarchy */
+typedef union pal_cache_line_id_u {
+ u64 pclid_data;
+ struct {
+ u64 cache_type : 8, /* 7-0 cache type */
+ level : 8, /* 15-8 level of the
+ * cache in the
+ * hierarchy.
+ */
+ way : 8, /* 23-16 way in the set
+ */
+ part : 8, /* 31-24 part of the
+ * cache
+ */
+ reserved : 32; /* 63-32 is reserved*/
+ } pclid_info_read;
+ struct {
+ u64 cache_type : 8, /* 7-0 cache type */
+ level : 8, /* 15-8 level of the
+ * cache in the
+ * hierarchy.
+ */
+ way : 8, /* 23-16 way in the set
+ */
+ part : 8, /* 31-24 part of the
+ * cache
+ */
+ mesi : 8, /* 39-32 cache line
+ * state
+ */
+ start : 8, /* 47-40 lsb of data to
+ * invert
+ */
+ length : 8, /* 55-48 #bits to
+ * invert
+ */
+ trigger : 8; /* 63-56 Trigger error
+ * by doing a load
+ * after the write
+ */
+
+ } pclid_info_write;
+} pal_cache_line_id_u_t;
+
+#define pclid_read_part pclid_info_read.part
+#define pclid_read_way pclid_info_read.way
+#define pclid_read_level pclid_info_read.level
+#define pclid_read_cache_type pclid_info_read.cache_type
+
+#define pclid_write_trigger pclid_info_write.trigger
+#define pclid_write_length pclid_info_write.length
+#define pclid_write_start pclid_info_write.start
+#define pclid_write_mesi pclid_info_write.mesi
+#define pclid_write_part pclid_info_write.part
+#define pclid_write_way pclid_info_write.way
+#define pclid_write_level pclid_info_write.level
+#define pclid_write_cache_type pclid_info_write.cache_type
+
+/* Processor cache line part encodings */
+#define PAL_CACHE_LINE_ID_PART_DATA 0 /* Data */
+#define PAL_CACHE_LINE_ID_PART_TAG 1 /* Tag */
+#define PAL_CACHE_LINE_ID_PART_DATA_PROT 2 /* Data protection */
+#define PAL_CACHE_LINE_ID_PART_TAG_PROT 3 /* Tag protection */
+#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT 4 /* Data+tag
+ * protection
+ */
+typedef struct pal_cache_line_info_s {
+ pal_status_t pcli_status; /* Return status of the read cache line
+ * info call.
+ */
+ u64 pcli_data; /* 64-bit data, tag, protection bits .. */
+ u64 pcli_data_len; /* data length in bits */
+ pal_cache_line_state_t pcli_cache_line_state; /* mesi state */
+
+} pal_cache_line_info_t;
+
+
+/* Machine check related definitions */
+
+/* Pending event status bits */
+typedef u64 pal_mc_pending_events_t;
+
+#define PAL_MC_PENDING_MCA (1 << 0)
+#define PAL_MC_PENDING_INIT (1 << 1)
+
+/* Error information type */
+typedef u64 pal_mc_info_index_t;
+
+#define PAL_MC_INFO_PROCESSOR 0 /* Processor */
+#define PAL_MC_INFO_CACHE_CHECK 1 /* Cache check */
+#define PAL_MC_INFO_TLB_CHECK 2 /* Tlb check */
+#define PAL_MC_INFO_BUS_CHECK 3 /* Bus check */
+#define PAL_MC_INFO_REQ_ADDR 4 /* Requestor address */
+#define PAL_MC_INFO_RESP_ADDR 5 /* Responder address */
+#define PAL_MC_INFO_TARGET_ADDR 6 /* Target address */
+#define PAL_MC_INFO_IMPL_DEP 7 /* Implementation
+ * dependent
+ */
+
+#define PAL_TLB_CHECK_OP_PURGE 8
+
+typedef struct pal_process_state_info_s {
+ u64 reserved1 : 2,
+ rz : 1, /* PAL_CHECK processor
+ * rendezvous
+ * successful.
+ */
+
+ ra : 1, /* PAL_CHECK attempted
+ * a rendezvous.
+ */
+ me : 1, /* Distinct multiple
+ * errors occurred
+ */
+
+ mn : 1, /* Min. state save
+ * area has been
+ * registered with PAL
+ */
+
+ sy : 1, /* Storage integrity
+ * synched
+ */
+
+
+ co : 1, /* Continuable */
+ ci : 1, /* MC isolated */
+ us : 1, /* Uncontained storage
+ * damage.
+ */
+
+
+ hd : 1, /* Non-essential hw
+ * lost (no loss of
+ * functionality)
+ * causing the
+ * processor to run in
+ * degraded mode.
+ */
+
+ tl : 1, /* 1 => MC occurred
+ * after an instr was
+ * executed but before
+ * the trap that
+ * resulted from instr
+ * execution was
+ * generated.
+ * (Trap Lost )
+ */
+ mi : 1, /* More information available
+ * call PAL_MC_ERROR_INFO
+ */
+ pi : 1, /* Precise instruction pointer */
+ pm : 1, /* Precise min-state save area */
+
+ dy : 1, /* Processor dynamic
+ * state valid
+ */
+
+
+ in : 1, /* 0 = MC, 1 = INIT */
+ rs : 1, /* RSE valid */
+ cm : 1, /* MC corrected */
+ ex : 1, /* MC is expected */
+ cr : 1, /* Control regs valid*/
+ pc : 1, /* Perf cntrs valid */
+ dr : 1, /* Debug regs valid */
+ tr : 1, /* Translation regs
+ * valid
+ */
+ rr : 1, /* Region regs valid */
+ ar : 1, /* App regs valid */
+ br : 1, /* Branch regs valid */
+ pr : 1, /* Predicate registers
+ * valid
+ */
+
+ fp : 1, /* fp registers valid*/
+ b1 : 1, /* Preserved bank one
+ * general registers
+ * are valid
+ */
+ b0 : 1, /* Preserved bank zero
+ * general registers
+ * are valid
+ */
+ gr : 1, /* General registers
+ * are valid
+ * (excl. banked regs)
+ */
+ dsize : 16, /* size of dynamic
+ * state returned
+ * by the processor
+ */
+
+ se : 1, /* Shared error. MCA in a
+ shared structure */
+ reserved2 : 10,
+ cc : 1, /* Cache check */
+ tc : 1, /* TLB check */
+ bc : 1, /* Bus check */
+ rc : 1, /* Register file check */
+ uc : 1; /* Uarch check */
+
+} pal_processor_state_info_t;
+
+typedef struct pal_cache_check_info_s {
+ u64 op : 4, /* Type of cache
+ * operation that
+ * caused the machine
+ * check.
+ */
+ level : 2, /* Cache level */
+ reserved1 : 2,
+ dl : 1, /* Failure in data part
+ * of cache line
+ */
+ tl : 1, /* Failure in tag part
+ * of cache line
+ */
+ dc : 1, /* Failure in dcache */
+ ic : 1, /* Failure in icache */
+ mesi : 3, /* Cache line state */
+ mv : 1, /* mesi valid */
+ way : 5, /* Way in which the
+ * error occurred
+ */
+ wiv : 1, /* Way field valid */
+ reserved2 : 1,
+ dp : 1, /* Data poisoned on MBE */
+ reserved3 : 6,
+ hlth : 2, /* Health indicator */
+
+ index : 20, /* Cache line index */
+ reserved4 : 2,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ tv : 1, /* Target address
+ * structure is valid
+ */
+ rq : 1, /* Requester identifier
+ * structure is valid
+ */
+ rp : 1, /* Responder identifier
+ * structure is valid
+ */
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_cache_check_info_t;
+
+typedef struct pal_tlb_check_info_s {
+
+ u64 tr_slot : 8, /* Slot# of TR where
+ * error occurred
+ */
+ trv : 1, /* tr_slot field is valid */
+ reserved1 : 1,
+ level : 2, /* TLB level where failure occurred */
+ reserved2 : 4,
+ dtr : 1, /* Fail in data TR */
+ itr : 1, /* Fail in inst TR */
+ dtc : 1, /* Fail in data TC */
+ itc : 1, /* Fail in inst. TC */
+ op : 4, /* Cache operation */
+ reserved3 : 6,
+ hlth : 2, /* Health indicator */
+ reserved4 : 22,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ tv : 1, /* Target address
+ * structure is valid
+ */
+ rq : 1, /* Requester identifier
+ * structure is valid
+ */
+ rp : 1, /* Responder identifier
+ * structure is valid
+ */
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_tlb_check_info_t;
+
+typedef struct pal_bus_check_info_s {
+ u64 size : 5, /* Xaction size */
+ ib : 1, /* Internal bus error */
+ eb : 1, /* External bus error */
+ cc : 1, /* Error occurred
+ * during cache-cache
+ * transfer.
+ */
+ type : 8, /* Bus xaction type*/
+ sev : 5, /* Bus error severity*/
+ hier : 2, /* Bus hierarchy level */
+ dp : 1, /* Data poisoned on MBE */
+ bsi : 8, /* Bus error status
+ * info
+ */
+ reserved2 : 22,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ tv : 1, /* Target address
+ * structure is valid
+ */
+ rq : 1, /* Requester identifier
+ * structure is valid
+ */
+ rp : 1, /* Responder identifier
+ * structure is valid
+ */
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_bus_check_info_t;
+
+typedef struct pal_reg_file_check_info_s {
+ u64 id : 4, /* Register file identifier */
+ op : 4, /* Type of register
+ * operation that
+ * caused the machine
+ * check.
+ */
+ reg_num : 7, /* Register number */
+ rnv : 1, /* reg_num valid */
+ reserved2 : 38,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ reserved3 : 3,
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_reg_file_check_info_t;
+
+typedef struct pal_uarch_check_info_s {
+ u64 sid : 5, /* Structure identification */
+ level : 3, /* Level of failure */
+ array_id : 4, /* Array identification */
+ op : 4, /* Type of
+ * operation that
+ * caused the machine
+ * check.
+ */
+ way : 6, /* Way of structure */
+ wv : 1, /* way valid */
+ xv : 1, /* index valid */
+ reserved1 : 6,
+ hlth : 2, /* Health indicator */
+ index : 8, /* Index or set of the uarch
+ * structure that failed.
+ */
+ reserved2 : 24,
+
+ is : 1, /* instruction set (1 == ia32) */
+ iv : 1, /* instruction set field valid */
+ pl : 2, /* privilege level */
+ pv : 1, /* privilege level field valid */
+ mcc : 1, /* Machine check corrected */
+ tv : 1, /* Target address
+ * structure is valid
+ */
+ rq : 1, /* Requester identifier
+ * structure is valid
+ */
+ rp : 1, /* Responder identifier
+ * structure is valid
+ */
+ pi : 1; /* Precise instruction pointer
+ * structure is valid
+ */
+} pal_uarch_check_info_t;
+
+typedef union pal_mc_error_info_u {
+ u64 pmei_data;
+ pal_processor_state_info_t pme_processor;
+ pal_cache_check_info_t pme_cache;
+ pal_tlb_check_info_t pme_tlb;
+ pal_bus_check_info_t pme_bus;
+ pal_reg_file_check_info_t pme_reg_file;
+ pal_uarch_check_info_t pme_uarch;
+} pal_mc_error_info_t;
+
+#define pmci_proc_unknown_check pme_processor.uc
+#define pmci_proc_bus_check pme_processor.bc
+#define pmci_proc_tlb_check pme_processor.tc
+#define pmci_proc_cache_check pme_processor.cc
+#define pmci_proc_dynamic_state_size pme_processor.dsize
+#define pmci_proc_gpr_valid pme_processor.gr
+#define pmci_proc_preserved_bank0_gpr_valid pme_processor.b0
+#define pmci_proc_preserved_bank1_gpr_valid pme_processor.b1
+#define pmci_proc_fp_valid pme_processor.fp
+#define pmci_proc_predicate_regs_valid pme_processor.pr
+#define pmci_proc_branch_regs_valid pme_processor.br
+#define pmci_proc_app_regs_valid pme_processor.ar
+#define pmci_proc_region_regs_valid pme_processor.rr
+#define pmci_proc_translation_regs_valid pme_processor.tr
+#define pmci_proc_debug_regs_valid pme_processor.dr
+#define pmci_proc_perf_counters_valid pme_processor.pc
+#define pmci_proc_control_regs_valid pme_processor.cr
+#define pmci_proc_machine_check_expected pme_processor.ex
+#define pmci_proc_machine_check_corrected pme_processor.cm
+#define pmci_proc_rse_valid pme_processor.rs
+#define pmci_proc_machine_check_or_init pme_processor.in
+#define pmci_proc_dynamic_state_valid pme_processor.dy
+#define pmci_proc_operation pme_processor.op
+#define pmci_proc_trap_lost pme_processor.tl
+#define pmci_proc_hardware_damage pme_processor.hd
+#define pmci_proc_uncontained_storage_damage pme_processor.us
+#define pmci_proc_machine_check_isolated pme_processor.ci
+#define pmci_proc_continuable pme_processor.co
+#define pmci_proc_storage_intergrity_synced pme_processor.sy
+#define pmci_proc_min_state_save_area_regd pme_processor.mn
+#define pmci_proc_distinct_multiple_errors pme_processor.me
+#define pmci_proc_pal_attempted_rendezvous pme_processor.ra
+#define pmci_proc_pal_rendezvous_complete pme_processor.rz
+
+
+#define pmci_cache_level pme_cache.level
+#define pmci_cache_line_state pme_cache.mesi
+#define pmci_cache_line_state_valid pme_cache.mv
+#define pmci_cache_line_index pme_cache.index
+#define pmci_cache_instr_cache_fail pme_cache.ic
+#define pmci_cache_data_cache_fail pme_cache.dc
+#define pmci_cache_line_tag_fail pme_cache.tl
+#define pmci_cache_line_data_fail pme_cache.dl
+#define pmci_cache_operation pme_cache.op
+#define pmci_cache_way_valid pme_cache.wv
+#define pmci_cache_target_address_valid pme_cache.tv
+#define pmci_cache_way pme_cache.way
+#define pmci_cache_mc pme_cache.mcc
+
+#define pmci_tlb_instr_translation_cache_fail pme_tlb.itc
+#define pmci_tlb_data_translation_cache_fail pme_tlb.dtc
+#define pmci_tlb_instr_translation_reg_fail pme_tlb.itr
+#define pmci_tlb_data_translation_reg_fail pme_tlb.dtr
+#define pmci_tlb_translation_reg_slot pme_tlb.tr_slot
+#define pmci_tlb_mc pme_tlb.mcc
+
+#define pmci_bus_status_info pme_bus.bsi
+#define pmci_bus_req_address_valid pme_bus.rq
+#define pmci_bus_resp_address_valid pme_bus.rp
+#define pmci_bus_target_address_valid pme_bus.tv
+#define pmci_bus_error_severity pme_bus.sev
+#define pmci_bus_transaction_type pme_bus.type
+#define pmci_bus_cache_cache_transfer pme_bus.cc
+#define pmci_bus_transaction_size pme_bus.size
+#define pmci_bus_internal_error pme_bus.ib
+#define pmci_bus_external_error pme_bus.eb
+#define pmci_bus_mc pme_bus.mcc
+
+/*
+ * NOTE: this min_state_save area struct only includes the 1KB
+ * architectural state save area. The other 3 KB is scratch space
+ * for PAL.
+ */
+
+typedef struct pal_min_state_area_s {
+ u64 pmsa_nat_bits; /* nat bits for saved GRs */
+ u64 pmsa_gr[15]; /* GR1 - GR15 */
+ u64 pmsa_bank0_gr[16]; /* GR16 - GR31 */
+ u64 pmsa_bank1_gr[16]; /* GR16 - GR31 */
+ u64 pmsa_pr; /* predicate registers */
+ u64 pmsa_br0; /* branch register 0 */
+ u64 pmsa_rsc; /* ar.rsc */
+ u64 pmsa_iip; /* cr.iip */
+ u64 pmsa_ipsr; /* cr.ipsr */
+ u64 pmsa_ifs; /* cr.ifs */
+ u64 pmsa_xip; /* previous iip */
+ u64 pmsa_xpsr; /* previous psr */
+ u64 pmsa_xfs; /* previous ifs */
+ u64 pmsa_br1; /* branch register 1 */
+ u64 pmsa_reserved[70]; /* pal_min_state_area should total to 1KB */
+} pal_min_state_area_t;
+
+
+struct ia64_pal_retval {
+ /*
+ * A zero status value indicates call completed without error.
+ * A negative status value indicates reason of call failure.
+ * A positive status value indicates success but an
+ * informational value should be printed (e.g., "reboot for
+ * change to take effect").
+ */
+ s64 status;
+ u64 v0;
+ u64 v1;
+ u64 v2;
+};
+
+/*
+ * Note: Currently unused PAL arguments are generally labeled
+ * "reserved" so the value specified in the PAL documentation
+ * (generally 0) MUST be passed. Reserved parameters are not optional
+ * parameters.
+ */
+extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64);
+extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64);
+extern void ia64_save_scratch_fpregs (struct ia64_fpreg *);
+extern void ia64_load_scratch_fpregs (struct ia64_fpreg *);
+
+#define PAL_CALL(iprv,a0,a1,a2,a3) do { \
+ struct ia64_fpreg fr[6]; \
+ ia64_save_scratch_fpregs(fr); \
+ iprv = ia64_pal_call_static(a0, a1, a2, a3); \
+ ia64_load_scratch_fpregs(fr); \
+} while (0)
+
+#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do { \
+ struct ia64_fpreg fr[6]; \
+ ia64_save_scratch_fpregs(fr); \
+ iprv = ia64_pal_call_stacked(a0, a1, a2, a3); \
+ ia64_load_scratch_fpregs(fr); \
+} while (0)
+
+#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do { \
+ struct ia64_fpreg fr[6]; \
+ ia64_save_scratch_fpregs(fr); \
+ iprv = ia64_pal_call_phys_static(a0, a1, a2, a3); \
+ ia64_load_scratch_fpregs(fr); \
+} while (0)
+
+#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do { \
+ struct ia64_fpreg fr[6]; \
+ ia64_save_scratch_fpregs(fr); \
+ iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3); \
+ ia64_load_scratch_fpregs(fr); \
+} while (0)
+
+typedef int (*ia64_pal_handler) (u64, ...);
+extern ia64_pal_handler ia64_pal;
+extern void ia64_pal_handler_init (void *);
+
+extern pal_cache_config_info_t l0d_cache_config_info;
+extern pal_cache_config_info_t l0i_cache_config_info;
+extern pal_cache_config_info_t l1_cache_config_info;
+extern pal_cache_config_info_t l2_cache_config_info;
+
+extern pal_cache_protection_info_t l0d_cache_protection_info;
+extern pal_cache_protection_info_t l0i_cache_protection_info;
+extern pal_cache_protection_info_t l1_cache_protection_info;
+extern pal_cache_protection_info_t l2_cache_protection_info;
+
+extern pal_cache_config_info_t pal_cache_config_info_get(pal_cache_level_t,
+ pal_cache_type_t);
+
+extern pal_cache_protection_info_t pal_cache_protection_info_get(pal_cache_level_t,
+ pal_cache_type_t);
+
+
+extern void pal_error(int);
+
+
+/* Useful wrappers for the current list of pal procedures */
+
+typedef union pal_bus_features_u {
+ u64 pal_bus_features_val;
+ struct {
+ u64 pbf_reserved1 : 29;
+ u64 pbf_req_bus_parking : 1;
+ u64 pbf_bus_lock_mask : 1;
+ u64 pbf_enable_half_xfer_rate : 1;
+ u64 pbf_reserved2 : 20;
+ u64 pbf_enable_shared_line_replace : 1;
+ u64 pbf_enable_exclusive_line_replace : 1;
+ u64 pbf_disable_xaction_queueing : 1;
+ u64 pbf_disable_resp_err_check : 1;
+ u64 pbf_disable_berr_check : 1;
+ u64 pbf_disable_bus_req_internal_err_signal : 1;
+ u64 pbf_disable_bus_req_berr_signal : 1;
+ u64 pbf_disable_bus_init_event_check : 1;
+ u64 pbf_disable_bus_init_event_signal : 1;
+ u64 pbf_disable_bus_addr_err_check : 1;
+ u64 pbf_disable_bus_addr_err_signal : 1;
+ u64 pbf_disable_bus_data_err_check : 1;
+ } pal_bus_features_s;
+} pal_bus_features_u_t;
+
+extern void pal_bus_features_print (u64);
+
+/* Provide information about configurable processor bus features */
+static inline s64
+ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail,
+ pal_bus_features_u_t *features_status,
+ pal_bus_features_u_t *features_control)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0);
+ if (features_avail)
+ features_avail->pal_bus_features_val = iprv.v0;
+ if (features_status)
+ features_status->pal_bus_features_val = iprv.v1;
+ if (features_control)
+ features_control->pal_bus_features_val = iprv.v2;
+ return iprv.status;
+}
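+
+/*
+ * Illustrative call sequence (a sketch, not part of the interface): any of
+ * the out-pointers may be passed as NULL when that value is not needed, e.g.
+ *
+ *	pal_bus_features_u_t avail;
+ *
+ *	if (ia64_pal_bus_get_features(&avail, NULL, NULL) == 0)
+ *		pal_bus_features_print(avail.pal_bus_features_val);
+ */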
+
+/* Enables/disables specific processor bus features */
+static inline s64
+ia64_pal_bus_set_features (pal_bus_features_u_t feature_select)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0);
+ return iprv.status;
+}
+
+/* Get detailed cache information */
+static inline s64
+ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0);
+
+ if (iprv.status == 0) {
+ conf->pcci_status = iprv.status;
+ conf->pcci_info_1.pcci1_data = iprv.v0;
+ conf->pcci_info_2.pcci2_data = iprv.v1;
+ conf->pcci_reserved = iprv.v2;
+ }
+ return iprv.status;
+}
+
+/* Get detailed cache protection information */
+static inline s64
+ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0);
+
+ if (iprv.status == 0) {
+ prot->pcpi_status = iprv.status;
+ prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff;
+ prot->pcp_info[1].pcpi_data = iprv.v0 >> 32;
+ prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff;
+ prot->pcp_info[3].pcpi_data = iprv.v1 >> 32;
+ prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff;
+ prot->pcp_info[5].pcpi_data = iprv.v2 >> 32;
+ }
+ return iprv.status;
+}
+
+/*
+ * Flush the processor instruction or data caches. *PROGRESS must be
+ * initialized to zero before calling this for the first time.
+ */
+static inline s64
+ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress);
+ if (vector)
+ *vector = iprv.v0;
+ *progress = iprv.v1;
+ return iprv.status;
+}
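+
+/*
+ * Typical flush loop (an illustrative sketch; it assumes the
+ * PAL_CACHE_TYPE_* constants defined earlier in this header, and the retry
+ * status checked here, PAL_STATUS_REQUIRES_MEMORY, is an assumption of
+ * this example):
+ *
+ *	u64 progress = 0, vector;
+ *	s64 status;
+ *
+ *	do {
+ *		status = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
+ *					      0, &progress, &vector);
+ *	} while (status == PAL_STATUS_REQUIRES_MEMORY);
+ */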
+
+
+/* Initialize the processor controlled caches */
+static inline s64
+ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest);
+ return iprv.status;
+}
+
+/* Initialize the tags and data of a data or unified cache line of
+ * processor controlled cache to known values without the availability
+ * of backing memory.
+ */
+static inline s64
+ia64_pal_cache_line_init (u64 physical_addr, u64 data_value)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0);
+ return iprv.status;
+}
+
+
+/* Read the data and tag of a processor controlled cache line for diags */
+static inline s64
+ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS_STK(iprv, PAL_CACHE_READ, line_id.pclid_data,
+ physical_addr, 0);
+ return iprv.status;
+}
+
+/* Return summary information about the hierarchy of caches controlled by the processor */
+static inline s64
+ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
+ if (cache_levels)
+ *cache_levels = iprv.v0;
+ if (unique_caches)
+ *unique_caches = iprv.v1;
+ return iprv.status;
+}
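+
+/*
+ * A sketch of walking the cache hierarchy with the two calls above
+ * (illustrative only; assumes the PAL_CACHE_TYPE_* constants and the
+ * pcci_* accessor macros defined earlier in this header):
+ *
+ *	u64 levels, unique, l, line_size;
+ *	pal_cache_config_info_t cci;
+ *
+ *	if (ia64_pal_cache_summary(&levels, &unique) == 0)
+ *		for (l = 0; l < levels; l++)
+ *			if (ia64_pal_cache_config_info(l,
+ *					PAL_CACHE_TYPE_DATA, &cci) == 0)
+ *				line_size = 1UL << cci.pcci_line_size;
+ */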
+
+/* Write the data and tag of a processor-controlled cache line for diags */
+static inline s64
+ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS_STK(iprv, PAL_CACHE_WRITE, line_id.pclid_data,
+ physical_addr, data);
+ return iprv.status;
+}
+
+
+/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics,
+ u64 *buffer_size, u64 *buffer_align)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics);
+ if (buffer_size)
+ *buffer_size = iprv.v0;
+ if (buffer_align)
+ *buffer_align = iprv.v1;
+ return iprv.status;
+}
+
+/* Copy relocatable PAL procedures from ROM to memory */
+static inline s64
+ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor);
+ if (pal_proc_offset)
+ *pal_proc_offset = iprv.v0;
+ return iprv.status;
+}
+
+/* Return the number of instruction and data debug register pairs */
+static inline s64
+ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
+ if (inst_regs)
+ *inst_regs = iprv.v0;
+ if (data_regs)
+ *data_regs = iprv.v1;
+
+ return iprv.status;
+}
+
+#ifdef TBD
+/* Switch from IA64-system environment to IA-32 system environment */
+static inline s64
+ia64_pal_enter_ia32_env (ia32_env1, ia32_env2, ia32_env3)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3);
+ return iprv.status;
+}
+#endif
+
+/* Get unique geographical address of this processor on its bus */
+static inline s64
+ia64_pal_fixed_addr (u64 *global_unique_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0);
+ if (global_unique_addr)
+ *global_unique_addr = iprv.v0;
+ return iprv.status;
+}
+
+/* Get base frequency of the platform if generated by the processor */
+static inline s64
+ia64_pal_freq_base (u64 *platform_base_freq)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
+ if (platform_base_freq)
+ *platform_base_freq = iprv.v0;
+ return iprv.status;
+}
+
+/*
+ * Get the ratios of processor frequency, bus frequency and interval timer
+ * to the base frequency of the platform
+ */
+static inline s64
+ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
+ struct pal_freq_ratio *itc_ratio)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_FREQ_RATIOS, 0, 0, 0);
+ if (proc_ratio)
+ *(u64 *)proc_ratio = iprv.v0;
+ if (bus_ratio)
+ *(u64 *)bus_ratio = iprv.v1;
+ if (itc_ratio)
+ *(u64 *)itc_ratio = iprv.v2;
+ return iprv.status;
+}
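+
+/*
+ * Illustrative sketch (assumes struct pal_freq_ratio's num/den fields as
+ * defined earlier in this header): the ITC frequency follows from the
+ * platform base frequency and the returned ratio, e.g.
+ *
+ *	u64 base, itc_freq;
+ *	struct pal_freq_ratio itc_ratio;
+ *
+ *	if (ia64_pal_freq_base(&base) == 0 &&
+ *	    ia64_pal_freq_ratios(NULL, NULL, &itc_ratio) == 0)
+ *		itc_freq = base * itc_ratio.num / itc_ratio.den;
+ */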
+
+/*
+ * Get the current hardware resource sharing policy of the processor
+ */
+static inline s64
+ia64_pal_get_hw_policy (u64 proc_num, u64 *cur_policy, u64 *num_impacted,
+ u64 *la)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_GET_HW_POLICY, proc_num, 0, 0);
+ if (cur_policy)
+ *cur_policy = iprv.v0;
+ if (num_impacted)
+ *num_impacted = iprv.v1;
+ if (la)
+ *la = iprv.v2;
+ return iprv.status;
+}
+
+/* Make the processor enter HALT or one of the implementation dependent low
+ * power states where prefetching and execution are suspended and cache and
+ * TLB coherency is not maintained.
+ */
+static inline s64
+ia64_pal_halt (u64 halt_state)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0);
+ return iprv.status;
+}
+
+typedef union pal_power_mgmt_info_u {
+ u64 ppmi_data;
+ struct {
+ u64 exit_latency : 16,
+ entry_latency : 16,
+ power_consumption : 28,
+ im : 1,
+ co : 1,
+ reserved : 2;
+ } pal_power_mgmt_info_s;
+} pal_power_mgmt_info_u_t;
+
+/* Return information about the processor's optional power management capabilities. */
+static inline s64
+ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0);
+ return iprv.status;
+}
+
+/* Get the current P-state information */
+static inline s64
+ia64_pal_get_pstate (u64 *pstate_index, unsigned long type)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_GET_PSTATE, type, 0, 0);
+ *pstate_index = iprv.v0;
+ return iprv.status;
+}
+
+/* Set the P-state */
+static inline s64
+ia64_pal_set_pstate (u64 pstate_index)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_SET_PSTATE, pstate_index, 0, 0);
+ return iprv.status;
+}
+
+/* Processor branding information */
+static inline s64
+ia64_pal_get_brand_info (char *brand_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_BRAND_INFO, 0, (u64)brand_info, 0);
+ return iprv.status;
+}
+
+/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ */
+static inline s64
+ia64_pal_halt_light (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0);
+ return iprv.status;
+}
+
+/* Clear all the processor error logging registers and reset the indicator that allows
+ * the error logging registers to be written. This procedure also checks the pending
+ * machine check bit and pending INIT bit and reports their states.
+ */
+static inline s64
+ia64_pal_mc_clear_log (u64 *pending_vector)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0);
+ if (pending_vector)
+ *pending_vector = iprv.v0;
+ return iprv.status;
+}
+
+/* Ensure that all outstanding transactions in a processor are completed or that any
+ * MCA due to these outstanding transactions is taken.
+ */
+static inline s64
+ia64_pal_mc_drain (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);
+ return iprv.status;
+}
+
+/* Return the machine check dynamic processor state */
+static inline s64
+ia64_pal_mc_dynamic_state (u64 info_type, u64 dy_buffer, u64 *size)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, info_type, dy_buffer, 0);
+ if (size)
+ *size = iprv.v0;
+ return iprv.status;
+}
+
+/* Return processor machine check information */
+static inline s64
+ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0);
+ if (size)
+ *size = iprv.v0;
+ if (error_info)
+ *error_info = iprv.v1;
+ return iprv.status;
+}
+
+/* Injects the requested processor error or returns info on
+ * supported injection capabilities for current processor implementation
+ */
+static inline s64
+ia64_pal_mc_error_inject_phys (u64 err_type_info, u64 err_struct_info,
+ u64 err_data_buffer, u64 *capabilities, u64 *resources)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info,
+ err_struct_info, err_data_buffer);
+ if (capabilities)
+ *capabilities = iprv.v0;
+ if (resources)
+ *resources = iprv.v1;
+ return iprv.status;
+}
+
+static inline s64
+ia64_pal_mc_error_inject_virt (u64 err_type_info, u64 err_struct_info,
+ u64 err_data_buffer, u64 *capabilities, u64 *resources)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_STK(iprv, PAL_MC_ERROR_INJECT, err_type_info,
+ err_struct_info, err_data_buffer);
+ if (capabilities)
+ *capabilities = iprv.v0;
+ if (resources)
+ *resources = iprv.v1;
+ return iprv.status;
+}
+
+/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not
+ * attempt to correct any expected machine checks.
+ */
+static inline s64
+ia64_pal_mc_expected (u64 expected, u64 *previous)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0);
+ if (previous)
+ *previous = iprv.v0;
+ return iprv.status;
+}
+
+typedef union pal_hw_tracking_u {
+ u64 pht_data;
+ struct {
+ u64 itc :4, /* Instruction cache tracking */
+ dct :4, /* Data cache tracking */
+ itt :4, /* Instruction TLB tracking */
+ ddt :4, /* Data TLB tracking */
+ reserved:48;
+ } pal_hw_tracking_s;
+} pal_hw_tracking_u_t;
+
+/*
+ * Hardware tracking status.
+ */
+static inline s64
+ia64_pal_mc_hw_tracking (u64 *status)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_HW_TRACKING, 0, 0, 0);
+ if (status)
+ *status = iprv.v0;
+ return iprv.status;
+}
+
+/* Register a platform dependent location with PAL to which it can save
+ * minimal processor state in the event of a machine check or initialization
+ * event.
+ */
+static inline s64
+ia64_pal_mc_register_mem (u64 physical_addr, u64 size, u64 *req_size)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, size, 0);
+ if (req_size)
+ *req_size = iprv.v0;
+ return iprv.status;
+}
+
+/* Restore minimal architectural processor state, set CMC interrupt if necessary
+ * and resume execution
+ */
+static inline s64
+ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0);
+ return iprv.status;
+}
+
+/* Return the memory attributes implemented by the processor */
+static inline s64
+ia64_pal_mem_attrib (u64 *mem_attrib)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0);
+ if (mem_attrib)
+ *mem_attrib = iprv.v0 & 0xff;
+ return iprv.status;
+}
+
+/* Return the amount of memory needed for the second phase of processor
+ * self-test and the required alignment of memory.
+ */
+static inline s64
+ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0);
+ if (bytes_needed)
+ *bytes_needed = iprv.v0;
+ if (alignment)
+ *alignment = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_perf_mon_info_u {
+ u64 ppmi_data;
+ struct {
+ u64 generic : 8,
+ width : 8,
+ cycles : 8,
+ retired : 8,
+ reserved : 32;
+ } pal_perf_mon_info_s;
+} pal_perf_mon_info_u_t;
+
+/* Return the performance monitor information about what can be counted
+ * and how to configure the monitors to count the desired events.
+ */
+static inline s64
+ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0);
+ if (pm_info)
+ pm_info->ppmi_data = iprv.v0;
+ return iprv.status;
+}
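+
+/*
+ * Illustrative sketch (the 16-word mask buffer size is an assumption of
+ * this example):
+ *
+ *	u64 pm_buffer[16];
+ *	pal_perf_mon_info_u_t pm_info;
+ *
+ *	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) == 0)
+ *		printk("PMC/PMD are %u bits wide\n",
+ *		       (unsigned) pm_info.pal_perf_mon_info_s.width);
+ */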
+
+/* Specifies the physical address of the processor interrupt block
+ * and I/O port space.
+ */
+static inline s64
+ia64_pal_platform_addr (u64 type, u64 physical_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0);
+ return iprv.status;
+}
+
+/* Set the SAL PMI entrypoint in memory */
+static inline s64
+ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0);
+ return iprv.status;
+}
+
+struct pal_features_s;
+/* Provide information about configurable processor features */
+static inline s64
+ia64_pal_proc_get_features (u64 *features_avail,
+ u64 *features_status,
+ u64 *features_control,
+ u64 features_set)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, features_set, 0);
+ if (iprv.status == 0) {
+ *features_avail = iprv.v0;
+ *features_status = iprv.v1;
+ *features_control = iprv.v2;
+ }
+ return iprv.status;
+}
+
+/* Enable/disable processor dependent features */
+static inline s64
+ia64_pal_proc_set_features (u64 feature_select)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0);
+ return iprv.status;
+}
+
+/*
+ * Put everything in a struct so we avoid the global offset table whenever
+ * possible.
+ */
+typedef struct ia64_ptce_info_s {
+ u64 base;
+ u32 count[2];
+ u32 stride[2];
+} ia64_ptce_info_t;
+
+/* Return the information required for the architected loop used to purge
+ * (initialize) the entire TC
+ */
+static inline s64
+ia64_get_ptce (ia64_ptce_info_t *ptce)
+{
+ struct ia64_pal_retval iprv;
+
+ if (!ptce)
+ return -1;
+
+ PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0);
+ if (iprv.status == 0) {
+ ptce->base = iprv.v0;
+ ptce->count[0] = iprv.v1 >> 32;
+ ptce->count[1] = iprv.v1 & 0xffffffff;
+ ptce->stride[0] = iprv.v2 >> 32;
+ ptce->stride[1] = iprv.v2 & 0xffffffff;
+ }
+ return iprv.status;
+}
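+
+/*
+ * The architected purge loop then looks like this (an illustrative sketch,
+ * assuming the ia64_ptce() intrinsic):
+ *
+ *	ia64_ptce_info_t ptce;
+ *	u64 addr;
+ *	u32 i, j;
+ *
+ *	if (ia64_get_ptce(&ptce) == 0) {
+ *		addr = ptce.base;
+ *		for (i = 0; i < ptce.count[0]; i++) {
+ *			for (j = 0; j < ptce.count[1]; j++) {
+ *				ia64_ptce(addr);
+ *				addr += ptce.stride[1];
+ *			}
+ *			addr += ptce.stride[0];
+ *		}
+ *	}
+ */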
+
+/* Return info about implemented application and control registers. */
+static inline s64
+ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0);
+ if (reg_info_1)
+ *reg_info_1 = iprv.v0;
+ if (reg_info_2)
+ *reg_info_2 = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_hints_u {
+ u64 ph_data;
+ struct {
+ u64 si : 1,
+ li : 1,
+ reserved : 62;
+ } pal_hints_s;
+} pal_hints_u_t;
+
+/* Return information about the register stack and RSE for this processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
+ if (num_phys_stacked)
+ *num_phys_stacked = iprv.v0;
+ if (hints)
+ hints->ph_data = iprv.v1;
+ return iprv.status;
+}
+
+/*
+ * Set the current hardware resource sharing policy of the processor
+ */
+static inline s64
+ia64_pal_set_hw_policy (u64 policy)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_SET_HW_POLICY, policy, 0, 0);
+ return iprv.status;
+}
+
+/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are
+ * suspended, but cache and TLB coherency is maintained.
+ * This is usually called in IA-32 mode.
+ */
+static inline s64
+ia64_pal_shutdown (void)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0);
+ return iprv.status;
+}
+
+/* Perform the second phase of processor self-test. */
+static inline s64
+ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes);
+ if (self_test_state)
+ *self_test_state = iprv.v0;
+ return iprv.status;
+}
+
+typedef union pal_version_u {
+ u64 pal_version_val;
+ struct {
+ u64 pv_pal_b_rev : 8;
+ u64 pv_pal_b_model : 8;
+ u64 pv_reserved1 : 8;
+ u64 pv_pal_vendor : 8;
+ u64 pv_pal_a_rev : 8;
+ u64 pv_pal_a_model : 8;
+ u64 pv_reserved2 : 16;
+ } pal_version_s;
+} pal_version_u_t;
+
+
+/*
+ * Return PAL version information. While the documentation states that
+ * PAL_VERSION can be called in either physical or virtual mode, some
+ * implementations only allow physical calls. We don't call it very often,
+ * so the overhead isn't worth eliminating.
+ */
+static inline s64
+ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
+ if (pal_min_version)
+ pal_min_version->pal_version_val = iprv.v0;
+
+ if (pal_cur_version)
+ pal_cur_version->pal_version_val = iprv.v1;
+
+ return iprv.status;
+}
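+
+/*
+ * Example decode of the returned version words (illustrative only):
+ *
+ *	pal_version_u_t min_ver, cur_ver;
+ *
+ *	if (ia64_pal_version(&min_ver, &cur_ver) == 0)
+ *		printk("PAL_B %u.%u\n",
+ *		       (unsigned) cur_ver.pal_version_s.pv_pal_b_model,
+ *		       (unsigned) cur_ver.pal_version_s.pv_pal_b_rev);
+ */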
+
+typedef union pal_tc_info_u {
+ u64 pti_val;
+ struct {
+ u64 num_sets : 8,
+ associativity : 8,
+ num_entries : 16,
+ pf : 1,
+ unified : 1,
+ reduce_tr : 1,
+ reserved : 29;
+ } pal_tc_info_s;
+} pal_tc_info_u_t;
+
+#define tc_reduce_tr pal_tc_info_s.reduce_tr
+#define tc_unified pal_tc_info_s.unified
+#define tc_pf pal_tc_info_s.pf
+#define tc_num_entries pal_tc_info_s.num_entries
+#define tc_associativity pal_tc_info_s.associativity
+#define tc_num_sets pal_tc_info_s.num_sets
+
+
+/* Return information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_pages)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0);
+ if (tc_info)
+ tc_info->pti_val = iprv.v0;
+ if (tc_pages)
+ *tc_pages = iprv.v1;
+ return iprv.status;
+}
+
+/* Get page size information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
+ if (tr_pages)
+ *tr_pages = iprv.v0;
+ if (vw_pages)
+ *vw_pages = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_vm_info_1_u {
+ u64 pvi1_val;
+ struct {
+ u64 vw : 1,
+ phys_add_size : 7,
+ key_size : 8,
+ max_pkr : 8,
+ hash_tag_id : 8,
+ max_dtr_entry : 8,
+ max_itr_entry : 8,
+ max_unique_tcs : 8,
+ num_tc_levels : 8;
+ } pal_vm_info_1_s;
+} pal_vm_info_1_u_t;
+
+#define PAL_MAX_PURGES 0xFFFF /* all ones means unlimited */
+
+typedef union pal_vm_info_2_u {
+ u64 pvi2_val;
+ struct {
+ u64 impl_va_msb : 8,
+ rid_size : 8,
+ max_purges : 16,
+ reserved : 32;
+ } pal_vm_info_2_s;
+} pal_vm_info_2_u_t;
+
+/* Get summary information about the virtual memory characteristics of the processor
+ * implementation.
+ */
+static inline s64
+ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0);
+ if (vm_info_1)
+ vm_info_1->pvi1_val = iprv.v0;
+ if (vm_info_2)
+ vm_info_2->pvi2_val = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_vp_info_u {
+ u64 pvi_val;
+ struct {
+ u64 index: 48, /* virtual feature set info */
+ vmm_id: 16; /* feature set id */
+ } pal_vp_info_s;
+} pal_vp_info_u_t;
+
+/*
+ * Returns information about virtual processor features
+ */
+static inline s64
+ia64_pal_vp_info (u64 feature_set, u64 vp_buffer, u64 *vp_info, u64 *vmm_id)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_VP_INFO, feature_set, vp_buffer, 0);
+ if (vp_info)
+ *vp_info = iprv.v0;
+ if (vmm_id)
+ *vmm_id = iprv.v1;
+ return iprv.status;
+}
+
+typedef union pal_itr_valid_u {
+ u64 piv_val;
+ struct {
+ u64 access_rights_valid : 1,
+ priv_level_valid : 1,
+ dirty_bit_valid : 1,
+ mem_attr_valid : 1,
+ reserved : 60;
+ } pal_tr_valid_s;
+} pal_tr_valid_u_t;
+
+/* Read a translation register */
+static inline s64
+ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type, (u64)ia64_tpa(tr_buffer));
+ if (tr_valid)
+ tr_valid->piv_val = iprv.v0;
+ return iprv.status;
+}
+
+/*
+ * PAL_PREFETCH_VISIBILITY transaction types
+ */
+#define PAL_VISIBILITY_VIRTUAL 0
+#define PAL_VISIBILITY_PHYSICAL 1
+
+/*
+ * PAL_PREFETCH_VISIBILITY return codes
+ */
+#define PAL_VISIBILITY_OK 1
+#define PAL_VISIBILITY_OK_REMOTE_NEEDED 0
+#define PAL_VISIBILITY_INVAL_ARG -2
+#define PAL_VISIBILITY_ERROR -3
+
+static inline s64
+ia64_pal_prefetch_visibility (s64 trans_type)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, trans_type, 0, 0);
+ return iprv.status;
+}
+
+/* data structure for getting information on logical to physical mappings */
+typedef union pal_log_overview_u {
+ struct {
+ u64 num_log :16, /* Total number of logical
+ * processors on this die
+ */
+ tpc :8, /* Threads per core */
+ reserved3 :8, /* Reserved */
+ cpp :8, /* Cores per processor */
+ reserved2 :8, /* Reserved */
+ ppid :8, /* Physical processor ID */
+ reserved1 :8; /* Reserved */
+ } overview_bits;
+ u64 overview_data;
+} pal_log_overview_t;
+
+typedef union pal_proc_n_log_info1_u{
+ struct {
+ u64 tid :16, /* Thread id */
+ reserved2 :16, /* Reserved */
+ cid :16, /* Core id */
+ reserved1 :16; /* Reserved */
+ } ppli1_bits;
+ u64 ppli1_data;
+} pal_proc_n_log_info1_t;
+
+typedef union pal_proc_n_log_info2_u {
+ struct {
+ u64 la :16, /* Logical address */
+ reserved :48; /* Reserved */
+ } ppli2_bits;
+ u64 ppli2_data;
+} pal_proc_n_log_info2_t;
+
+typedef struct pal_logical_to_physical_s
+{
+ pal_log_overview_t overview;
+ pal_proc_n_log_info1_t ppli1;
+ pal_proc_n_log_info2_t ppli2;
+} pal_logical_to_physical_t;
+
+#define overview_num_log overview.overview_bits.num_log
+#define overview_tpc overview.overview_bits.tpc
+#define overview_cpp overview.overview_bits.cpp
+#define overview_ppid overview.overview_bits.ppid
+#define log1_tid ppli1.ppli1_bits.tid
+#define log1_cid ppli1.ppli1_bits.cid
+#define log2_la ppli2.ppli2_bits.la
+
+/* Get information on logical to physical processor mappings. */
+static inline s64
+ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_LOGICAL_TO_PHYSICAL, proc_number, 0, 0);
+
+ if (iprv.status == PAL_STATUS_SUCCESS)
+ {
+ mapping->overview.overview_data = iprv.v0;
+ mapping->ppli1.ppli1_data = iprv.v1;
+ mapping->ppli2.ppli2_data = iprv.v2;
+ }
+
+ return iprv.status;
+}
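+
+/*
+ * The accessor macros above make the decode compact; passing -1 queries the
+ * calling processor (illustrative sketch):
+ *
+ *	pal_logical_to_physical_t info;
+ *	unsigned int tpc, cpp;
+ *
+ *	if (ia64_pal_logical_to_phys(-1, &info) == PAL_STATUS_SUCCESS) {
+ *		tpc = info.overview_tpc;
+ *		cpp = info.overview_cpp;
+ *	}
+ */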
+
+typedef struct pal_cache_shared_info_s
+{
+ u64 num_shared;
+ pal_proc_n_log_info1_t ppli1;
+ pal_proc_n_log_info2_t ppli2;
+} pal_cache_shared_info_t;
+
+/* Get information about processors sharing a cache of the given level and type. */
+static inline s64
+ia64_pal_cache_shared_info(u64 level,
+ u64 type,
+ u64 proc_number,
+ pal_cache_shared_info_t *info)
+{
+ struct ia64_pal_retval iprv;
+
+ PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
+
+ if (iprv.status == PAL_STATUS_SUCCESS) {
+ info->num_shared = iprv.v0;
+ info->ppli1.ppli1_data = iprv.v1;
+ info->ppli2.ppli2_data = iprv.v2;
+ }
+
+ return iprv.status;
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PAL_H */
diff --git a/arch/ia64/include/asm/param.h b/arch/ia64/include/asm/param.h
new file mode 100644
index 000000000000..0964c32c1358
--- /dev/null
+++ b/arch/ia64/include/asm/param.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_IA64_PARAM_H
+#define _ASM_IA64_PARAM_H
+
+/*
+ * Fundamental kernel parameters.
+ *
+ * Based on <asm-i386/param.h>.
+ *
+ * Modified 1998, 1999, 2002-2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#define EXEC_PAGESIZE 65536
+
+#ifndef NOGROUP
+# define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#ifdef __KERNEL__
+# define HZ CONFIG_HZ
+# define USER_HZ HZ
+# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
+#else
+ /*
+ * Technically, this is wrong, but some old apps still refer to it. The proper way to
+ * get the HZ value is via sysconf(_SC_CLK_TCK).
+ */
+# define HZ 1024
+#endif
+
+#endif /* _ASM_IA64_PARAM_H */
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
new file mode 100644
index 000000000000..660cab044834
--- /dev/null
+++ b/arch/ia64/include/asm/paravirt.h
@@ -0,0 +1,253 @@
+/******************************************************************************
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+
+#ifndef __ASM_PARAVIRT_H
+#define __ASM_PARAVIRT_H
+
+#ifdef CONFIG_PARAVIRT_GUEST
+
+#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
+#define PARAVIRT_HYPERVISOR_TYPE_XEN 1
+
+#ifndef __ASSEMBLY__
+
+#include <asm/hw_irq.h>
+#include <asm/meminit.h>
+
+/******************************************************************************
+ * general info
+ */
+struct pv_info {
+ unsigned int kernel_rpl;
+ int paravirt_enabled;
+ const char *name;
+};
+
+extern struct pv_info pv_info;
+
+static inline int paravirt_enabled(void)
+{
+ return pv_info.paravirt_enabled;
+}
+
+static inline unsigned int get_kernel_rpl(void)
+{
+ return pv_info.kernel_rpl;
+}
+
+/******************************************************************************
+ * initialization hooks.
+ */
+struct rsvd_region;
+
+struct pv_init_ops {
+ void (*banner)(void);
+
+ int (*reserve_memory)(struct rsvd_region *region);
+
+ void (*arch_setup_early)(void);
+ void (*arch_setup_console)(char **cmdline_p);
+ int (*arch_setup_nomca)(void);
+
+ void (*post_smp_prepare_boot_cpu)(void);
+};
+
+extern struct pv_init_ops pv_init_ops;
+
+static inline void paravirt_banner(void)
+{
+ if (pv_init_ops.banner)
+ pv_init_ops.banner();
+}
+
+static inline int paravirt_reserve_memory(struct rsvd_region *region)
+{
+ if (pv_init_ops.reserve_memory)
+ return pv_init_ops.reserve_memory(region);
+ return 0;
+}
+
+static inline void paravirt_arch_setup_early(void)
+{
+ if (pv_init_ops.arch_setup_early)
+ pv_init_ops.arch_setup_early();
+}
+
+static inline void paravirt_arch_setup_console(char **cmdline_p)
+{
+ if (pv_init_ops.arch_setup_console)
+ pv_init_ops.arch_setup_console(cmdline_p);
+}
+
+static inline int paravirt_arch_setup_nomca(void)
+{
+ if (pv_init_ops.arch_setup_nomca)
+ return pv_init_ops.arch_setup_nomca();
+ return 0;
+}
+
+static inline void paravirt_post_smp_prepare_boot_cpu(void)
+{
+ if (pv_init_ops.post_smp_prepare_boot_cpu)
+ pv_init_ops.post_smp_prepare_boot_cpu();
+}
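+
+/*
+ * A hypervisor port fills in pv_init_ops during early boot; a minimal
+ * sketch (the xen_* names here are hypothetical):
+ *
+ *	static const struct pv_init_ops xen_init_ops = {
+ *		.banner		  = xen_banner,
+ *		.reserve_memory	  = xen_reserve_memory,
+ *		.arch_setup_early = xen_arch_setup_early,
+ *	};
+ *
+ *	pv_init_ops = xen_init_ops;
+ */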
+
+/******************************************************************************
+ * replacement of iosapic operations.
+ */
+
+struct pv_iosapic_ops {
+ void (*pcat_compat_init)(void);
+
+ struct irq_chip *(*get_irq_chip)(unsigned long trigger);
+
+ unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
+ void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
+};
+
+extern struct pv_iosapic_ops pv_iosapic_ops;
+
+static inline void
+iosapic_pcat_compat_init(void)
+{
+ if (pv_iosapic_ops.pcat_compat_init)
+ pv_iosapic_ops.pcat_compat_init();
+}
+
+static inline struct irq_chip*
+iosapic_get_irq_chip(unsigned long trigger)
+{
+ return pv_iosapic_ops.get_irq_chip(trigger);
+}
+
+static inline unsigned int
+__iosapic_read(char __iomem *iosapic, unsigned int reg)
+{
+ return pv_iosapic_ops.__read(iosapic, reg);
+}
+
+static inline void
+__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+{
+ return pv_iosapic_ops.__write(iosapic, reg, val);
+}
+
+/******************************************************************************
+ * replacement of irq operations.
+ */
+
+struct pv_irq_ops {
+ void (*register_ipi)(void);
+
+ int (*assign_irq_vector)(int irq);
+ void (*free_irq_vector)(int vector);
+
+ void (*register_percpu_irq)(ia64_vector vec,
+ struct irqaction *action);
+
+ void (*resend_irq)(unsigned int vector);
+};
+
+extern struct pv_irq_ops pv_irq_ops;
+
+static inline void
+ia64_register_ipi(void)
+{
+ pv_irq_ops.register_ipi();
+}
+
+static inline int
+assign_irq_vector(int irq)
+{
+ return pv_irq_ops.assign_irq_vector(irq);
+}
+
+static inline void
+free_irq_vector(int vector)
+{
+ return pv_irq_ops.free_irq_vector(vector);
+}
+
+static inline void
+register_percpu_irq(ia64_vector vec, struct irqaction *action)
+{
+ pv_irq_ops.register_percpu_irq(vec, action);
+}
+
+static inline void
+ia64_resend_irq(unsigned int vector)
+{
+ pv_irq_ops.resend_irq(vector);
+}
+
+/******************************************************************************
+ * replacement of time operations.
+ */
+
+extern struct itc_jitter_data_t itc_jitter_data;
+extern volatile int time_keeper_id;
+
+struct pv_time_ops {
+ void (*init_missing_ticks_accounting)(int cpu);
+ int (*do_steal_accounting)(unsigned long *new_itm);
+
+ void (*clocksource_resume)(void);
+};
+
+extern struct pv_time_ops pv_time_ops;
+
+static inline void
+paravirt_init_missing_ticks_accounting(int cpu)
+{
+ if (pv_time_ops.init_missing_ticks_accounting)
+ pv_time_ops.init_missing_ticks_accounting(cpu);
+}
+
+static inline int
+paravirt_do_steal_accounting(unsigned long *new_itm)
+{
+ return pv_time_ops.do_steal_accounting(new_itm);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#else
+/* fallback for native case */
+
+#ifndef __ASSEMBLY__
+
+#define paravirt_banner() do { } while (0)
+#define paravirt_reserve_memory(region) 0
+
+#define paravirt_arch_setup_early() do { } while (0)
+#define paravirt_arch_setup_console(cmdline_p) do { } while (0)
+#define paravirt_arch_setup_nomca() 0
+#define paravirt_post_smp_prepare_boot_cpu() do { } while (0)
+
+#define paravirt_init_missing_ticks_accounting(cpu) do { } while (0)
+#define paravirt_do_steal_accounting(new_itm) 0
+
+#endif /* __ASSEMBLY__ */
+
+
+#endif /* CONFIG_PARAVIRT_GUEST */
+
+#endif /* __ASM_PARAVIRT_H */
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
new file mode 100644
index 000000000000..d577aac11835
--- /dev/null
+++ b/arch/ia64/include/asm/paravirt_privop.h
@@ -0,0 +1,112 @@
+/******************************************************************************
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
+#define _ASM_IA64_PARAVIRT_PRIVOP_H
+
+#ifdef CONFIG_PARAVIRT
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/kregs.h> /* for IA64_PSR_I */
+
+/******************************************************************************
+ * replacement of intrinsics operations.
+ */
+
+struct pv_cpu_ops {
+ void (*fc)(unsigned long addr);
+ unsigned long (*thash)(unsigned long addr);
+ unsigned long (*get_cpuid)(int index);
+ unsigned long (*get_pmd)(int index);
+ unsigned long (*getreg)(int reg);
+ void (*setreg)(int reg, unsigned long val);
+ void (*ptcga)(unsigned long addr, unsigned long size);
+ unsigned long (*get_rr)(unsigned long index);
+ void (*set_rr)(unsigned long index, unsigned long val);
+ void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
+ unsigned long val2, unsigned long val3,
+ unsigned long val4);
+ void (*ssm_i)(void);
+ void (*rsm_i)(void);
+ unsigned long (*get_psr_i)(void);
+ void (*intrin_local_irq_restore)(unsigned long flags);
+};
+
+extern struct pv_cpu_ops pv_cpu_ops;
+
+extern void ia64_native_setreg_func(int regnum, unsigned long val);
+extern unsigned long ia64_native_getreg_func(int regnum);
+
+/************************************************/
+/* Instructions paravirtualized for performance */
+/************************************************/
+
+/* The mask for ia64_native_ssm/rsm() must be a constant (the "i" constraint);
+ * a static inline function can't satisfy that. */
+#define paravirt_ssm(mask) \
+ do { \
+ if ((mask) == IA64_PSR_I) \
+ pv_cpu_ops.ssm_i(); \
+ else \
+ ia64_native_ssm(mask); \
+ } while (0)
+
+#define paravirt_rsm(mask) \
+ do { \
+ if ((mask) == IA64_PSR_I) \
+ pv_cpu_ops.rsm_i(); \
+ else \
+ ia64_native_rsm(mask); \
+ } while (0)
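+
+/*
+ * E.g. enabling interrupts paravirtually reduces to
+ *
+ *	paravirt_ssm(IA64_PSR_I);
+ *
+ * which dispatches to pv_cpu_ops.ssm_i() under a hypervisor and to
+ * ia64_native_ssm() otherwise (an illustrative note, not extra API).
+ */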
+
+/******************************************************************************
+ * replacement of hand written assembly codes.
+ */
+struct pv_cpu_asm_switch {
+ unsigned long switch_to;
+ unsigned long leave_syscall;
+ unsigned long work_processed_syscall;
+ unsigned long leave_kernel;
+};
+void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
+
+#endif /* __ASSEMBLY__ */
+
+#define IA64_PARAVIRT_ASM_FUNC(name) paravirt_ ## name
+
+#else
+
+/* fallback for native case */
+#define IA64_PARAVIRT_ASM_FUNC(name) ia64_native_ ## name
+
+#endif /* CONFIG_PARAVIRT */
+
+/* these routines utilize privilege-sensitive or performance-sensitive
+ * privileged instructions so the code must be replaced with
+ * paravirtualized versions */
+#define ia64_switch_to IA64_PARAVIRT_ASM_FUNC(switch_to)
+#define ia64_leave_syscall IA64_PARAVIRT_ASM_FUNC(leave_syscall)
+#define ia64_work_processed_syscall \
+ IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
+#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel)
+
+#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
diff --git a/arch/ia64/include/asm/parport.h b/arch/ia64/include/asm/parport.h
new file mode 100644
index 000000000000..67e16adfcd25
--- /dev/null
+++ b/arch/ia64/include/asm/parport.h
@@ -0,0 +1,20 @@
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_IA64_PARPORT_H
+#define _ASM_IA64_PARPORT_H 1
+
+static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
+
+static int __devinit
+parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+ return parport_pc_find_isa_ports(autoirq, autodma);
+}
+
+#endif /* _ASM_IA64_PARPORT_H */
diff --git a/arch/ia64/include/asm/patch.h b/arch/ia64/include/asm/patch.h
new file mode 100644
index 000000000000..295fe6ab4584
--- /dev/null
+++ b/arch/ia64/include/asm/patch.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_IA64_PATCH_H
+#define _ASM_IA64_PATCH_H
+
+/*
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * There are a number of reasons for patching instructions. Rather than duplicating code
+ * all over the place, we put the common stuff here. Reasons for patching: in-kernel
+ * module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
+ * shared library. Undoubtedly, some of these reasons will disappear and others will
+ * be added over time.
+ */
+#include <linux/elf.h>
+#include <linux/types.h>
+
+extern void ia64_patch (u64 insn_addr, u64 mask, u64 val); /* patch any insn slot */
+extern void ia64_patch_imm64 (u64 insn_addr, u64 val); /* patch "movl" w/abs. value*/
+extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel value */
+
+extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
+extern void ia64_patch_vtop (unsigned long start, unsigned long end);
+extern void ia64_patch_phys_stack_reg(unsigned long val);
+extern void ia64_patch_rse (unsigned long start, unsigned long end);
+extern void ia64_patch_gate (void);
+
+#endif /* _ASM_IA64_PATCH_H */
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
new file mode 100644
index 000000000000..0149097b736d
--- /dev/null
+++ b/arch/ia64/include/asm/pci.h
@@ -0,0 +1,167 @@
+#ifndef _ASM_IA64_PCI_H
+#define _ASM_IA64_PCI_H
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+#include <asm/hw_irq.h>
+
+/*
+ * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
+ * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
+ * loader.
+ */
+#define pcibios_assign_all_busses() 0
+#define pcibios_scan_all_fns(a, b) 0
+
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+void pcibios_config_init(void);
+
+struct pci_dev;
+
+/*
+ * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
+ * correspondence between device bus addresses and CPU physical addresses.
+ * Platforms with a hardware I/O MMU _must_ turn this off to suppress the
+ * bounce buffer handling code in the block and network device layers.
+ * Platforms with separate bus address spaces _must_ turn this off and provide
+ * a device DMA mapping implementation that takes care of the necessary
+ * address translation.
+ *
+ * For now, the ia64 platforms which may have separate/multiple bus address
+ * spaces all have I/O MMUs which support the merging of physically
+ * discontiguous buffers, so we can use that as the sole factor to determine
+ * the setting of PCI_DMA_BUS_IS_PHYS.
+ */
+extern unsigned long ia64_max_iommu_merge_mask;
+#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
+
+static inline void
+pcibios_set_master (struct pci_dev *dev)
+{
+ /* No special bus mastering setup handling */
+}
+
+static inline void
+pcibios_penalize_isa_irq (int irq, int active)
+{
+ /* We don't do dynamic PCI IRQ allocation */
+}
+
+#include <asm-generic/pci-dma-compat.h>
+
+/* pci_unmap_{single,page} is not a nop, thus... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+
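These macros support the usual driver idiom (see Documentation/DMA-mapping.txt): stash the DMA address and length in a driver-private structure at map time, and read them back at unmap time. A sketch with illustrative names (ring_entry, mapping and len are not part of this patch):

	struct ring_entry {
		struct sk_buff *skb;
		DECLARE_PCI_UNMAP_ADDR(mapping)
		DECLARE_PCI_UNMAP_LEN(len)
	};

	static void ring_entry_unmap(struct pci_dev *pdev, struct ring_entry *e)
	{
		pci_unmap_single(pdev, pci_unmap_addr(e, mapping),
				 pci_unmap_len(e, len), PCI_DMA_FROMDEVICE);
	}

At map time the driver would store the pair with pci_unmap_addr_set(e, mapping, dma) and pci_unmap_len_set(e, len, size).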
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ unsigned long cacheline_size;
+ u8 byte;
+
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
+ if (byte == 0)
+ cacheline_size = 1024;
+ else
+ cacheline_size = (int) byte * 4;
+
+ *strat = PCI_DMA_BURST_MULTIPLE;
+ *strategy_parameter = cacheline_size;
+}
+#endif
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+#define HAVE_PCI_LEGACY
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+ struct vm_area_struct *vma);
+extern ssize_t pci_read_legacy_io(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count);
+extern ssize_t pci_write_legacy_io(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count);
+extern int pci_mmap_legacy_mem(struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma);
+
+#define pci_get_legacy_mem platform_pci_get_legacy_mem
+#define pci_legacy_read platform_pci_legacy_read
+#define pci_legacy_write platform_pci_legacy_write
+
+struct pci_window {
+ struct resource resource;
+ u64 offset;
+};
+
+struct pci_controller {
+ void *acpi_handle;
+ void *iommu;
+ int segment;
+ int node; /* nearest node with memory or -1 for global allocation */
+
+ unsigned int windows;
+ struct pci_window *window;
+
+ void *platform_data;
+};
+
+#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
+#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment)
+
+extern struct pci_ops pci_root_ops;
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return (pci_domain_nr(bus) != 0);
+}
+
+extern void pcibios_resource_to_bus(struct pci_dev *dev,
+ struct pci_bus_region *region, struct resource *res);
+
+extern void pcibios_bus_to_resource(struct pci_dev *dev,
+ struct resource *res, struct pci_bus_region *region);
+
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+ struct resource *root = NULL;
+
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+
+ return root;
+}
+
+#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
+}
+
+#endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
new file mode 100644
index 000000000000..77f30b664b4e
--- /dev/null
+++ b/arch/ia64/include/asm/percpu.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_IA64_PERCPU_H
+#define _ASM_IA64_PERCPU_H
+
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
+
+#ifdef __ASSEMBLY__
+# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
+#else /* !__ASSEMBLY__ */
+
+
+#include <linux/threads.h>
+
+#ifdef CONFIG_SMP
+
+#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
+# define PER_CPU_ATTRIBUTES __attribute__((__model__ (__small__)))
+#endif
+
+#define __my_cpu_offset __ia64_per_cpu_var(local_per_cpu_offset)
+
+extern void *per_cpu_init(void);
+
+#else /* ! SMP */
+
+#define PER_CPU_ATTRIBUTES __attribute__((__section__(".data.percpu")))
+
+#define per_cpu_init() (__phys_per_cpu_start)
+
+#endif /* SMP */
+
+/*
+ * Be extremely careful when taking the address of this variable! Due to virtual
+ * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
+ * more efficient.
+ */
+#define __ia64_per_cpu_var(var) per_cpu__##var
+
+#include <asm-generic/percpu.h>
+
+/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
+DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PERCPU_H */
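To illustrate the distinction the comment above draws, a sketch with a hypothetical per-CPU variable:

	DEFINE_PER_CPU(unsigned long, my_counter);	/* hypothetical */

	static void bump_counter(void)
	{
		/* fast, virtually-remapped access; valid only on the running CPU */
		__ia64_per_cpu_var(my_counter)++;
	}

	static unsigned long read_counter(int cpu)
	{
		/* canonical address, valid from any CPU */
		return per_cpu(my_counter, cpu);
	}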
diff --git a/arch/ia64/include/asm/perfmon.h b/arch/ia64/include/asm/perfmon.h
new file mode 100644
index 000000000000..7f3333dd00e4
--- /dev/null
+++ b/arch/ia64/include/asm/perfmon.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ */
+
+#ifndef _ASM_IA64_PERFMON_H
+#define _ASM_IA64_PERFMON_H
+
+/*
+ * perfmon commands supported on all CPU models
+ */
+#define PFM_WRITE_PMCS 0x01
+#define PFM_WRITE_PMDS 0x02
+#define PFM_READ_PMDS 0x03
+#define PFM_STOP 0x04
+#define PFM_START 0x05
+#define PFM_ENABLE 0x06 /* obsolete */
+#define PFM_DISABLE 0x07 /* obsolete */
+#define PFM_CREATE_CONTEXT 0x08
+#define PFM_DESTROY_CONTEXT 0x09 /* obsolete: use close() */
+#define PFM_RESTART 0x0a
+#define PFM_PROTECT_CONTEXT 0x0b /* obsolete */
+#define PFM_GET_FEATURES 0x0c
+#define PFM_DEBUG 0x0d
+#define PFM_UNPROTECT_CONTEXT 0x0e /* obsolete */
+#define PFM_GET_PMC_RESET_VAL 0x0f
+#define PFM_LOAD_CONTEXT 0x10
+#define PFM_UNLOAD_CONTEXT 0x11
+
+/*
+ * PMU model specific commands (may not be supported on all PMU models)
+ */
+#define PFM_WRITE_IBRS 0x20
+#define PFM_WRITE_DBRS 0x21
+
+/*
+ * context flags
+ */
+#define PFM_FL_NOTIFY_BLOCK 0x01 /* block task on user level notifications */
+#define PFM_FL_SYSTEM_WIDE 0x02 /* create a system wide context */
+#define PFM_FL_OVFL_NO_MSG 0x80 /* do not post overflow/end messages for notification */
+
+/*
+ * event set flags
+ */
+#define PFM_SETFL_EXCL_IDLE 0x01 /* exclude idle task (syswide only) XXX: DO NOT USE YET */
+
+/*
+ * PMC flags
+ */
+#define PFM_REGFL_OVFL_NOTIFY 0x1 /* send notification on overflow */
+#define PFM_REGFL_RANDOM 0x2 /* randomize sampling interval */
+
+/*
+ * PMD/PMC/IBR/DBR return flags (ignored on input)
+ *
+ * Those flags are used on output and must be checked in case EAGAIN is returned
+ * by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
+ */
+#define PFM_REG_RETFL_NOTAVAIL (1UL<<31) /* set if register is implemented but not available */
+#define PFM_REG_RETFL_EINVAL (1UL<<30) /* set if register entry is invalid */
+#define PFM_REG_RETFL_MASK (PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL)
+
+#define PFM_REG_HAS_ERROR(flag) (((flag) & PFM_REG_RETFL_MASK) != 0)
+
+typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type */
+
+/*
+ * Request structure used to define a context
+ */
+typedef struct {
+ pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
+ unsigned long ctx_flags; /* noblock/block */
+ unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */
+ unsigned short ctx_reserved1; /* for future use */
+ int ctx_fd; /* return arg: unique identification for context */
+ void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, if used */
+ unsigned long ctx_reserved2[11];/* for future use */
+} pfarg_context_t;
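Creating a context is the first step of any perfmon session: zero the structure, issue PFM_CREATE_CONTEXT, and use the returned file descriptor for all further calls. A minimal user-level sketch (error handling trimmed; perfmonctl() is the ia64 system call declared further down in this header):

	#include <string.h>

	int create_pfm_context(void)
	{
		pfarg_context_t ctx;

		memset(&ctx, 0, sizeof(ctx));	/* plain, non-sampling context */
		if (perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1) < 0)
			return -1;
		return ctx.ctx_fd;	/* identifies the context from here on */
	}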
+
+/*
+ * Request structure used to write/read a PMC or PMD
+ */
+typedef struct {
+ unsigned int reg_num; /* which register */
+ unsigned short reg_set; /* event set for this register */
+ unsigned short reg_reserved1; /* for future use */
+
+ unsigned long reg_value; /* initial pmc/pmd value */
+ unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
+
+ unsigned long reg_long_reset; /* reset after buffer overflow notification */
+ unsigned long reg_short_reset; /* reset after counter overflow */
+
+ unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
+ unsigned long reg_random_seed; /* seed value when randomization is used */
+ unsigned long reg_random_mask; /* bitmask used to limit random value */
+ unsigned long reg_last_reset_val;/* return: PMD last reset value */
+
+ unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
+ unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
+
+ unsigned long reg_reserved2[3]; /* for future use */
+} pfarg_reg_t;
+
+typedef struct {
+ unsigned int dbreg_num; /* which debug register */
+ unsigned short dbreg_set; /* event set for this register */
+ unsigned short dbreg_reserved1; /* for future use */
+ unsigned long dbreg_value; /* value for debug register */
+ unsigned long dbreg_flags; /* return: dbreg error */
+ unsigned long dbreg_reserved2[1]; /* for future use */
+} pfarg_dbreg_t;
+
+typedef struct {
+ unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */
+ unsigned int ft_reserved; /* reserved for future use */
+ unsigned long reserved[4]; /* for future use */
+} pfarg_features_t;
+
+typedef struct {
+ pid_t load_pid; /* process to load the context into */
+ unsigned short load_set; /* first event set to load */
+ unsigned short load_reserved1; /* for future use */
+ unsigned long load_reserved2[3]; /* for future use */
+} pfarg_load_t;
+
+typedef struct {
+ int msg_type; /* generic message header */
+ int msg_ctx_fd; /* generic message header */
+ unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
+ unsigned short msg_active_set; /* active set at the time of overflow */
+ unsigned short msg_reserved1; /* for future use */
+ unsigned int msg_reserved2; /* for future use */
+ unsigned long msg_tstamp; /* for perf tuning/debug */
+} pfm_ovfl_msg_t;
+
+typedef struct {
+ int msg_type; /* generic message header */
+ int msg_ctx_fd; /* generic message header */
+ unsigned long msg_tstamp; /* for perf tuning */
+} pfm_end_msg_t;
+
+typedef struct {
+ int msg_type; /* type of the message */
+ int msg_ctx_fd; /* unique identifier for the context */
+ unsigned long msg_tstamp; /* for perf tuning */
+} pfm_gen_msg_t;
+
+#define PFM_MSG_OVFL 1 /* an overflow happened */
+#define PFM_MSG_END 2 /* task to which context was attached ended */
+
+typedef union {
+ pfm_ovfl_msg_t pfm_ovfl_msg;
+ pfm_end_msg_t pfm_end_msg;
+ pfm_gen_msg_t pfm_gen_msg;
+} pfm_msg_t;
+
+/*
+ * Define the version numbers for both perfmon as a whole and the sampling buffer format.
+ */
+#define PFM_VERSION_MAJ 2U
+#define PFM_VERSION_MIN 0U
+#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
+#define PFM_VERSION_MAJOR(x) (((x)>>16) & 0xffff)
+#define PFM_VERSION_MINOR(x) ((x) & 0xffff)
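Compatibility is judged on the major number only. A sketch of the intended check, using PFM_GET_FEATURES and the pfarg_features_t structure from above:

	pfarg_features_t ft;

	memset(&ft, 0, sizeof(ft));
	if (perfmonctl(0, PFM_GET_FEATURES, &ft, 1) == 0 &&
	    PFM_VERSION_MAJOR(ft.ft_version) == PFM_VERSION_MAJOR(PFM_VERSION)) {
		/* kernel implements the interface revision we were built against */
	}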
+
+
+/*
+ * miscellaneous architected definitions
+ */
+#define PMU_FIRST_COUNTER 4 /* first counting monitor (PMC/PMD) */
+#define PMU_MAX_PMCS 256 /* maximum architected number of PMC registers */
+#define PMU_MAX_PMDS 256 /* maximum architected number of PMD registers */
+
+#ifdef __KERNEL__
+
+extern long perfmonctl(int fd, int cmd, void *arg, int narg);
+
+typedef struct {
+ void (*handler)(int irq, void *arg, struct pt_regs *regs);
+} pfm_intr_handler_desc_t;
+
+extern void pfm_save_regs (struct task_struct *);
+extern void pfm_load_regs (struct task_struct *);
+
+extern void pfm_exit_thread(struct task_struct *);
+extern int pfm_use_debug_registers(struct task_struct *);
+extern int pfm_release_debug_registers(struct task_struct *);
+extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
+extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
+extern void pfm_init_percpu(void);
+extern void pfm_handle_work(void);
+extern int pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
+extern int pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *h);
+
+
+
+/*
+ * Reset PMD register flags
+ */
+#define PFM_PMD_SHORT_RESET 0
+#define PFM_PMD_LONG_RESET 1
+
+typedef union {
+ unsigned int val;
+ struct {
+ unsigned int notify_user:1; /* notify user program of overflow */
+ unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */
+ unsigned int block_task:1; /* block monitored task on kernel exit */
+ unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */
+ unsigned int reserved:28; /* for future use */
+ } bits;
+} pfm_ovfl_ctrl_t;
+
+typedef struct {
+ unsigned char ovfl_pmd; /* index of overflowed PMD */
+ unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */
+ unsigned short active_set; /* event set active at the time of the overflow */
+ pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
+
+ unsigned long pmd_last_reset; /* last reset value of the PMD */
+ unsigned long smpl_pmds[4]; /* bitmask of other PMD of interest on overflow */
+ unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */
+ unsigned long pmd_value; /* current 64-bit value of the PMD */
+ unsigned long pmd_eventid; /* eventid associated with PMD */
+} pfm_ovfl_arg_t;
+
+
+typedef struct {
+ char *fmt_name;
+ pfm_uuid_t fmt_uuid;
+ size_t fmt_arg_size;
+ unsigned long fmt_flags;
+
+ int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
+ int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
+ int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
+ int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp);
+ int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
+ int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
+ int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
+
+ struct list_head fmt_list;
+} pfm_buffer_fmt_t;
+
+extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
+extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
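A sampling-format module fills in a pfm_buffer_fmt_t and registers it; the UUID is what user programs later pass in ctx_smpl_buf_id. A minimal sketch (the names, UUID bytes, and handler policy are illustrative only):

	static int my_ovfl_handler(struct task_struct *task, void *buf,
				   pfm_ovfl_arg_t *arg, struct pt_regs *regs,
				   unsigned long stamp)
	{
		arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;	/* resume counting */
		return 0;
	}

	static pfm_buffer_fmt_t my_fmt = {
		.fmt_name    = "example-fmt",
		.fmt_uuid    = { 0x01, 0x02, 0x03, 0x04, /* ...12 more bytes... */ },
		.fmt_handler = my_ovfl_handler,
	};

	/* pfm_register_buffer_fmt(&my_fmt) at module init,
	   pfm_unregister_buffer_fmt(my_fmt.fmt_uuid) at module exit */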
+
+/*
+ * perfmon interface exported to modules
+ */
+extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
+extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
+
+/*
+ * describes the content of the local_cpu_data->pfm_syst_info field
+ */
+#define PFM_CPUINFO_SYST_WIDE 0x1 /* if set a system wide session exists */
+#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */
+#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
+
+/*
+ * sysctl control structure. visible to sampling formats
+ */
+typedef struct {
+ int debug; /* turn on/off debugging via syslog */
+ int debug_ovfl; /* turn on/off debug printk in overflow handler */
+ int fastctxsw; /* turn on/off fast (insecure) ctxsw */
+ int expert_mode; /* turn on/off value checking */
+} pfm_sysctl_t;
+extern pfm_sysctl_t pfm_sysctl;
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_PERFMON_H */
diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h
new file mode 100644
index 000000000000..48822c0811d8
--- /dev/null
+++ b/arch/ia64/include/asm/perfmon_default_smpl.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * This file implements the default sampling buffer format
+ * for Linux/ia64 perfmon subsystem.
+ */
+#ifndef __PERFMON_DEFAULT_SMPL_H__
+#define __PERFMON_DEFAULT_SMPL_H__ 1
+
+#define PFM_DEFAULT_SMPL_UUID { \
+ 0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}
+
+/*
+ * format specific parameters (passed at context creation)
+ */
+typedef struct {
+ unsigned long buf_size; /* size of the buffer in bytes */
+ unsigned int flags; /* buffer specific flags */
+ unsigned int res1; /* for future use */
+ unsigned long reserved[2]; /* for future use */
+} pfm_default_smpl_arg_t;
+
+/*
+ * combined context+format specific structure. Can be passed
+ * to PFM_CONTEXT_CREATE
+ */
+typedef struct {
+ pfarg_context_t ctx_arg;
+ pfm_default_smpl_arg_t buf_arg;
+} pfm_default_smpl_ctx_arg_t;
+
+/*
+ * This header is at the beginning of the sampling buffer returned to the user.
+ * It is directly followed by the first record.
+ */
+typedef struct {
+ unsigned long hdr_count; /* how many valid entries */
+ unsigned long hdr_cur_offs; /* current offset from top of buffer */
+ unsigned long hdr_reserved2; /* reserved for future use */
+
+ unsigned long hdr_overflows; /* how many times the buffer overflowed */
+ unsigned long hdr_buf_size; /* how many bytes in the buffer */
+
+ unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
+ unsigned int hdr_reserved1; /* for future use */
+ unsigned long hdr_reserved[10]; /* for future use */
+} pfm_default_smpl_hdr_t;
+
+/*
+ * Entry header in the sampling buffer. The header is directly followed
+ * by the values of the PMD registers of interest, saved in increasing
+ * index order: PMD4, PMD5, and so on. How many PMDs are present depends
+ * on how the session was programmed.
+ *
+ * In the case where multiple counters overflow at the same time, multiple
+ * entries are written consecutively.
+ *
+ * The last_reset_val member indicates the initial value of the overflowed PMD.
+ */
+typedef struct {
+ int pid; /* thread id (for NPTL, this is gettid()) */
+ unsigned char reserved1[3]; /* reserved for future use */
+ unsigned char ovfl_pmd; /* index of overflowed PMD */
+
+ unsigned long last_reset_val; /* initial value of overflowed PMD */
+ unsigned long ip; /* where the overflow interrupt happened */
+ unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
+
+ unsigned short cpu; /* cpu on which the overflow occurred */
+ unsigned short set; /* event set active when the overflow occurred */
+ int tgid; /* thread group id (for NPTL, this is getpid()) */
+} pfm_default_smpl_entry_t;
+
+#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds the data structures support (bits in an unsigned long) */
+#define PFM_DEFAULT_MAX_ENTRY_SIZE (sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
+#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE (sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)
+
+#define PFM_DEFAULT_SMPL_VERSION_MAJ 2U
+#define PFM_DEFAULT_SMPL_VERSION_MIN 0U
+#define PFM_DEFAULT_SMPL_VERSION (((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))
+
+#endif /* __PERFMON_DEFAULT_SMPL_H__ */
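Putting the header and entry layout together, a user-level walk of a mapped buffer might look like this (a sketch: npmds must match the number of PMDs selected in reg_smpl_pmds when the counters were programmed):

	#include <stdio.h>

	static void walk_buffer(void *buf, unsigned int npmds)
	{
		pfm_default_smpl_hdr_t   *hdr = buf;
		pfm_default_smpl_entry_t *ent = (pfm_default_smpl_entry_t *) (hdr + 1);
		unsigned long i;

		for (i = 0; i < hdr->hdr_count; i++) {
			unsigned long *pmds = (unsigned long *) (ent + 1);

			printf("pid=%d ip=%#lx first-pmd=%#lx\n",
			       ent->pid, ent->ip, pmds[0]);
			/* the next entry follows this entry's sampled PMD values */
			ent = (pfm_default_smpl_entry_t *) (pmds + npmds);
		}
	}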
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
new file mode 100644
index 000000000000..b9ac1a6fc216
--- /dev/null
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -0,0 +1,122 @@
+#ifndef _ASM_IA64_PGALLOC_H
+#define _ASM_IA64_PGALLOC_H
+
+/*
+ * This file contains the functions and defines necessary to allocate
+ * page tables.
+ *
+ * This hopefully works with any (fixed) IA-64 page-size, as defined
+ * in <asm/page.h> (currently 8192).
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
+ */
+
+
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/threads.h>
+#include <linux/quicklist.h>
+
+#include <asm/mmu_context.h>
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ quicklist_free(0, NULL, pgd);
+}
+
+#ifdef CONFIG_PGTABLE_4
+static inline void
+pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+{
+ pgd_val(*pgd_entry) = __pa(pud);
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ quicklist_free(0, NULL, pud);
+}
+#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud)
+#endif /* CONFIG_PGTABLE_4 */
+
+static inline void
+pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+{
+ pud_val(*pud_entry) = __pa(pmd);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ quicklist_free(0, NULL, pmd);
+}
+
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
+{
+ pmd_val(*pmd_entry) = page_to_phys(pte);
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
+{
+ pmd_val(*pmd_entry) = __pa(pte);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ struct page *page;
+ void *pg;
+
+ pg = quicklist_alloc(0, GFP_KERNEL, NULL);
+ if (!pg)
+ return NULL;
+ page = virt_to_page(pg);
+ pgtable_page_ctor(page);
+ return page;
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long addr)
+{
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ quicklist_free_page(0, NULL, pte);
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ quicklist_free(0, NULL, pte);
+}
+
+static inline void check_pgt_cache(void)
+{
+ quicklist_trim(0, NULL, 25, 16);
+}
+
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+
+#endif /* _ASM_IA64_PGALLOC_H */
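For context, this is roughly how the generic mm code combines the allocation and populate helpers above when it instantiates a missing PTE page (a simplified sketch of the __pte_alloc() pattern; mm, pmd and addr are assumed to be in scope):

	pgtable_t new = pte_alloc_one(mm, addr);
	if (new) {
		spin_lock(&mm->page_table_lock);
		if (pmd_none(*pmd))			/* won the race: install it */
			pmd_populate(mm, pmd, new);
		else					/* someone beat us to it */
			pte_free(mm, new);
		spin_unlock(&mm->page_table_lock);
	}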
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
new file mode 100644
index 000000000000..7a9bff47564f
--- /dev/null
+++ b/arch/ia64/include/asm/pgtable.h
@@ -0,0 +1,615 @@
+#ifndef _ASM_IA64_PGTABLE_H
+#define _ASM_IA64_PGTABLE_H
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the IA-64 page table tree.
+ *
+ * This hopefully works with any (fixed) IA-64 page-size, as defined
+ * in <asm/page.h>.
+ *
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#include <asm/mman.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/types.h>
+
+#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */
+
+/*
+ * First, define the various bits in a PTE. Note that the PTE format
+ * matches the VHPT short format, the first doubleword of the VHPT long
+ * format, and the first doubleword of the TLB insertion format.
+ */
+#define _PAGE_P_BIT 0
+#define _PAGE_A_BIT 5
+#define _PAGE_D_BIT 6
+
+#define _PAGE_P (1 << _PAGE_P_BIT) /* page present bit */
+#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */
+#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */
+#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */
+#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */
+#define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */
+#define _PAGE_MA_MASK (0x7 << 2)
+#define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */
+#define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */
+#define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */
+#define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */
+#define _PAGE_PL_MASK (3 << 7)
+#define _PAGE_AR_R (0 << 9) /* read only */
+#define _PAGE_AR_RX (1 << 9) /* read & execute */
+#define _PAGE_AR_RW (2 << 9) /* read & write */
+#define _PAGE_AR_RWX (3 << 9) /* read, write & execute */
+#define _PAGE_AR_R_RW (4 << 9) /* read / read & write */
+#define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */
+#define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */
+#define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */
+#define _PAGE_AR_MASK (7 << 9)
+#define _PAGE_AR_SHIFT 9
+#define _PAGE_A (1 << _PAGE_A_BIT) /* page accessed bit */
+#define _PAGE_D (1 << _PAGE_D_BIT) /* page dirty bit */
+#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
+#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
+#define _PAGE_PROTNONE (__IA64_UL(1) << 63)
+
+/* Valid only for a PTE with the present bit cleared: */
+#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */
+
+#define _PFN_MASK _PAGE_PPN_MASK
+/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
+#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)
+
+#define _PAGE_SIZE_4K 12
+#define _PAGE_SIZE_8K 13
+#define _PAGE_SIZE_16K 14
+#define _PAGE_SIZE_64K 16
+#define _PAGE_SIZE_256K 18
+#define _PAGE_SIZE_1M 20
+#define _PAGE_SIZE_4M 22
+#define _PAGE_SIZE_16M 24
+#define _PAGE_SIZE_64M 26
+#define _PAGE_SIZE_256M 28
+#define _PAGE_SIZE_1G 30
+#define _PAGE_SIZE_4G 32
+
+#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
+#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
+#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED
+
+/*
+ * Number of pointers a page-table level can hold, expressed as a shift
+ */
+#define PTRS_PER_PTD_SHIFT (PAGE_SHIFT-3)
+
+/*
+ * Definitions for fourth level:
+ */
+#define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * Definitions for third level:
+ *
+ * PMD_SHIFT determines the size of the area a third-level page table
+ * can map.
+ */
+#define PMD_SHIFT (PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT))
+
+#ifdef CONFIG_PGTABLE_4
+/*
+ * Definitions for second level:
+ *
+ * PUD_SHIFT determines the size of the area a second-level page table
+ * can map.
+ */
+#define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT))
+#endif
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
+ */
+#ifdef CONFIG_PGTABLE_4
+#define PGDIR_SHIFT (PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#else
+#define PGDIR_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#endif
+#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT
+#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT)
+#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
+#define FIRST_USER_ADDRESS 0
+
+/*
+ * All the normal masks have the "page accessed" bits on, as any time
+ * they are used, the page is accessed. They are cleared only by the
+ * page-out routines.
+ */
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A)
+#define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+
+# ifndef __ASSEMBLY__
+
+#include <linux/sched.h> /* for mm_struct */
+#include <linux/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/processor.h>
+
+/*
+ * Next come the mappings that determine how mmap() protection bits
+ * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented. The
+ * _P version gets used for a private (copy-on-write) memory segment, the
+ * _S version gets used for a shared memory segment with MAP_SHARED on.
+ * In a private memory segment, we do a copy-on-write if a task
+ * attempts to write to the page.
+ */
+ /* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
+#define __P011 PAGE_READONLY /* ditto */
+#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
+#define __S011 PAGE_SHARED
+#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
+#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
+#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
+
+#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+#ifdef CONFIG_PGTABLE_4
+#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
+#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+
+
+/*
+ * Some definitions to translate between mem_map, PTEs, and page addresses:
+ */
+
+
+/* Quick test to see if ADDR is a (potentially) valid physical address. */
+static inline long
+ia64_phys_addr_valid (unsigned long addr)
+{
+ return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
+}
+
+/*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory. For the return value to be meaningful, ADDR must be >=
+ * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
+ * require a hash-, or multi-level tree-lookup or something of that
+ * sort) but it guarantees to return TRUE only if accessing the page
+ * at that address does not cause an error. Note that there may be
+ * addresses for which kern_addr_valid() returns FALSE even though an
+ * access would not cause an error (e.g., this is typically true for
+ * memory mapped I/O regions).
+ *
+ * XXX Need to implement this for IA-64.
+ */
+#define kern_addr_valid(addr) (1)
+
+
+/*
+ * Now come the defines and routines to manage and access the three- or
+ * four-level page table.
+ */
+
+
+#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END vmalloc_end
+ extern unsigned long vmalloc_end;
+#else
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
+/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
+# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
+# define vmemmap ((struct page *)VMALLOC_END)
+#else
+# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
+#endif
+#endif
+
+/* fs/proc/kcore.c */
+#define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
+#define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
+
+#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
+#define RGN_MAP_LIMIT ((1UL << RGN_MAP_SHIFT) - PAGE_SIZE) /* per region addr limit */
+
+/*
+ * Conversion functions: convert page frame number (pfn) and a protection value to a page
+ * table entry (pte).
+ */
+#define pfn_pte(pfn, pgprot) \
+({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
+
+/* Extract pfn from pte. */
+#define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)
+
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) \
+({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
+
+#define pte_modify(_pte, newprot) \
+ (__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
+
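For instance, a read-only user PTE for page p would be built as (a sketch; PAGE_READONLY is defined earlier in this file):

	pte_t pte = mk_pte(p, PAGE_READONLY);
	/* same as pfn_pte(page_to_pfn(p), PAGE_READONLY): the page frame
	   number lands under _PAGE_PPN_MASK, the protection bits around it */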
+#define pte_none(pte) (!pte_val(pte))
+#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
+#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL)
+/* pte_page() returns the "struct page *" corresponding to the PTE: */
+#define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
+#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
+#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
+#define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
+
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
+#define pud_present(pud) (pud_val(pud) != 0UL)
+#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
+#define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET))
+
+#ifdef CONFIG_PGTABLE_4
+#define pgd_none(pgd) (!pgd_val(pgd))
+#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
+#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
+#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
+#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
+#define pgd_page(pgd) virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
+#endif
+
+/*
+ * The following have defined behavior only if pte_present() is true.
+ */
+#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
+#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
+#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
+#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
+#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
+#define pte_special(pte) 0
+
+/*
+ * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
+ * access rights:
+ */
+#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
+#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
+#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
+#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
+#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
+#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
+#define pte_mkhuge(pte) (__pte(pte_val(pte)))
+#define pte_mkspecial(pte) (pte)
+
+/*
+ * Because the ia64 Icache and Dcache are not coherent (within a cpu), we
+ * need to sync the icache and dcache when we insert a *new* executable page.
+ * __ia64_sync_icache_dcache() checks the Pg_arch_1 bit and flushes the icache
+ * if necessary.
+ *
+ * set_pte() is also called by the kernel, but we can expect that the kernel
+ * flushes icache explicitly if necessary.
+ */
+#define pte_present_exec_user(pte)\
+ ((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
+ (_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))
+
+extern void __ia64_sync_icache_dcache(pte_t pteval);
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ /* page is present && page is user && page is executable
+ * && (page swapin or new page or page migration
+ * || copy_on_write with page copying.)
+ */
+ if (pte_present_exec_user(pteval) &&
+ (!pte_present(*ptep) ||
+ pte_pfn(*ptep) != pte_pfn(pteval)))
+ /* load_module() calls flush_icache_range() explicitly */
+ __ia64_sync_icache_dcache(pteval);
+ *ptep = pteval;
+}
+
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * Make page protection values cacheable, uncacheable, or write-
+ * combining. Note that "protection" is really a misnomer here as the
+ * protection value contains the memory attribute bits, dirty bits, and
+ * various other bits as well.
+ */
+#define pgprot_cacheable(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
+#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
+#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+static inline unsigned long
+pgd_index (unsigned long address)
+{
+ unsigned long region = address >> 61;
+ unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+
+ return (region << (PAGE_SHIFT - 6)) | l1index;
+}
+
+/* The offset in the 1-level directory is given by the 3 region bits
+ (61..63) and the level-1 bits. */
+static inline pgd_t*
+pgd_offset (const struct mm_struct *mm, unsigned long address)
+{
+ return mm->pgd + pgd_index(address);
+}
+
+/* In the kernel's mapped region we completely ignore the region number
+ (since we know it's in region number 5). */
+#define pgd_offset_k(addr) \
+ (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
+/* Look up a pgd entry in the gate area. On IA-64, the gate-area
+ resides in the kernel-mapped segment, hence we use pgd_offset_k()
+ here. */
+#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
+
+#ifdef CONFIG_PGTABLE_4
+/* Find an entry in the second-level page table.. */
+#define pud_offset(dir,addr) \
+ ((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+#endif
+
+/* Find an entry in the third-level page table.. */
+#define pmd_offset(dir,addr) \
+ ((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+/*
+ * Find an entry in the last-level (pte) page table. This looks more complicated than it
+ * should be because some platforms place page tables in high memory.
+ */
+#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
+#define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
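Chaining the level accessors gives the usual software page-table walk. A sketch for a kernel virtual address (in the default three-level configuration, pgd_none() and pud_offset() are the fold-through versions from <asm-generic/pgtable-nopud.h>, so the same code works either way):

	static pte_t *lookup_kernel_pte(unsigned long addr)
	{
		pgd_t *pgd = pgd_offset_k(addr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return NULL;
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);
	}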
+
+/* atomic versions of some of the PTE manipulations: */
+
+static inline int
+ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_A_BIT, ptep);
+#else
+ pte_t pte = *ptep;
+ if (!pte_young(pte))
+ return 0;
+ set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ return 1;
+#endif
+}
+
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+ return __pte(xchg((long *) ptep, 0));
+#else
+ pte_t pte = *ptep;
+ pte_clear(mm, addr, ptep);
+ return pte;
+#endif
+}
+
+static inline void
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+ unsigned long new, old;
+
+ do {
+ old = pte_val(*ptep);
+ new = pte_val(pte_wrprotect(__pte (old)));
+ } while (cmpxchg((unsigned long *) ptep, old, new) != old);
+#else
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+#endif
+}
+
+static inline int
+pte_same (pte_t a, pte_t b)
+{
+ return pte_val(a) == pte_val(b);
+}
+
+#define update_mmu_cache(vma, address, pte) do { } while (0)
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init (void);
+
+/*
+ * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
+ * bits in the swap-type field of the swap pte. It would be nice to
+ * enforce that, but we can't easily include <linux/swap.h> here.
+ * (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
+ *
+ * Format of swap pte:
+ * bit 0 : present bit (must be zero)
+ * bit 1 : _PAGE_FILE (must be zero)
+ * bits 2- 8: swap-type
+ * bits 9-62: swap offset
+ * bit 63 : _PAGE_PROTNONE bit
+ *
+ * Format of file pte:
+ * bit 0 : present bit (must be zero)
+ * bit 1 : _PAGE_FILE (must be one)
+ * bits 2-62: file_offset/PAGE_SIZE
+ * bit 63 : _PAGE_PROTNONE bit
+ */
+#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
+#define __swp_offset(entry) (((entry).val << 1) >> 10)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define PTE_FILE_MAX_BITS 61
+#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
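A round trip through these macros is pure bit manipulation; for example, swap type 3 at offset 0x1234 (a sketch):

	swp_entry_t ent = __swp_entry(3, 0x1234);	/* (3 << 2) | (0x1234 << 9) */
	pte_t pte = __swp_entry_to_pte(ent);

	/* __swp_type(__pte_to_swp_entry(pte))   == 3      */
	/* __swp_offset(__pte_to_swp_entry(pte)) == 0x1234 */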
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern struct page *zero_page_memmap_ptr;
+#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
+
+/* We provide our own get_unmapped_area to cope with VA holes for userland */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
+#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
+#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
+#endif
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Update PTEP with ENTRY, which is guaranteed to be a less
+ * restrictive PTE. That is, ENTRY may have the ACCESSED, DIRTY, and
+ * WRITABLE bits turned on when the value at PTEP did not. The
+ * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
+ *
+ * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
+ * having to worry about races. On SMP machines, there are only two
+ * cases where this is true:
+ *
+ * (1) *PTEP has the PRESENT bit turned OFF
+ * (2) ENTRY has the DIRTY bit turned ON
+ *
+ * On ia64, we could implement this routine with a cmpxchg()-loop
+ * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
+ * However, like on x86, we can get a more streamlined version by
+ * observing that it is OK to drop ACCESSED bit updates when
+ * SAFELY_WRITABLE is FALSE. Besides being rare, all that would do is
+ * result in an extra Access-bit fault, which would then turn on the
+ * ACCESSED bit in the low-level fault handler (iaccess_bit or
+ * daccess_bit in ivt.S).
+ */
+#ifdef CONFIG_SMP
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed && __safely_writable) { \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __addr); \
+ } \
+ __changed; \
+})
+#else
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \
+ flush_tlb_page(__vma, __addr); \
+ } \
+ __changed; \
+})
+#endif
+
+# ifdef CONFIG_VIRTUAL_MEM_MAP
+ /* arch mem_map init routine is needed due to holes in a virtual mem_map */
+# define __HAVE_ARCH_MEMMAP_INIT
+ extern void memmap_init (unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn);
+# endif /* CONFIG_VIRTUAL_MEM_MAP */
+# endif /* !__ASSEMBLY__ */
+
+/*
+ * Identity-mapped regions use a large page size. We'll call such large pages
+ * "granules". If you can think of a better name that's unambiguous, let me
+ * know...
+ */
+#if defined(CONFIG_IA64_GRANULE_64MB)
+# define IA64_GRANULE_SHIFT _PAGE_SIZE_64M
+#elif defined(CONFIG_IA64_GRANULE_16MB)
+# define IA64_GRANULE_SHIFT _PAGE_SIZE_16M
+#endif
+#define IA64_GRANULE_SIZE (1 << IA64_GRANULE_SHIFT)
+/*
+ * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
+ */
+#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
+#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+/* These tell get_user_pages() that the first gate page is accessible from user-level. */
+#define FIXADDR_USER_START GATE_ADDR
+#ifdef HAVE_BUGGY_SEGREL
+# define FIXADDR_USER_END (GATE_ADDR + 2*PAGE_SIZE)
+#else
+# define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE)
+#endif
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTE_SAME
+#define __HAVE_ARCH_PGD_OFFSET_GATE
+
+
+#ifndef CONFIG_PGTABLE_4
+#include <asm-generic/pgtable-nopud.h>
+#endif
+#include <asm-generic/pgtable.h>
+
+#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/arch/ia64/include/asm/poll.h b/arch/ia64/include/asm/poll.h
new file mode 100644
index 000000000000..c98509d3149e
--- /dev/null
+++ b/arch/ia64/include/asm/poll.h
@@ -0,0 +1 @@
+#include <asm-generic/poll.h>
diff --git a/arch/ia64/include/asm/posix_types.h b/arch/ia64/include/asm/posix_types.h
new file mode 100644
index 000000000000..17885567b731
--- /dev/null
+++ b/arch/ia64/include/asm/posix_types.h
@@ -0,0 +1,126 @@
+#ifndef _ASM_IA64_POSIX_TYPES_H
+#define _ASM_IA64_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ *
+ * Based on <asm-alpha/posix_types.h>.
+ *
+ * Modified 1998-2000, 2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+typedef unsigned long __kernel_ino_t;
+typedef unsigned int __kernel_mode_t;
+typedef unsigned int __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef long long __kernel_loff_t;
+typedef int __kernel_pid_t;
+typedef int __kernel_ipc_pid_t;
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+typedef __kernel_uid_t __kernel_old_uid_t;
+typedef __kernel_gid_t __kernel_old_gid_t;
+typedef __kernel_uid_t __kernel_uid32_t;
+typedef __kernel_gid_t __kernel_gid32_t;
+
+typedef unsigned int __kernel_old_dev_t;
+
+# ifdef __KERNEL__
+
+# ifndef __GNUC__
+
+#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
+#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
+#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
+#define __FD_ZERO(set) \
+ ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
+
+# else /* !__GNUC__ */
+
+/* With GNU C, use inline functions instead so args are evaluated only once: */
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (16 longs,
+ * for a 1024-bit fd_set)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+{
+ unsigned long *tmp = p->fds_bits;
+ int i;
+
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 16:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ return;
+
+ case 8:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+ return;
+
+ case 4:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ return;
+ }
+ }
+ i = __FDSET_LONGS;
+ while (i) {
+ i--;
+ *tmp = 0;
+ tmp++;
+ }
+}
+
+# endif /* !__GNUC__ */
+# endif /* __KERNEL__ */
+#endif /* _ASM_IA64_POSIX_TYPES_H */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
new file mode 100644
index 000000000000..f88fa054d01d
--- /dev/null
+++ b/arch/ia64/include/asm/processor.h
@@ -0,0 +1,771 @@
+#ifndef _ASM_IA64_PROCESSOR_H
+#define _ASM_IA64_PROCESSOR_H
+
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ *
+ * 11/24/98 S.Eranian added ia64_set_iva()
+ * 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API
+ * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support
+ */
+
+
+#include <asm/intrinsics.h>
+#include <asm/kregs.h>
+#include <asm/ptrace.h>
+#include <asm/ustack.h>
+
+#define IA64_NUM_PHYS_STACK_REG 96
+#define IA64_NUM_DBG_REGS 8
+
+#define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000)
+#define DEFAULT_TASK_SIZE __IA64_UL_CONST(0xa000000000000000)
+
+/*
+ * TASK_SIZE is really misnamed: it is actually the maximum user
+ * space address (plus one). On IA-64, there are five user regions of 2TB
+ * each (assuming 8KB page size), for a total of 10TB of user virtual
+ * address space.
+ */
+#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
+#define TASK_SIZE TASK_SIZE_OF(current)
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (current->thread.map_base)
+
+#define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */
+#define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */
+#define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */
+#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */
+#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */
+#define IA64_THREAD_MIGRATION (__IA64_UL(1) << 5) /* require migration
+ sync at ctx sw */
+#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
+#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
+
+#define IA64_THREAD_UAC_SHIFT 3
+#define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
+#define IA64_THREAD_FPEMU_SHIFT 6
+#define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
+
+
+/*
+ * This shift should be large enough to be able to represent 1000000000/itc_freq with good
+ * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
+ * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
+ */
+#define IA64_NSEC_PER_CYC_SHIFT 30
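The shift lets a cycle count be turned into nanoseconds with one multiply and one shift, where nsec_per_cyc is precomputed per CPU as (1000000000 << IA64_NSEC_PER_CYC_SHIFT) / itc_freq. A sketch:

	static unsigned long cyc_to_nsec(unsigned long cyc, unsigned long nsec_per_cyc)
	{
		return (cyc * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
	}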
+
+#ifndef __ASSEMBLY__
+
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/threads.h>
+#include <linux/types.h>
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+#include <asm/percpu.h>
+#include <asm/rse.h>
+#include <asm/unwind.h>
+#include <asm/atomic.h>
+#ifdef CONFIG_NUMA
+#include <asm/nodedata.h>
+#endif
+
+/* like above but expressed as bitfields for more efficient access: */
+struct ia64_psr {
+ __u64 reserved0 : 1;
+ __u64 be : 1;
+ __u64 up : 1;
+ __u64 ac : 1;
+ __u64 mfl : 1;
+ __u64 mfh : 1;
+ __u64 reserved1 : 7;
+ __u64 ic : 1;
+ __u64 i : 1;
+ __u64 pk : 1;
+ __u64 reserved2 : 1;
+ __u64 dt : 1;
+ __u64 dfl : 1;
+ __u64 dfh : 1;
+ __u64 sp : 1;
+ __u64 pp : 1;
+ __u64 di : 1;
+ __u64 si : 1;
+ __u64 db : 1;
+ __u64 lp : 1;
+ __u64 tb : 1;
+ __u64 rt : 1;
+ __u64 reserved3 : 4;
+ __u64 cpl : 2;
+ __u64 is : 1;
+ __u64 mc : 1;
+ __u64 it : 1;
+ __u64 id : 1;
+ __u64 da : 1;
+ __u64 dd : 1;
+ __u64 ss : 1;
+ __u64 ri : 2;
+ __u64 ed : 1;
+ __u64 bn : 1;
+ __u64 reserved4 : 19;
+};
+
+union ia64_isr {
+ __u64 val;
+ struct {
+ __u64 code : 16;
+ __u64 vector : 8;
+ __u64 reserved1 : 8;
+ __u64 x : 1;
+ __u64 w : 1;
+ __u64 r : 1;
+ __u64 na : 1;
+ __u64 sp : 1;
+ __u64 rs : 1;
+ __u64 ir : 1;
+ __u64 ni : 1;
+ __u64 so : 1;
+ __u64 ei : 2;
+ __u64 ed : 1;
+ __u64 reserved2 : 20;
+ };
+};
+
+union ia64_lid {
+ __u64 val;
+ struct {
+ __u64 rv : 16;
+ __u64 eid : 8;
+ __u64 id : 8;
+ __u64 ig : 32;
+ };
+};
+
+union ia64_tpr {
+ __u64 val;
+ struct {
+ __u64 ig0 : 4;
+ __u64 mic : 4;
+ __u64 rsv : 8;
+ __u64 mmi : 1;
+ __u64 ig1 : 47;
+ };
+};
+
+union ia64_itir {
+ __u64 val;
+ struct {
+ __u64 rv3 : 2; /* 0-1 */
+ __u64 ps : 6; /* 2-7 */
+ __u64 key : 24; /* 8-31 */
+ __u64 rv4 : 32; /* 32-63 */
+ };
+};
+
+union ia64_rr {
+ __u64 val;
+ struct {
+ __u64 ve : 1; /* enable hw walker */
+ __u64 reserved0: 1; /* reserved */
+ __u64 ps : 6; /* log page size */
+ __u64 rid : 24; /* region id */
+ __u64 reserved1: 32; /* reserved */
+ };
+};
+
+/*
+ * CPU type, hardware bug flags, and per-CPU state. Frequently used
+ * state comes earlier:
+ */
+struct cpuinfo_ia64 {
+ __u32 softirq_pending;
+ __u64 itm_delta; /* # of clock cycles between clock ticks */
+ __u64 itm_next; /* interval timer mask value to use for next clock tick */
+ __u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
+ __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
+ __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
+ __u64 itc_freq; /* frequency of ITC counter */
+ __u64 proc_freq; /* frequency of processor */
+ __u64 cyc_per_usec; /* itc_freq/1000000 */
+ __u64 ptce_base;
+ __u32 ptce_count[2];
+ __u32 ptce_stride[2];
+ struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */
+
+#ifdef CONFIG_SMP
+ __u64 loops_per_jiffy;
+ int cpu;
+ __u32 socket_id; /* physical processor socket id */
+ __u16 core_id; /* core id */
+ __u16 thread_id; /* thread id */
+ __u16 num_log; /* Total number of logical processors on
+ * this socket that were successfully booted */
+ __u8 cores_per_socket; /* Cores per processor socket */
+ __u8 threads_per_core; /* Threads per core */
+#endif
+
+ /* CPUID-derived information: */
+ __u64 ppn;
+ __u64 features;
+ __u8 number;
+ __u8 revision;
+ __u8 model;
+ __u8 family;
+ __u8 archrev;
+ char vendor[16];
+ char *model_name;
+
+#ifdef CONFIG_NUMA
+ struct ia64_node_data *node_data;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+
+/*
+ * The "local" data variable. It refers to the per-CPU data of the currently executing
+ * CPU, much like "current" points to the per-task data of the currently executing task.
+ * Do not use the address of local_cpu_data, since it will be different from
+ * cpu_data(smp_processor_id())!
+ */
+#define local_cpu_data (&__ia64_per_cpu_var(cpu_info))
+#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
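For example (a sketch; itc_freq is the cpuinfo_ia64 field declared above):

	unsigned long f_here = local_cpu_data->itc_freq;	/* running CPU */
	unsigned long f_cpu2 = cpu_data(2)->itc_freq;		/* a specific CPU */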
+
+extern void print_cpu_info (struct cpuinfo_ia64 *);
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#define SET_UNALIGN_CTL(task,value) \
+({ \
+ (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \
+ | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
+ 0; \
+})
+#define GET_UNALIGN_CTL(task,addr) \
+({ \
+ put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \
+ (int __user *) (addr)); \
+})
+
+#define SET_FPEMU_CTL(task,value) \
+({ \
+ (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK) \
+ | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
+ 0; \
+})
+#define GET_FPEMU_CTL(task,addr) \
+({ \
+ put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \
+ (int __user *) (addr)); \
+})
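These macros back the generic prctl() interface, so from user level the same controls are reached as follows (a sketch; the PR_* constants live in <linux/prctl.h>):

	#include <sys/prctl.h>

	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	/* SIGBUS on unaligned access */
	prctl(PR_SET_FPEMU, PR_FPEMU_SIGFPE);		/* SIGFPE instead of silent fpswa emulation */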
+
+#ifdef CONFIG_IA32_SUPPORT
+struct desc_struct {
+ unsigned int a, b;
+};
+
+#define desc_empty(desc) (!((desc)->a | (desc)->b))
+#define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+
+#define GDT_ENTRY_TLS_ENTRIES 3
+#define GDT_ENTRY_TLS_MIN 6
+#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+struct ia64_partial_page_list;
+#endif
+
+struct thread_struct {
+ __u32 flags; /* various thread flags (see IA64_THREAD_*) */
+ /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
+ __u8 on_ustack; /* executing on user-stacks? */
+ __u8 pad[3];
+ __u64 ksp; /* kernel stack pointer */
+ __u64 map_base; /* base address for get_unmapped_area() */
+ __u64 task_size; /* limit for task size */
+ __u64 rbs_bot; /* the base address for the RBS */
+ int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */
+
+#ifdef CONFIG_IA32_SUPPORT
+ __u64 eflag; /* IA32 EFLAGS reg */
+ __u64 fsr; /* IA32 floating pt status reg */
+ __u64 fcr; /* IA32 floating pt control reg */
+ __u64 fir; /* IA32 fp except. instr. reg */
+ __u64 fdr; /* IA32 fp except. data reg */
+ __u64 old_k1; /* old value of ar.k1 */
+ __u64 old_iob; /* old IOBase value */
+ struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
+ /* cached TLS descriptors. */
+ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+
+# define INIT_THREAD_IA32 .eflag = 0, \
+ .fsr = 0, \
+ .fcr = 0x17800000037fULL, \
+ .fir = 0, \
+ .fdr = 0, \
+ .old_k1 = 0, \
+ .old_iob = 0, \
+ .ppl = NULL,
+#else
+# define INIT_THREAD_IA32
+#endif /* CONFIG_IA32_SUPPORT */
+#ifdef CONFIG_PERFMON
+ void *pfm_context; /* pointer to detailed PMU context */
+ unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
+# define INIT_THREAD_PM .pfm_context = NULL, \
+ .pfm_needs_checking = 0UL,
+#else
+# define INIT_THREAD_PM
+#endif
+ __u64 dbr[IA64_NUM_DBG_REGS];
+ __u64 ibr[IA64_NUM_DBG_REGS];
+ struct ia64_fpreg fph[96]; /* saved/loaded on demand */
+};
+
+#define INIT_THREAD { \
+ .flags = 0, \
+ .on_ustack = 0, \
+ .ksp = 0, \
+ .map_base = DEFAULT_MAP_BASE, \
+ .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \
+ .task_size = DEFAULT_TASK_SIZE, \
+ .last_fph_cpu = -1, \
+ INIT_THREAD_IA32 \
+ INIT_THREAD_PM \
+ .dbr = {0, }, \
+ .ibr = {0, }, \
+ .fph = {{{{0}}}, } \
+}
+
+#define start_thread(regs,new_ip,new_sp) do { \
+ set_fs(USER_DS); \
+ regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL)) \
+ & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \
+ regs->cr_iip = new_ip; \
+ regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \
+ regs->ar_rnat = 0; \
+ regs->ar_bspstore = current->thread.rbs_bot; \
+ regs->ar_fpsr = FPSR_DEFAULT; \
+ regs->loadrs = 0; \
+ regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
+ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
+ if (unlikely(!get_dumpable(current->mm))) { \
+ /* \
+ * Zap scratch regs to avoid leaking bits between processes with different \
+ * uid/privileges. \
+ */ \
+ regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \
+ regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \
+ } \
+} while (0)
+
+/* Forward declarations, a strange C thing... */
+struct mm_struct;
+struct task_struct;
+
+/*
+ * Free all resources held by a thread. This is called after the
+ * parent of DEAD_TASK has collected the exit status of the task via
+ * wait().
+ */
+#define release_thread(dead_task)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE 1: Only a kernel-only process (ie the swapper or direct
+ * descendants who haven't done an "execve()") should use this: it
+ * will work within a system call from a "real" process, but the
+ * process memory space will not be free'd until both the parent and
+ * the child have exited.
+ *
+ * NOTE 2: This MUST NOT be an inlined function. Otherwise, we get
+ * into trouble in init/main.c when the child thread returns to
+ * do_basic_setup() and the timing is such that free_initmem() has
+ * been called already.
+ */
+extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
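+
+/*
+ * For example (sketch; "worker" is a made-up name):
+ *
+ *	static int worker (void *arg)
+ *	{
+ *		... do work, then ...
+ *		return 0;
+ *	}
+ *
+ *	pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
+ */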
+
+/* Get wait channel for task P. */
+extern unsigned long get_wchan (struct task_struct *p);
+
+/* Return instruction pointer of blocked task TSK. */
+#define KSTK_EIP(tsk) \
+ ({ \
+ struct pt_regs *_regs = task_pt_regs(tsk); \
+ _regs->cr_iip + ia64_psr(_regs)->ri; \
+ })
+
+/* Return stack pointer of blocked task TSK. */
+#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
+
+extern void ia64_getreg_unknown_kr (void);
+extern void ia64_setreg_unknown_kr (void);
+
+#define ia64_get_kr(regnum) \
+({ \
+ unsigned long r = 0; \
+ \
+ switch (regnum) { \
+ case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \
+ case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \
+ case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \
+ case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \
+ case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \
+ case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \
+ case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \
+ case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \
+ default: ia64_getreg_unknown_kr(); break; \
+ } \
+ r; \
+})
+
+#define ia64_set_kr(regnum, r) \
+({ \
+ switch (regnum) { \
+ case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \
+ case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \
+ case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \
+ case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \
+ case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \
+ case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \
+ case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \
+ case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \
+ default: ia64_setreg_unknown_kr(); break; \
+ } \
+})
+
+/*
+ * The following three macros can't be inline functions because we don't have struct
+ * task_struct at this point.
+ */
+
+/*
+ * Return TRUE if task T owns the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
+#define ia64_is_local_fpu_owner(t) \
+({ \
+ struct task_struct *__ia64_islfo_task = (t); \
+ (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
+ && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
+})
+
+/*
+ * Mark task T as owning the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
+#define ia64_set_local_fpu_owner(t) do { \
+ struct task_struct *__ia64_slfo_task = (t); \
+ __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \
+ ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \
+} while (0)
+
+/* Mark the fph partition of task T as being invalid on all CPUs. */
+#define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1)
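+
+/*
+ * Sketch of how a lazy fph switch can use the macros above (illustrative
+ * only; the real context-switch code is more involved):
+ *
+ *	if (!ia64_is_local_fpu_owner(next)) {
+ *		ia64_load_fpu(next->thread.fph);
+ *		ia64_set_local_fpu_owner(next);
+ *	}
+ */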
+
+extern void __ia64_init_fpu (void);
+extern void __ia64_save_fpu (struct ia64_fpreg *fph);
+extern void __ia64_load_fpu (struct ia64_fpreg *fph);
+extern void ia64_save_debug_regs (unsigned long *save_area);
+extern void ia64_load_debug_regs (unsigned long *save_area);
+
+#ifdef CONFIG_IA32_SUPPORT
+extern void ia32_save_state (struct task_struct *task);
+extern void ia32_load_state (struct task_struct *task);
+#endif
+
+#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
+
+/* load fp 0.0 into fph */
+static inline void
+ia64_init_fpu (void) {
+ ia64_fph_enable();
+ __ia64_init_fpu();
+ ia64_fph_disable();
+}
+
+/* save f32-f127 at FPH */
+static inline void
+ia64_save_fpu (struct ia64_fpreg *fph) {
+ ia64_fph_enable();
+ __ia64_save_fpu(fph);
+ ia64_fph_disable();
+}
+
+/* load f32-f127 from FPH */
+static inline void
+ia64_load_fpu (struct ia64_fpreg *fph) {
+ ia64_fph_enable();
+ __ia64_load_fpu(fph);
+ ia64_fph_disable();
+}
+
+static inline __u64
+ia64_clear_ic (void)
+{
+ __u64 psr;
+ psr = ia64_getreg(_IA64_REG_PSR);
+ ia64_stop();
+ ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
+ ia64_srlz_i();
+ return psr;
+}
+
+/*
+ * Restore the psr.
+ */
+static inline void
+ia64_set_psr (__u64 psr)
+{
+ ia64_stop();
+ ia64_setreg(_IA64_REG_PSR_L, psr);
+ ia64_srlz_i();
+}
+
+/*
+ * Insert a translation into an instruction and/or data translation
+ * register.
+ */
+static inline void
+ia64_itr (__u64 target_mask, __u64 tr_num,
+ __u64 vmaddr, __u64 pte,
+ __u64 log_page_size)
+{
+ ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+ ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+ ia64_stop();
+ if (target_mask & 0x1)
+ ia64_itri(tr_num, pte);
+ if (target_mask & 0x2)
+ ia64_itrd(tr_num, pte);
+}
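+
+/*
+ * Translation registers may only be written with PSR.ic cleared, so a
+ * typical insertion of a pinned data translation (sketch) brackets the
+ * call with ia64_clear_ic()/ia64_set_psr() from above:
+ *
+ *	psr = ia64_clear_ic();
+ *	ia64_itr(0x2, tr_num, vaddr, pte, log_page_size);
+ *	ia64_set_psr(psr);
+ *	ia64_srlz_i();
+ */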
+
+/*
+ * Insert a translation into the instruction and/or data translation
+ * cache.
+ */
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
+ __u64 log_page_size)
+{
+ ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
+ ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+ ia64_stop();
+ /* as per EAS2.6, itc must be the last instruction in an instruction group */
+ if (target_mask & 0x1)
+ ia64_itci(pte);
+ if (target_mask & 0x2)
+ ia64_itcd(pte);
+}
+
+/*
+ * Purge a range of addresses from instruction and/or data translation
+ * register(s).
+ */
+static inline void
+ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
+{
+ if (target_mask & 0x1)
+ ia64_ptri(vmaddr, (log_size << 2));
+ if (target_mask & 0x2)
+ ia64_ptrd(vmaddr, (log_size << 2));
+}
+
+/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
+static inline void
+ia64_set_iva (void *ivt_addr)
+{
+ ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
+ ia64_srlz_i();
+}
+
+/* Set the page table address and control bits. */
+static inline void
+ia64_set_pta (__u64 pta)
+{
+ /* Note: srlz.i implies srlz.d */
+ ia64_setreg(_IA64_REG_CR_PTA, pta);
+ ia64_srlz_i();
+}
+
+static inline void
+ia64_eoi (void)
+{
+ ia64_setreg(_IA64_REG_CR_EOI, 0);
+ ia64_srlz_d();
+}
+
+#define cpu_relax() ia64_hint(ia64_hint_pause)
+
+static inline int
+ia64_get_irr(unsigned int vector)
+{
+ unsigned int reg = vector / 64;
+ unsigned int bit = vector % 64;
+ u64 irr;
+
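+	/* interrupt vectors are 8 bits wide (0..255), so reg is always 0..3 */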
+ switch (reg) {
+ case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
+ case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
+ case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
+ case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
+ }
+
+ return test_bit(bit, &irr);
+}
+
+static inline void
+ia64_set_lrr0 (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_CR_LRR0, val);
+ ia64_srlz_d();
+}
+
+static inline void
+ia64_set_lrr1 (unsigned long val)
+{
+ ia64_setreg(_IA64_REG_CR_LRR1, val);
+ ia64_srlz_d();
+}
+
+/*
+ * Given the address to which a spill occurred, return the unat bit
+ * number that corresponds to this address.
+ */
+static inline __u64
+ia64_unat_pos (void *spill_addr)
+{
+ return ((__u64) spill_addr >> 3) & 0x3f;
+}
+
+/*
+ * Set the NaT bit of an integer register which was spilled at address
+ * SPILL_ADDR. UNAT is the mask to be updated.
+ */
+static inline void
+ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
+{
+ __u64 bit = ia64_unat_pos(spill_addr);
+ __u64 mask = 1UL << bit;
+
+ *unat = (*unat & ~mask) | (nat << bit);
+}
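+
+/*
+ * Worked example: a spill to an address ending in 0x128 occupies slot
+ * (0x128 >> 3) & 0x3f == 0x25, so ia64_set_unat() with nat == 1 sets
+ * bit 37 of *unat.
+ */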
+
+/*
+ * Return saved PC of a blocked thread.
+ * Note that the only way T can block is through a call to schedule() -> switch_to().
+ */
+static inline unsigned long
+thread_saved_pc (struct task_struct *t)
+{
+ struct unw_frame_info info;
+ unsigned long ip;
+
+ unw_init_from_blocked_task(&info, t);
+ if (unw_unwind(&info) < 0)
+ return 0;
+ unw_get_ip(&info, &ip);
+ return ip;
+}
+
+/*
+ * Get the current instruction/program counter value.
+ */
+#define current_text_addr() \
+ ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
+
+static inline __u64
+ia64_get_ivr (void)
+{
+ __u64 r;
+ ia64_srlz_d();
+ r = ia64_getreg(_IA64_REG_CR_IVR);
+ ia64_srlz_d();
+ return r;
+}
+
+static inline void
+ia64_set_dbr (__u64 regnum, __u64 value)
+{
+ __ia64_set_dbr(regnum, value);
+#ifdef CONFIG_ITANIUM
+ ia64_srlz_d();
+#endif
+}
+
+static inline __u64
+ia64_get_dbr (__u64 regnum)
+{
+ __u64 retval;
+
+ retval = __ia64_get_dbr(regnum);
+#ifdef CONFIG_ITANIUM
+ ia64_srlz_d();
+#endif
+ return retval;
+}
+
+static inline __u64
+ia64_rotr (__u64 w, __u64 n)
+{
+ return (w >> n) | (w << (64 - n));
+}
+
+#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n))
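+
+/* e.g. ia64_rotr(0xffUL, 8) == 0xff00000000000000UL; ia64_rotl(x, 8) undoes it */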
+
+/*
+ * Take a mapped kernel address and return the equivalent address
+ * in the region 7 identity mapped virtual area.
+ */
+static inline void *
+ia64_imva (void *addr)
+{
+ void *result;
+ result = (void *) ia64_tpa(addr);
+ return __va(result);
+}
+
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+#define PREFETCH_STRIDE L1_CACHE_BYTES
+
+static inline void
+prefetch (const void *x)
+{
+ ia64_lfetch(ia64_lfhint_none, x);
+}
+
+static inline void
+prefetchw (const void *x)
+{
+ ia64_lfetch_excl(ia64_lfhint_none, x);
+}
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
+extern unsigned long boot_option_idle_override;
+extern unsigned long idle_halt;
+extern unsigned long idle_nomwait;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_PROCESSOR_H */
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
new file mode 100644
index 000000000000..15f8dcfe6eee
--- /dev/null
+++ b/arch/ia64/include/asm/ptrace.h
@@ -0,0 +1,364 @@
+#ifndef _ASM_IA64_PTRACE_H
+#define _ASM_IA64_PTRACE_H
+
+/*
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2003 Intel Co
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Arun Sharma <arun.sharma@intel.com>
+ *
+ * 12/07/98 S. Eranian added pt_regs & switch_stack
+ * 12/21/98 D. Mosberger updated to match latest code
+ * 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
+ *
+ */
+/*
+ * When a user process is blocked, its state looks as follows:
+ *
+ * +----------------------+ ------- IA64_STK_OFFSET
+ * | | ^
+ * | struct pt_regs | |
+ * | | |
+ * +----------------------+ |
+ * | | |
+ * | memory stack | |
+ * | (growing downwards) | |
+ * //.....................// |
+ * |
+ * //.....................// |
+ * | | |
+ * +----------------------+ |
+ * | struct switch_stack | |
+ * | | |
+ * +----------------------+ |
+ * | | |
+ * //.....................// |
+ * |
+ * //.....................// |
+ * | | |
+ * | register stack | |
+ * | (growing upwards) | |
+ * | | |
+ * +----------------------+ | --- IA64_RBS_OFFSET
+ * | struct thread_info | | ^
+ * +----------------------+ | |
+ * | | | |
+ * | struct task_struct | | |
+ * current -> | | | |
+ * +----------------------+ -------
+ *
+ * Note that ar.ec is not saved explicitly in pt_reg or switch_stack.
+ * This is because ar.ec is saved as part of ar.pfs.
+ */
+
+
+#include <asm/fpu.h>
+
+#ifdef __KERNEL__
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
+
+/*
+ * Base-2 logarithm of number of pages to allocate per task structure
+ * (including register backing store and memory stack):
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define KERNEL_STACK_SIZE_ORDER 3
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define KERNEL_STACK_SIZE_ORDER 2
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define KERNEL_STACK_SIZE_ORDER 1
+#else
+# define KERNEL_STACK_SIZE_ORDER 0
+#endif
+
+#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
+#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
+
+#define KERNEL_STACK_SIZE IA64_STK_OFFSET
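+
+/*
+ * E.g. with CONFIG_IA64_PAGE_SIZE_8KB the order is 2, so each task gets
+ * 1 << 2 = 4 pages = 32KB to hold its task_struct, thread_info, register
+ * backing store, and memory stack.
+ */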
+
+#endif /* __KERNEL__ */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are saved on system
+ * calls.
+ *
+ * We don't save all floating-point registers, because the kernel
+ * is compiled to use only a very small subset, so the others are
+ * untouched.
+ *
+ * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
+ * (because the memory stack pointer MUST ALWAYS be aligned this way)
+ *
+ */
+struct pt_regs {
+ /* The following registers are saved by SAVE_MIN: */
+ unsigned long b6; /* scratch */
+ unsigned long b7; /* scratch */
+
+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+ unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+ unsigned long r8; /* scratch (return value register 0) */
+ unsigned long r9; /* scratch (return value register 1) */
+ unsigned long r10; /* scratch (return value register 2) */
+ unsigned long r11; /* scratch (return value register 3) */
+
+ unsigned long cr_ipsr; /* interrupted task's psr */
+ unsigned long cr_iip; /* interrupted task's instruction pointer */
+ /*
+ * interrupted task's function state; if bit 63 is cleared, it
+ * contains syscall's ar.pfs.pfm:
+ */
+ unsigned long cr_ifs;
+
+ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+ unsigned long ar_pfs; /* prev function state */
+ unsigned long ar_rsc; /* RSE configuration */
+ /* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */
+ unsigned long ar_rnat; /* RSE NaT */
+ unsigned long ar_bspstore; /* RSE bspstore */
+
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+ unsigned long b0; /* return pointer (bp) */
+ unsigned long loadrs; /* size of dirty partition << 16 */
+
+ unsigned long r1; /* the gp pointer */
+ unsigned long r12; /* interrupted task's memory stack pointer */
+ unsigned long r13; /* thread pointer */
+
+ unsigned long ar_fpsr; /* floating point status (preserved) */
+ unsigned long r15; /* scratch */
+
+ /* The remaining registers are NOT saved for system calls. */
+
+ unsigned long r14; /* scratch */
+ unsigned long r2; /* scratch */
+ unsigned long r3; /* scratch */
+
+ /* The following registers are saved by SAVE_REST: */
+ unsigned long r16; /* scratch */
+ unsigned long r17; /* scratch */
+ unsigned long r18; /* scratch */
+ unsigned long r19; /* scratch */
+ unsigned long r20; /* scratch */
+ unsigned long r21; /* scratch */
+ unsigned long r22; /* scratch */
+ unsigned long r23; /* scratch */
+ unsigned long r24; /* scratch */
+ unsigned long r25; /* scratch */
+ unsigned long r26; /* scratch */
+ unsigned long r27; /* scratch */
+ unsigned long r28; /* scratch */
+ unsigned long r29; /* scratch */
+ unsigned long r30; /* scratch */
+ unsigned long r31; /* scratch */
+
+ unsigned long ar_ccv; /* compare/exchange value (scratch) */
+
+ /*
+ * Floating point registers that the kernel considers scratch:
+ */
+ struct ia64_fpreg f6; /* scratch */
+ struct ia64_fpreg f7; /* scratch */
+ struct ia64_fpreg f8; /* scratch */
+ struct ia64_fpreg f9; /* scratch */
+ struct ia64_fpreg f10; /* scratch */
+ struct ia64_fpreg f11; /* scratch */
+};
+
+/*
+ * This structure contains the additional registers that need to be
+ * preserved across a context switch. This generally consists of
+ * "preserved" registers.
+ */
+struct switch_stack {
+ unsigned long caller_unat; /* user NaT collection register (preserved) */
+ unsigned long ar_fpsr; /* floating-point status register */
+
+ struct ia64_fpreg f2; /* preserved */
+ struct ia64_fpreg f3; /* preserved */
+ struct ia64_fpreg f4; /* preserved */
+ struct ia64_fpreg f5; /* preserved */
+
+ struct ia64_fpreg f12; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f13; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f14; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f15; /* scratch, but untouched by kernel */
+ struct ia64_fpreg f16; /* preserved */
+ struct ia64_fpreg f17; /* preserved */
+ struct ia64_fpreg f18; /* preserved */
+ struct ia64_fpreg f19; /* preserved */
+ struct ia64_fpreg f20; /* preserved */
+ struct ia64_fpreg f21; /* preserved */
+ struct ia64_fpreg f22; /* preserved */
+ struct ia64_fpreg f23; /* preserved */
+ struct ia64_fpreg f24; /* preserved */
+ struct ia64_fpreg f25; /* preserved */
+ struct ia64_fpreg f26; /* preserved */
+ struct ia64_fpreg f27; /* preserved */
+ struct ia64_fpreg f28; /* preserved */
+ struct ia64_fpreg f29; /* preserved */
+ struct ia64_fpreg f30; /* preserved */
+ struct ia64_fpreg f31; /* preserved */
+
+ unsigned long r4; /* preserved */
+ unsigned long r5; /* preserved */
+ unsigned long r6; /* preserved */
+ unsigned long r7; /* preserved */
+
+ unsigned long b0; /* so we can force a direct return in copy_thread */
+ unsigned long b1;
+ unsigned long b2;
+ unsigned long b3;
+ unsigned long b4;
+ unsigned long b5;
+
+ unsigned long ar_pfs; /* previous function state */
+ unsigned long ar_lc; /* loop counter (preserved) */
+ unsigned long ar_unat; /* NaT bits for r4-r7 */
+ unsigned long ar_rnat; /* RSE NaT collection register */
+ unsigned long ar_bspstore; /* RSE dirty base (preserved) */
+ unsigned long pr; /* 64 predicate registers (1 bit each) */
+};
+
+#ifdef __KERNEL__
+
+#include <asm/current.h>
+#include <asm/page.h>
+
+/*
+ * We use ia64_psr(regs)->ri to determine which of the three
+ * instructions in the bundle (16 bytes) took the sample. Generate
+ * the canonical representation by adding the slot number to the
+ * instruction pointer.
+ */
+# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
+
+#define regs_return_value(regs) ((regs)->r8)
+
+/* Conserve space in histogram by encoding slot bits in address
+ * bits 2 and 3 rather than bits 0 and 1.
+ */
+#define profile_pc(regs) \
+({ \
+ unsigned long __ip = instruction_pointer(regs); \
+ (__ip & ~3UL) + ((__ip & 3UL) << 2); \
+})
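+
+/*
+ * E.g. for a bundle at 0x...4000 with ri == 2, instruction_pointer() is
+ * 0x...4002 while profile_pc() is 0x...4008: the slot number moves from
+ * bits 0-1 to bits 2-3.
+ */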
+
+ /* given a pointer to a task_struct, return the user's pt_regs */
+# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
+# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
+# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
+# define fsys_mode(task,regs) \
+ ({ \
+ struct task_struct *_task = (task); \
+ struct pt_regs *_regs = (regs); \
+ !user_mode(_regs) && user_stack(_task, _regs); \
+ })
+
+ /*
+ * System call handlers that, upon successful completion, need to return a negative value
+ * should call force_successful_syscall_return() right before returning. On architectures
+ * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
+ * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
+ * flag will not get set. On architectures which do not support a separate error flag,
+ * the macro is a no-op and the spurious error condition needs to be filtered out by some
+ * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
+ * or something along those lines).
+ *
+ * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
+ */
+# define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)
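+
+ /*
+ * Illustrative use (sketch): a handler whose success value may have the
+ * sign bit set, e.g. one returning a raw word of peeked memory, does:
+ *
+ *	force_successful_syscall_return();
+ *	return val;
+ */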
+
+ struct task_struct; /* forward decl */
+ struct unw_frame_info; /* forward decl */
+
+ extern void show_regs (struct pt_regs *);
+ extern void ia64_do_show_stack (struct unw_frame_info *, void *);
+ extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
+ unsigned long *);
+ extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
+ unsigned long, long *);
+ extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
+ unsigned long, long);
+ extern void ia64_flush_fph (struct task_struct *);
+ extern void ia64_sync_fph (struct task_struct *);
+ extern void ia64_sync_krbs(void);
+ extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
+ unsigned long, unsigned long);
+
+ /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
+ extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
+ /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
+ extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
+
+ extern void ia64_increment_ip (struct pt_regs *pt);
+ extern void ia64_decrement_ip (struct pt_regs *pt);
+
+ extern void ia64_ptrace_stop(void);
+ #define arch_ptrace_stop(code, info) \
+ ia64_ptrace_stop()
+ #define arch_ptrace_stop_needed(code, info) \
+ (!test_thread_flag(TIF_RESTORE_RSE))
+
+ extern void ptrace_attach_sync_user_rbs (struct task_struct *);
+ #define arch_ptrace_attach(child) \
+ ptrace_attach_sync_user_rbs(child)
+
+ #define arch_has_single_step() (1)
+ extern void user_enable_single_step(struct task_struct *);
+ extern void user_disable_single_step(struct task_struct *);
+
+ #define arch_has_block_step() (1)
+ extern void user_enable_block_step(struct task_struct *);
+
+#endif /* __KERNEL__ */
+
+/* pt_all_user_regs is used for PTRACE_GETREGS and PTRACE_SETREGS */
+struct pt_all_user_regs {
+ unsigned long nat;
+ unsigned long cr_iip;
+ unsigned long cfm;
+ unsigned long cr_ipsr;
+ unsigned long pr;
+
+ unsigned long gr[32];
+ unsigned long br[8];
+ unsigned long ar[128];
+ struct ia64_fpreg fr[128];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/* indices to application-registers array in pt_all_user_regs */
+#define PT_AUR_RSC 16
+#define PT_AUR_BSP 17
+#define PT_AUR_BSPSTORE 18
+#define PT_AUR_RNAT 19
+#define PT_AUR_CCV 32
+#define PT_AUR_UNAT 36
+#define PT_AUR_FPSR 40
+#define PT_AUR_PFS 64
+#define PT_AUR_LC 65
+#define PT_AUR_EC 66
+
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the numbers assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
+#define PTRACE_OLD_GETSIGINFO 13 /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>) */
+#define PTRACE_OLD_SETSIGINFO 14 /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>) */
+#define PTRACE_GETREGS 18 /* get all registers (pt_all_user_regs) in one shot */
+#define PTRACE_SETREGS 19 /* set all registers (pt_all_user_regs) in one shot */
+
+#define PTRACE_OLDSETOPTIONS 21
+
+#endif /* _ASM_IA64_PTRACE_H */
diff --git a/arch/ia64/include/asm/ptrace_offsets.h b/arch/ia64/include/asm/ptrace_offsets.h
new file mode 100644
index 000000000000..b712773c759e
--- /dev/null
+++ b/arch/ia64/include/asm/ptrace_offsets.h
@@ -0,0 +1,268 @@
+#ifndef _ASM_IA64_PTRACE_OFFSETS_H
+#define _ASM_IA64_PTRACE_OFFSETS_H
+
+/*
+ * Copyright (C) 1999, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
+ * virtual structure that would have the following definition:
+ *
+ * struct uarea {
+ * struct ia64_fpreg fph[96]; // f32-f127
+ * unsigned long nat_bits;
+ * unsigned long empty1;
+ * struct ia64_fpreg f2; // f2-f5
+ * :
+ * struct ia64_fpreg f5;
+ * struct ia64_fpreg f10; // f10-f31
+ * :
+ * struct ia64_fpreg f31;
+ * unsigned long r4; // r4-r7
+ * :
+ * unsigned long r7;
+ * unsigned long b1; // b1-b5
+ * :
+ * unsigned long b5;
+ * unsigned long ar_ec;
+ * unsigned long ar_lc;
+ * unsigned long empty2[5];
+ * unsigned long cr_ipsr;
+ * unsigned long cr_iip;
+ * unsigned long cfm;
+ * unsigned long ar_unat;
+ * unsigned long ar_pfs;
+ * unsigned long ar_rsc;
+ * unsigned long ar_rnat;
+ * unsigned long ar_bspstore;
+ * unsigned long pr;
+ * unsigned long b6;
+ * unsigned long ar_bsp;
+ * unsigned long r1;
+ * unsigned long r2;
+ * unsigned long r3;
+ * unsigned long r12;
+ * unsigned long r13;
+ * unsigned long r14;
+ * unsigned long r15;
+ * unsigned long r8;
+ * unsigned long r9;
+ * unsigned long r10;
+ * unsigned long r11;
+ * unsigned long r16;
+ * :
+ * unsigned long r31;
+ * unsigned long ar_ccv;
+ * unsigned long ar_fpsr;
+ * unsigned long b0;
+ * unsigned long b7;
+ * unsigned long f6;
+ * unsigned long f7;
+ * unsigned long f8;
+ * unsigned long f9;
+ * unsigned long ar_csd;
+ * unsigned long ar_ssd;
+ * unsigned long rsvd1[710];
+ * unsigned long dbr[8];
+ * unsigned long rsvd2[504];
+ * unsigned long ibr[8];
+ * unsigned long rsvd3[504];
+ * unsigned long pmd[4];
+ * }
+ */
+
+/* fph: */
+#define PT_F32 0x0000
+#define PT_F33 0x0010
+#define PT_F34 0x0020
+#define PT_F35 0x0030
+#define PT_F36 0x0040
+#define PT_F37 0x0050
+#define PT_F38 0x0060
+#define PT_F39 0x0070
+#define PT_F40 0x0080
+#define PT_F41 0x0090
+#define PT_F42 0x00a0
+#define PT_F43 0x00b0
+#define PT_F44 0x00c0
+#define PT_F45 0x00d0
+#define PT_F46 0x00e0
+#define PT_F47 0x00f0
+#define PT_F48 0x0100
+#define PT_F49 0x0110
+#define PT_F50 0x0120
+#define PT_F51 0x0130
+#define PT_F52 0x0140
+#define PT_F53 0x0150
+#define PT_F54 0x0160
+#define PT_F55 0x0170
+#define PT_F56 0x0180
+#define PT_F57 0x0190
+#define PT_F58 0x01a0
+#define PT_F59 0x01b0
+#define PT_F60 0x01c0
+#define PT_F61 0x01d0
+#define PT_F62 0x01e0
+#define PT_F63 0x01f0
+#define PT_F64 0x0200
+#define PT_F65 0x0210
+#define PT_F66 0x0220
+#define PT_F67 0x0230
+#define PT_F68 0x0240
+#define PT_F69 0x0250
+#define PT_F70 0x0260
+#define PT_F71 0x0270
+#define PT_F72 0x0280
+#define PT_F73 0x0290
+#define PT_F74 0x02a0
+#define PT_F75 0x02b0
+#define PT_F76 0x02c0
+#define PT_F77 0x02d0
+#define PT_F78 0x02e0
+#define PT_F79 0x02f0
+#define PT_F80 0x0300
+#define PT_F81 0x0310
+#define PT_F82 0x0320
+#define PT_F83 0x0330
+#define PT_F84 0x0340
+#define PT_F85 0x0350
+#define PT_F86 0x0360
+#define PT_F87 0x0370
+#define PT_F88 0x0380
+#define PT_F89 0x0390
+#define PT_F90 0x03a0
+#define PT_F91 0x03b0
+#define PT_F92 0x03c0
+#define PT_F93 0x03d0
+#define PT_F94 0x03e0
+#define PT_F95 0x03f0
+#define PT_F96 0x0400
+#define PT_F97 0x0410
+#define PT_F98 0x0420
+#define PT_F99 0x0430
+#define PT_F100 0x0440
+#define PT_F101 0x0450
+#define PT_F102 0x0460
+#define PT_F103 0x0470
+#define PT_F104 0x0480
+#define PT_F105 0x0490
+#define PT_F106 0x04a0
+#define PT_F107 0x04b0
+#define PT_F108 0x04c0
+#define PT_F109 0x04d0
+#define PT_F110 0x04e0
+#define PT_F111 0x04f0
+#define PT_F112 0x0500
+#define PT_F113 0x0510
+#define PT_F114 0x0520
+#define PT_F115 0x0530
+#define PT_F116 0x0540
+#define PT_F117 0x0550
+#define PT_F118 0x0560
+#define PT_F119 0x0570
+#define PT_F120 0x0580
+#define PT_F121 0x0590
+#define PT_F122 0x05a0
+#define PT_F123 0x05b0
+#define PT_F124 0x05c0
+#define PT_F125 0x05d0
+#define PT_F126 0x05e0
+#define PT_F127 0x05f0
+
+#define PT_NAT_BITS 0x0600
+
+#define PT_F2 0x0610
+#define PT_F3 0x0620
+#define PT_F4 0x0630
+#define PT_F5 0x0640
+#define PT_F10 0x0650
+#define PT_F11 0x0660
+#define PT_F12 0x0670
+#define PT_F13 0x0680
+#define PT_F14 0x0690
+#define PT_F15 0x06a0
+#define PT_F16 0x06b0
+#define PT_F17 0x06c0
+#define PT_F18 0x06d0
+#define PT_F19 0x06e0
+#define PT_F20 0x06f0
+#define PT_F21 0x0700
+#define PT_F22 0x0710
+#define PT_F23 0x0720
+#define PT_F24 0x0730
+#define PT_F25 0x0740
+#define PT_F26 0x0750
+#define PT_F27 0x0760
+#define PT_F28 0x0770
+#define PT_F29 0x0780
+#define PT_F30 0x0790
+#define PT_F31 0x07a0
+#define PT_R4 0x07b0
+#define PT_R5 0x07b8
+#define PT_R6 0x07c0
+#define PT_R7 0x07c8
+
+#define PT_B1 0x07d8
+#define PT_B2 0x07e0
+#define PT_B3 0x07e8
+#define PT_B4 0x07f0
+#define PT_B5 0x07f8
+
+#define PT_AR_EC 0x0800
+#define PT_AR_LC 0x0808
+
+#define PT_CR_IPSR 0x0830
+#define PT_CR_IIP 0x0838
+#define PT_CFM 0x0840
+#define PT_AR_UNAT 0x0848
+#define PT_AR_PFS 0x0850
+#define PT_AR_RSC 0x0858
+#define PT_AR_RNAT 0x0860
+#define PT_AR_BSPSTORE 0x0868
+#define PT_PR 0x0870
+#define PT_B6 0x0878
+#define PT_AR_BSP 0x0880 /* note: this points to the *end* of the backing store! */
+#define PT_R1 0x0888
+#define PT_R2 0x0890
+#define PT_R3 0x0898
+#define PT_R12 0x08a0
+#define PT_R13 0x08a8
+#define PT_R14 0x08b0
+#define PT_R15 0x08b8
+#define PT_R8 0x08c0
+#define PT_R9 0x08c8
+#define PT_R10 0x08d0
+#define PT_R11 0x08d8
+#define PT_R16 0x08e0
+#define PT_R17 0x08e8
+#define PT_R18 0x08f0
+#define PT_R19 0x08f8
+#define PT_R20 0x0900
+#define PT_R21 0x0908
+#define PT_R22 0x0910
+#define PT_R23 0x0918
+#define PT_R24 0x0920
+#define PT_R25 0x0928
+#define PT_R26 0x0930
+#define PT_R27 0x0938
+#define PT_R28 0x0940
+#define PT_R29 0x0948
+#define PT_R30 0x0950
+#define PT_R31 0x0958
+#define PT_AR_CCV 0x0960
+#define PT_AR_FPSR 0x0968
+#define PT_B0 0x0970
+#define PT_B7 0x0978
+#define PT_F6 0x0980
+#define PT_F7 0x0990
+#define PT_F8 0x09a0
+#define PT_F9 0x09b0
+#define PT_AR_CSD 0x09c0
+#define PT_AR_SSD 0x09c8
+
+#define PT_DBR 0x2000 /* data breakpoint registers */
+#define PT_IBR 0x3000 /* instruction breakpoint registers */
+#define PT_PMD 0x4000 /* performance monitoring counters */
+
+#endif /* _ASM_IA64_PTRACE_OFFSETS_H */
diff --git a/arch/ia64/include/asm/resource.h b/arch/ia64/include/asm/resource.h
new file mode 100644
index 000000000000..ba2272a87fc7
--- /dev/null
+++ b/arch/ia64/include/asm/resource.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_IA64_RESOURCE_H
+#define _ASM_IA64_RESOURCE_H
+
+#include <asm/ustack.h>
+#include <asm-generic/resource.h>
+
+#endif /* _ASM_IA64_RESOURCE_H */
diff --git a/arch/ia64/include/asm/rse.h b/arch/ia64/include/asm/rse.h
new file mode 100644
index 000000000000..02830a3b0196
--- /dev/null
+++ b/arch/ia64/include/asm/rse.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_RSE_H
+#define _ASM_IA64_RSE_H
+
+/*
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * Register stack engine related helper functions. This file may be
+ * used in applications, so be careful about the name-space and give
+ * some consideration to non-GNU C compilers (though __inline__ is
+ * fine).
+ */
+
+static __inline__ unsigned long
+ia64_rse_slot_num (unsigned long *addr)
+{
+ return (((unsigned long) addr) >> 3) & 0x3f;
+}
+
+/*
+ * Return TRUE if ADDR is the address of an RNAT slot.
+ */
+static __inline__ unsigned long
+ia64_rse_is_rnat_slot (unsigned long *addr)
+{
+ return ia64_rse_slot_num(addr) == 0x3f;
+}
+
+/*
+ * Returns the address of the RNAT slot that covers the slot at
+ * address SLOT_ADDR.
+ */
+static __inline__ unsigned long *
+ia64_rse_rnat_addr (unsigned long *slot_addr)
+{
+ return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
+}
+
+/*
+ * Calculate the number of registers in the dirty partition starting at BSPSTORE and
+ * ending at BSP. This isn't simply (BSP-BSPSTORE)/8 because every 64th slot stores
+ * ar.rnat.
+ */
+static __inline__ unsigned long
+ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
+{
+ unsigned long slots = (bsp - bspstore);
+
+ return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40;
+}
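+
+/*
+ * Worked example: with bspstore in slot 0x3e and bsp == bspstore + 4, the
+ * four slots include the RNAT slot 0x3f, so the result is
+ * 4 - (0x3e + 4)/0x40 == 3 registers.
+ */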
+
+/*
+ * The inverse of the above: given bspstore and the number of
+ * registers, calculate ar.bsp.
+ */
+static __inline__ unsigned long *
+ia64_rse_skip_regs (unsigned long *addr, long num_regs)
+{
+ long delta = ia64_rse_slot_num(addr) + num_regs;
+
+ if (num_regs < 0)
+ delta -= 0x3e;
+ return addr + num_regs + delta/0x3f;
+}
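+
+/*
+ * Continuing the example above: ia64_rse_skip_regs(bspstore, 3) computes
+ * delta = 0x3e + 3 = 0x41 and returns bspstore + 3 + 0x41/0x3f ==
+ * bspstore + 4 == bsp, as expected.
+ */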
+
+#endif /* _ASM_IA64_RSE_H */
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
new file mode 100644
index 000000000000..fbee74b15782
--- /dev/null
+++ b/arch/ia64/include/asm/rwsem.h
@@ -0,0 +1,182 @@
+/*
+ * R/W semaphores for ia64
+ *
+ * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
+ * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
+ *
+ * Based on asm-i386/rwsem.h and other architecture implementation.
+ *
+ * The MSW of the count is the negated number of active writers and
+ * waiting lockers, and the LSW is the total number of active locks.
+ *
+ * The lock count is initialized to 0 (no active and no waiting lockers).
+ *
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
+ * the case of an uncontended lock. Readers increment by 1 and see a positive
+ * value when uncontended, negative if there are writers (and maybe readers)
+ * waiting (in which case the reader goes to sleep).
+ */
+
+#ifndef _ASM_IA64_RWSEM_H
+#define _ASM_IA64_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
+#endif
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <asm/intrinsics.h>
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ signed long count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+};
+
+#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
+#define RWSEM_ACTIVE_BIAS __IA64_UL_CONST(0x0000000000000001)
+#define RWSEM_ACTIVE_MASK __IA64_UL_CONST(0x00000000ffffffff)
+#define RWSEM_WAITING_BIAS -__IA64_UL_CONST(0x0000000100000000)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
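+
+/*
+ * Example count values: a single uncontended reader leaves the count at
+ * 0x0000000000000001; a single uncontended writer leaves it at
+ * 0xffffffff00000001 (RWSEM_ACTIVE_WRITE_BIAS); a reader blocked behind
+ * that writer bumps it to 0xffffffff00000002 before going to sleep.
+ */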
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+static inline void
+init_rwsem (struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+}
+
+/*
+ * lock for reading
+ */
+static inline void
+__down_read (struct rw_semaphore *sem)
+{
+ long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
+
+ if (result < 0)
+ rwsem_down_read_failed(sem);
+}
+
+/*
+ * lock for writing
+ */
+static inline void
+__down_write (struct rw_semaphore *sem)
+{
+ long old, new;
+
+ do {
+ old = sem->count;
+ new = old + RWSEM_ACTIVE_WRITE_BIAS;
+ } while (cmpxchg_acq(&sem->count, old, new) != old);
+
+ if (old != 0)
+ rwsem_down_write_failed(sem);
+}
+
+/*
+ * unlock after reading
+ */
+static inline void
+__up_read (struct rw_semaphore *sem)
+{
+ long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
+
+ if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void
+__up_write (struct rw_semaphore *sem)
+{
+ long old, new;
+
+ do {
+ old = sem->count;
+ new = old - RWSEM_ACTIVE_WRITE_BIAS;
+ } while (cmpxchg_rel(&sem->count, old, new) != old);
+
+ if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int
+__down_read_trylock (struct rw_semaphore *sem)
+{
+ long tmp;
+ while ((tmp = sem->count) >= 0) {
+ if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int
+__down_write_trylock (struct rw_semaphore *sem)
+{
+ long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void
+__downgrade_write (struct rw_semaphore *sem)
+{
+ long old, new;
+
+ do {
+ old = sem->count;
+ new = old - RWSEM_WAITING_BIAS;
+ } while (cmpxchg_rel(&sem->count, old, new) != old);
+
+ if (old < 0)
+ rwsem_downgrade_wake(sem);
+}
+
+/*
+ * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
+ * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
+ */
+#define rwsem_atomic_add(delta, sem) atomic64_add(delta, (atomic64_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
+
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+#endif /* _ASM_IA64_RWSEM_H */
diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h
new file mode 100644
index 000000000000..89594b442f83
--- /dev/null
+++ b/arch/ia64/include/asm/sal.h
@@ -0,0 +1,905 @@
+#ifndef _ASM_IA64_SAL_H
+#define _ASM_IA64_SAL_H
+
+/*
+ * System Abstraction Layer definitions.
+ *
+ * This is based on version 2.5 of the manual "IA-64 System
+ * Abstraction Layer".
+ *
+ * Copyright (C) 2001 Intel
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ * Copyright (C) 2001 Fred Lewis <frederick.v.lewis@intel.com>
+ * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com>
+ *
+ * 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001
+ * revision of the SAL spec.
+ * 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000
+ * revision of the SAL spec.
+ * 99/09/29 davidm Updated for SAL 2.6.
+ * 00/03/29 cfleck Updated SAL Error Logging info for processor (SAL 2.6)
+ * (plus examples of platform error info structures from smariset @ Intel)
+ */
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT 0
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT 1
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT 2
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT 3
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bcd.h>
+#include <linux/spinlock.h>
+#include <linux/efi.h>
+
+#include <asm/pal.h>
+#include <asm/system.h>
+#include <asm/fpu.h>
+
+extern spinlock_t sal_lock;
+
+/* SAL spec _requires_ eight args for each call. */
+#define __IA64_FW_CALL(entry,result,a0,a1,a2,a3,a4,a5,a6,a7) \
+ result = (*entry)(a0,a1,a2,a3,a4,a5,a6,a7)
+
+# define IA64_FW_CALL(entry,result,args...) do { \
+ unsigned long __ia64_sc_flags; \
+ struct ia64_fpreg __ia64_sc_fr[6]; \
+ ia64_save_scratch_fpregs(__ia64_sc_fr); \
+ spin_lock_irqsave(&sal_lock, __ia64_sc_flags); \
+ __IA64_FW_CALL(entry, result, args); \
+ spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags); \
+ ia64_load_scratch_fpregs(__ia64_sc_fr); \
+} while (0)
+
+# define SAL_CALL(result,args...) \
+ IA64_FW_CALL(ia64_sal, result, args);
+
+# define SAL_CALL_NOLOCK(result,args...) do { \
+ unsigned long __ia64_scn_flags; \
+ struct ia64_fpreg __ia64_scn_fr[6]; \
+ ia64_save_scratch_fpregs(__ia64_scn_fr); \
+ local_irq_save(__ia64_scn_flags); \
+ __IA64_FW_CALL(ia64_sal, result, args); \
+ local_irq_restore(__ia64_scn_flags); \
+ ia64_load_scratch_fpregs(__ia64_scn_fr); \
+} while (0)
+
+# define SAL_CALL_REENTRANT(result,args...) do { \
+ struct ia64_fpreg __ia64_scs_fr[6]; \
+ ia64_save_scratch_fpregs(__ia64_scs_fr); \
+ preempt_disable(); \
+ __IA64_FW_CALL(ia64_sal, result, args); \
+ preempt_enable(); \
+ ia64_load_scratch_fpregs(__ia64_scs_fr); \
+} while (0)
+
+#define SAL_SET_VECTORS 0x01000000
+#define SAL_GET_STATE_INFO 0x01000001
+#define SAL_GET_STATE_INFO_SIZE 0x01000002
+#define SAL_CLEAR_STATE_INFO 0x01000003
+#define SAL_MC_RENDEZ 0x01000004
+#define SAL_MC_SET_PARAMS 0x01000005
+#define SAL_REGISTER_PHYSICAL_ADDR 0x01000006
+
+#define SAL_CACHE_FLUSH 0x01000008
+#define SAL_CACHE_INIT 0x01000009
+#define SAL_PCI_CONFIG_READ 0x01000010
+#define SAL_PCI_CONFIG_WRITE 0x01000011
+#define SAL_FREQ_BASE 0x01000012
+#define SAL_PHYSICAL_ID_INFO 0x01000013
+
+#define SAL_UPDATE_PAL 0x01000020
+
+struct ia64_sal_retval {
+ /*
+ * A zero status value indicates call completed without error.
+ * A negative status value indicates reason of call failure.
+ * A positive status value indicates success but an
+ * informational value should be printed (e.g., "reboot for
+ * change to take effect").
+ */
+ s64 status;
+ u64 v0;
+ u64 v1;
+ u64 v2;
+};
+
+typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
+
+enum {
+ SAL_FREQ_BASE_PLATFORM = 0,
+ SAL_FREQ_BASE_INTERVAL_TIMER = 1,
+ SAL_FREQ_BASE_REALTIME_CLOCK = 2
+};
+
+/*
+ * The SAL system table is followed by a variable number of variable
+ * length descriptors. The structure of these descriptors follows
+ * below.
+ * The definition follows the SAL spec from July 2000.
+ */
+struct ia64_sal_systab {
+ u8 signature[4]; /* should be "SST_" */
+ u32 size; /* size of this table in bytes */
+ u8 sal_rev_minor;
+ u8 sal_rev_major;
+ u16 entry_count; /* # of entries in variable portion */
+ u8 checksum;
+ u8 reserved1[7];
+ u8 sal_a_rev_minor;
+ u8 sal_a_rev_major;
+ u8 sal_b_rev_minor;
+ u8 sal_b_rev_major;
+ /* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */
+ u8 oem_id[32];
+ u8 product_id[32]; /* ASCII product id */
+ u8 reserved2[8];
+};
+
+enum sal_systab_entry_type {
+ SAL_DESC_ENTRY_POINT = 0,
+ SAL_DESC_MEMORY = 1,
+ SAL_DESC_PLATFORM_FEATURE = 2,
+ SAL_DESC_TR = 3,
+ SAL_DESC_PTC = 4,
+ SAL_DESC_AP_WAKEUP = 5
+};
+
+/*
+ * Entry type: Size:
+ * 0 48
+ * 1 32
+ * 2 16
+ * 3 32
+ * 4 16
+ * 5 16
+ */
+#define SAL_DESC_SIZE(type) "\060\040\020\040\020\020"[(unsigned) type]
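+
+/*
+ * The octal string above is simply a byte array indexed by entry type,
+ * e.g. SAL_DESC_SIZE(SAL_DESC_MEMORY) == '\040' == 32 bytes.
+ */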
+
+typedef struct ia64_sal_desc_entry_point {
+ u8 type;
+ u8 reserved1[7];
+ u64 pal_proc;
+ u64 sal_proc;
+ u64 gp;
+ u8 reserved2[16];
+} ia64_sal_desc_entry_point_t;
+
+typedef struct ia64_sal_desc_memory {
+ u8 type;
+ u8 used_by_sal; /* needs to be mapped for SAL? */
+ u8 mem_attr; /* current memory attribute setting */
+ u8 access_rights; /* access rights set up by SAL */
+ u8 mem_attr_mask; /* mask of supported memory attributes */
+ u8 reserved1;
+ u8 mem_type; /* memory type */
+ u8 mem_usage; /* memory usage */
+ u64 addr; /* physical address of memory */
+ u32 length; /* length (multiple of 4KB pages) */
+ u32 reserved2;
+ u8 oem_reserved[8];
+} ia64_sal_desc_memory_t;
+
+typedef struct ia64_sal_desc_platform_feature {
+ u8 type;
+ u8 feature_mask;
+ u8 reserved1[14];
+} ia64_sal_desc_platform_feature_t;
+
+typedef struct ia64_sal_desc_tr {
+ u8 type;
+ u8 tr_type; /* 0 == instruction, 1 == data */
+ u8 regnum; /* translation register number */
+ u8 reserved1[5];
+ u64 addr; /* virtual address of area covered */
+ u64 page_size; /* encoded page size */
+ u8 reserved2[8];
+} ia64_sal_desc_tr_t;
+
+typedef struct ia64_sal_desc_ptc {
+ u8 type;
+ u8 reserved1[3];
+ u32 num_domains; /* # of coherence domains */
+ u64 domain_info; /* physical address of domain info table */
+} ia64_sal_desc_ptc_t;
+
+typedef struct ia64_sal_ptc_domain_info {
+ u64 proc_count; /* number of processors in domain */
+ u64 proc_list; /* physical address of LID array */
+} ia64_sal_ptc_domain_info_t;
+
+typedef struct ia64_sal_ptc_domain_proc_entry {
+ u64 id : 8; /* id of processor */
+ u64 eid : 8; /* eid of processor */
+} ia64_sal_ptc_domain_proc_entry_t;
+
+#define IA64_SAL_AP_EXTERNAL_INT 0
+
+typedef struct ia64_sal_desc_ap_wakeup {
+ u8 type;
+ u8 mechanism; /* 0 == external interrupt */
+ u8 reserved1[6];
+ u64 vector; /* interrupt vector in range 0x10-0xff */
+} ia64_sal_desc_ap_wakeup_t;
+
+extern ia64_sal_handler ia64_sal;
+extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info;
+
+extern unsigned short sal_revision; /* supported SAL spec revision */
+extern unsigned short sal_version; /* SAL version; OEM dependent */
+#define SAL_VERSION_CODE(major, minor) ((BIN2BCD(major) << 8) | BIN2BCD(minor))
+
+extern const char *ia64_sal_strerror (long status);
+extern void ia64_sal_init (struct ia64_sal_systab *sal_systab);
+
+/* SAL information type encodings */
+enum {
+ SAL_INFO_TYPE_MCA = 0, /* Machine check abort information */
+ SAL_INFO_TYPE_INIT = 1, /* Init information */
+ SAL_INFO_TYPE_CMC = 2, /* Corrected machine check information */
+ SAL_INFO_TYPE_CPE = 3 /* Corrected platform error information */
+};
+
+/* Encodings for machine check parameter types */
+enum {
+ SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezvous interrupt */
+ SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */
+ SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */
+};
+
+/* Encodings for rendezvous mechanisms */
+enum {
+ SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */
+ SAL_MC_PARAM_MECHANISM_MEM = 2 /* Use memory synchronization variable*/
+};
+
+/* Encodings for vectors which can be registered by the OS with SAL */
+enum {
+ SAL_VECTOR_OS_MCA = 0,
+ SAL_VECTOR_OS_INIT = 1,
+ SAL_VECTOR_OS_BOOT_RENDEZ = 2
+};
+
+/* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */
+#define SAL_MC_PARAM_RZ_ALWAYS 0x1
+#define SAL_MC_PARAM_BINIT_ESCALATE 0x10
+
+/*
+ * Definition of the SAL Error Log from the SAL spec
+ */
+
+/* SAL Error Record Section GUID Definitions */
+#define SAL_PROC_DEV_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_MEM_DEV_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SEL_DEV_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf3, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_BUS_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_PCI_COMP_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_SPECIFIC_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SAL_PLAT_BUS_ERR_SECT_GUID \
+ EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID \
+ EFI_GUID(0x6cb0a200, 0x893a, 0x11da, 0x96, 0xd2, 0x0, 0x10, 0x83, 0xff, \
+ 0xca, 0x4d)
+
+#define MAX_CACHE_ERRORS 6
+#define MAX_TLB_ERRORS 6
+#define MAX_BUS_ERRORS 1
+
+/* Definition of version according to SAL spec for logging purposes */
+typedef struct sal_log_revision {
+ u8 minor; /* BCD (0..99) */
+ u8 major; /* BCD (0..99) */
+} sal_log_revision_t;
+
+/* Definition of timestamp according to SAL spec for logging purposes */
+typedef struct sal_log_timestamp {
+ u8 slh_second; /* Second (0..59) */
+ u8 slh_minute; /* Minute (0..59) */
+ u8 slh_hour; /* Hour (0..23) */
+ u8 slh_reserved;
+ u8 slh_day; /* Day (1..31) */
+ u8 slh_month; /* Month (1..12) */
+ u8 slh_year; /* Year (00..99) */
+ u8 slh_century; /* Century (19, 20, 21, ...) */
+} sal_log_timestamp_t;
+
+/* Definition of log record header structures */
+typedef struct sal_log_record_header {
+ u64 id; /* Unique monotonically increasing ID */
+ sal_log_revision_t revision; /* Major and Minor revision of header */
+ u8 severity; /* Error Severity */
+ u8 validation_bits; /* 0: platform_guid, 1: !timestamp */
+ u32 len; /* Length of this error log in bytes */
+ sal_log_timestamp_t timestamp; /* Timestamp */
+ efi_guid_t platform_guid; /* Unique OEM Platform ID */
+} sal_log_record_header_t;
+
+#define sal_log_severity_recoverable 0
+#define sal_log_severity_fatal 1
+#define sal_log_severity_corrected 2
+
+/* Definition of log section header structures */
+typedef struct sal_log_sec_header {
+ efi_guid_t guid; /* Unique Section ID */
+ sal_log_revision_t revision; /* Major and Minor revision of Section */
+ u16 reserved;
+ u32 len; /* Section length */
+} sal_log_section_hdr_t;
+
+typedef struct sal_log_mod_error_info {
+ struct {
+ u64 check_info : 1,
+ requestor_identifier : 1,
+ responder_identifier : 1,
+ target_identifier : 1,
+ precise_ip : 1,
+ reserved : 59;
+ } valid;
+ u64 check_info;
+ u64 requestor_identifier;
+ u64 responder_identifier;
+ u64 target_identifier;
+ u64 precise_ip;
+} sal_log_mod_error_info_t;
+
+typedef struct sal_processor_static_info {
+ struct {
+ u64 minstate : 1,
+ br : 1,
+ cr : 1,
+ ar : 1,
+ rr : 1,
+ fr : 1,
+ reserved : 58;
+ } valid;
+ pal_min_state_area_t min_state_area;
+ u64 br[8];
+ u64 cr[128];
+ u64 ar[128];
+ u64 rr[8];
+ struct ia64_fpreg __attribute__ ((packed)) fr[128];
+} sal_processor_static_info_t;
+
+struct sal_cpuid_info {
+ u64 regs[5];
+ u64 reserved;
+};
+
+typedef struct sal_log_processor_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 proc_error_map : 1,
+ proc_state_param : 1,
+ proc_cr_lid : 1,
+ psi_static_struct : 1,
+ num_cache_check : 4,
+ num_tlb_check : 4,
+ num_bus_check : 4,
+ num_reg_file_check : 4,
+ num_ms_check : 4,
+ cpuid_info : 1,
+ reserved1 : 39;
+ } valid;
+ u64 proc_error_map;
+ u64 proc_state_parameter;
+ u64 proc_cr_lid;
+ /*
+ * The rest of this structure consists of variable-length arrays, which can't be
+ * expressed in C.
+ */
+ sal_log_mod_error_info_t info[0];
+ /*
+ * This is what the rest looked like if C supported variable-length arrays:
+ *
+ * sal_log_mod_error_info_t cache_check_info[.valid.num_cache_check];
+ * sal_log_mod_error_info_t tlb_check_info[.valid.num_tlb_check];
+ * sal_log_mod_error_info_t bus_check_info[.valid.num_bus_check];
+ * sal_log_mod_error_info_t reg_file_check_info[.valid.num_reg_file_check];
+ * sal_log_mod_error_info_t ms_check_info[.valid.num_ms_check];
+ * struct sal_cpuid_info cpuid_info;
+ * sal_processor_static_info_t processor_static_info;
+ */
+} sal_log_processor_info_t;
+
+/* Given a sal_log_processor_info_t pointer, return a pointer to the processor_static_info: */
+#define SAL_LPI_PSI_INFO(l) \
+({ sal_log_processor_info_t *_l = (l); \
+ ((sal_processor_static_info_t *) \
+ ((char *) _l->info + ((_l->valid.num_cache_check + _l->valid.num_tlb_check \
+ + _l->valid.num_bus_check + _l->valid.num_reg_file_check \
+ + _l->valid.num_ms_check) * sizeof(sal_log_mod_error_info_t) \
+ + sizeof(struct sal_cpuid_info)))); \
+})
+
+/* platform error log structures */
+
+typedef struct sal_log_mem_dev_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 error_status : 1,
+ physical_addr : 1,
+ addr_mask : 1,
+ node : 1,
+ card : 1,
+ module : 1,
+ bank : 1,
+ device : 1,
+ row : 1,
+ column : 1,
+ bit_position : 1,
+ requestor_id : 1,
+ responder_id : 1,
+ target_id : 1,
+ bus_spec_data : 1,
+ oem_id : 1,
+ oem_data : 1,
+ reserved : 47;
+ } valid;
+ u64 error_status;
+ u64 physical_addr;
+ u64 addr_mask;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_position;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u64 bus_spec_data;
+ u8 oem_id[16];
+ u8 oem_data[1]; /* Variable length data */
+} sal_log_mem_dev_err_info_t;
+
+typedef struct sal_log_sel_dev_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 record_id : 1,
+ record_type : 1,
+ generator_id : 1,
+ evm_rev : 1,
+ sensor_type : 1,
+ sensor_num : 1,
+ event_dir : 1,
+ event_data1 : 1,
+ event_data2 : 1,
+ event_data3 : 1,
+ reserved : 54;
+ } valid;
+ u16 record_id;
+ u8 record_type;
+ u8 timestamp[4];
+ u16 generator_id;
+ u8 evm_rev;
+ u8 sensor_type;
+ u8 sensor_num;
+ u8 event_dir;
+ u8 event_data1;
+ u8 event_data2;
+ u8 event_data3;
+} sal_log_sel_dev_err_info_t;
+
+typedef struct sal_log_pci_bus_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ err_type : 1,
+ bus_id : 1,
+ bus_address : 1,
+ bus_data : 1,
+ bus_cmd : 1,
+ requestor_id : 1,
+ responder_id : 1,
+ target_id : 1,
+ oem_data : 1,
+ reserved : 54;
+ } valid;
+ u64 err_status;
+ u16 err_type;
+ u16 bus_id;
+ u32 reserved;
+ u64 bus_address;
+ u64 bus_data;
+ u64 bus_cmd;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u8 oem_data[1]; /* Variable length data */
+} sal_log_pci_bus_err_info_t;
+
+typedef struct sal_log_smbios_dev_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 event_type : 1,
+ length : 1,
+ time_stamp : 1,
+ data : 1,
+ reserved1 : 60;
+ } valid;
+ u8 event_type;
+ u8 length;
+ u8 time_stamp[6];
+ u8 data[1]; /* data of variable length, length == slsmb_length */
+} sal_log_smbios_dev_err_info_t;
+
+typedef struct sal_log_pci_comp_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ comp_info : 1,
+ num_mem_regs : 1,
+ num_io_regs : 1,
+ reg_data_pairs : 1,
+ oem_data : 1,
+ reserved : 58;
+ } valid;
+ u64 err_status;
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u8 class_code[3];
+ u8 func_num;
+ u8 dev_num;
+ u8 bus_num;
+ u8 seg_num;
+ u8 reserved[5];
+ } comp_info;
+ u32 num_mem_regs;
+ u32 num_io_regs;
+ u64 reg_data_pairs[1];
+ /*
+ * The array of address/data register pairs is num_mem_regs + num_io_regs elements
+ * long. Each array element consists of a u64 address followed by a u64 data
+ * value. The oem_data array immediately follows the reg_data_pairs array
+ */
+ u8 oem_data[1]; /* Variable length data */
+} sal_log_pci_comp_err_info_t;
+
+typedef struct sal_log_plat_specific_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ guid : 1,
+ oem_data : 1,
+ reserved : 61;
+ } valid;
+ u64 err_status;
+ efi_guid_t guid;
+ u8 oem_data[1]; /* platform specific variable length data */
+} sal_log_plat_specific_err_info_t;
+
+typedef struct sal_log_host_ctlr_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ requestor_id : 1,
+ responder_id : 1,
+ target_id : 1,
+ bus_spec_data : 1,
+ oem_data : 1,
+ reserved : 58;
+ } valid;
+ u64 err_status;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u64 bus_spec_data;
+ u8 oem_data[1]; /* Variable length OEM data */
+} sal_log_host_ctlr_err_info_t;
+
+typedef struct sal_log_plat_bus_err_info {
+ sal_log_section_hdr_t header;
+ struct {
+ u64 err_status : 1,
+ requestor_id : 1,
+ responder_id : 1,
+ target_id : 1,
+ bus_spec_data : 1,
+ oem_data : 1,
+ reserved : 58;
+ } valid;
+ u64 err_status;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u64 bus_spec_data;
+ u8 oem_data[1]; /* Variable length OEM data */
+} sal_log_plat_bus_err_info_t;
+
+/* Overall platform error section structure */
+typedef union sal_log_platform_err_info {
+ sal_log_mem_dev_err_info_t mem_dev_err;
+ sal_log_sel_dev_err_info_t sel_dev_err;
+ sal_log_pci_bus_err_info_t pci_bus_err;
+ sal_log_smbios_dev_err_info_t smbios_dev_err;
+ sal_log_pci_comp_err_info_t pci_comp_err;
+ sal_log_plat_specific_err_info_t plat_specific_err;
+ sal_log_host_ctlr_err_info_t host_ctlr_err;
+ sal_log_plat_bus_err_info_t plat_bus_err;
+} sal_log_platform_err_info_t;
+
+/* SAL log over-all, multi-section error record structure (processor+platform) */
+typedef struct err_rec {
+ sal_log_record_header_t sal_elog_header;
+ sal_log_processor_info_t proc_err;
+ sal_log_platform_err_info_t plat_err;
+ u8 oem_data_pad[1024];
+} ia64_err_rec_t;
+
+/*
+ * Now define a couple of inline functions for improved type checking
+ * and convenience.
+ */
+
+extern s64 ia64_sal_cache_flush (u64 cache_type);
+extern void __init check_sal_cache_flush (void);
+
+/* Initialize all the processor and platform level instruction and data caches */
+static inline s64
+ia64_sal_cache_init (void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0);
+ return isrv.status;
+}
+
+/*
+ * Clear the processor and platform information logged by SAL with respect to the machine
+ * state at the time of MCAs, INITs, CMCs, or CPEs.
+ */
+static inline s64
+ia64_sal_clear_state_info (u64 sal_info_type)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0,
+ 0, 0, 0, 0, 0);
+ return isrv.status;
+}
+
+
+/* Get the processor and platform information logged by SAL with respect to the machine
+ * state at the time of the MCAs, INITs, CMCs, or CPEs.
+ */
+static inline u64
+ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0,
+ sal_info, 0, 0, 0, 0);
+ if (isrv.status)
+ return 0;
+
+ return isrv.v0;
+}
+
+/*
+ * Get the maximum size of the information logged by SAL with respect to the machine state
+ * at the time of MCAs, INITs, CMCs, or CPEs.
+ */
+static inline u64
+ia64_sal_get_state_info_size (u64 sal_info_type)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0,
+ 0, 0, 0, 0, 0);
+ if (isrv.status)
+ return 0;
+ return isrv.v0;
+}
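+
+/*
+ * Typical (illustrative) pairing of the two calls above: ask SAL for the
+ * maximum record size, then fetch the record into a buffer of that size.
+ * This is a sketch -- allocation and error handling are elided, and
+ * SAL_INFO_TYPE_MCA is the constant defined earlier in this header.
+ *
+ *	u64 size = ia64_sal_get_state_info_size(SAL_INFO_TYPE_MCA);
+ *	u64 *buf = kmalloc(size, GFP_KERNEL);
+ *	if (buf && ia64_sal_get_state_info(SAL_INFO_TYPE_MCA, buf))
+ *		... parse the record, then clear it via
+ *		    ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA) ...
+ */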
+
+/*
+ * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from
+ * the monarch processor. Must not lock, because it will not return on any cpu until the
+ * monarch processor sends a wake up.
+ */
+static inline s64
+ia64_sal_mc_rendez (void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0);
+ return isrv.status;
+}
+
+/*
+ * Allow the OS to specify the interrupt number to be used by SAL to interrupt OS during
+ * the machine check rendezvous sequence as well as the mechanism to wake up the
+ * non-monarch processor at the end of machine check processing.
+ * Returns the complete ia64_sal_retval because some calls return more than just a status
+ * value.
+ */
+static inline struct ia64_sal_retval
+ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val,
+ timeout, rz_always, 0, 0);
+ return isrv;
+}
+
+/* Read from PCI configuration space */
+static inline s64
+ia64_sal_pci_config_read (u64 pci_config_addr, int type, u64 size, u64 *value)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, type, 0, 0, 0, 0);
+ if (value)
+ *value = isrv.v0;
+ return isrv.status;
+}
+
+/* Write to PCI configuration space */
+static inline s64
+ia64_sal_pci_config_write (u64 pci_config_addr, int type, u64 size, u64 value)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value,
+ type, 0, 0, 0);
+ return isrv.status;
+}
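+
+/*
+ * Illustrative round trip through the two helpers above. The packed
+ * config-space address and the type argument (0 here) are placeholders;
+ * consult the SAL spec for the exact encoding.
+ *
+ *	u64 val;
+ *	if (!ia64_sal_pci_config_read(addr, 0, 4, &val))
+ *		(void) ia64_sal_pci_config_write(addr, 0, 4, val);
+ */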
+
+/*
+ * Register physical addresses of locations needed by SAL when SAL procedures are invoked
+ * in virtual mode.
+ */
+static inline s64
+ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr,
+ 0, 0, 0, 0, 0);
+ return isrv.status;
+}
+
+/*
+ * Register software dependent code locations within SAL. These locations are handlers or
+ * entry points where SAL will pass control for the specified event. These event handlers
+ * are for the boot rendezvous, MCAs and INIT scenarios.
+ */
+static inline s64
+ia64_sal_set_vectors (u64 vector_type,
+ u64 handler_addr1, u64 gp1, u64 handler_len1,
+ u64 handler_addr2, u64 gp2, u64 handler_len2)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_SET_VECTORS, vector_type,
+ handler_addr1, gp1, handler_len1,
+ handler_addr2, gp2, handler_len2);
+
+ return isrv.status;
+}
+
+/* Update the contents of PAL block in the non-volatile storage device */
+static inline s64
+ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
+ u64 *error_code, u64 *scratch_buf_size_needed)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size,
+ 0, 0, 0, 0);
+ if (error_code)
+ *error_code = isrv.v0;
+ if (scratch_buf_size_needed)
+ *scratch_buf_size_needed = isrv.v1;
+ return isrv.status;
+}
+
+/* Get physical processor die mapping in the platform. */
+static inline s64
+ia64_sal_physical_id_info(u16 *splid)
+{
+ struct ia64_sal_retval isrv;
+
+ if (sal_revision < SAL_VERSION_CODE(3,2))
+ return -1;
+
+ SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
+ if (splid)
+ *splid = isrv.v0;
+ return isrv.status;
+}
+
+extern unsigned long sal_platform_features;
+
+extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
+
+struct sal_ret_values {
+ long r8; long r9; long r10; long r11;
+};
+
+#define IA64_SAL_OEMFUNC_MIN 0x02000000
+#define IA64_SAL_OEMFUNC_MAX 0x03ffffff
+
+extern int ia64_sal_oemcall(struct ia64_sal_retval *, u64, u64, u64, u64, u64,
+ u64, u64, u64);
+extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
+ u64, u64, u64, u64, u64);
+extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
+ u64, u64, u64, u64, u64);
+extern long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+ unsigned long *drift_info);
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * System Abstraction Layer Specification
+ * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
+ * Note: region regs are stored first in head.S _start. Hence they must
+ * stay up front.
+ */
+struct sal_to_os_boot {
+ u64 rr[8]; /* Region Registers */
+ u64 br[6]; /* br0:
+ * return addr into SAL boot rendez routine */
+ u64 gr1; /* SAL:GP */
+ u64 gr12; /* SAL:SP */
+ u64 gr13; /* SAL: Task Pointer */
+ u64 fpsr;
+ u64 pfs;
+ u64 rnat;
+ u64 unat;
+ u64 bspstore;
+ u64 dcr; /* Default Control Register */
+ u64 iva;
+ u64 pta;
+ u64 itv;
+ u64 pmv;
+ u64 cmcv;
+ u64 lrr[2];
+ u64 gr[4];
+ u64 pr; /* Predicate registers */
+ u64 lc; /* Loop Count */
+ struct ia64_fpreg fp[20];
+};
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+extern void ia64_jump_to_sal(struct sal_to_os_boot *);
+#endif
+
+extern void ia64_sal_handler_init(void *entry_point, void *gpval);
+
+#define PALO_MAX_TLB_PURGES 0xFFFF
+#define PALO_SIG "PALO"
+
+struct palo_table {
+ u8 signature[4]; /* Should be "PALO" */
+ u32 length;
+ u8 minor_revision;
+ u8 major_revision;
+ u8 checksum;
+ u8 reserved1[5];
+ u16 max_tlb_purges;
+ u8 reserved2[6];
+};
+
+#define NPTCG_FROM_PAL 0
+#define NPTCG_FROM_PALO 1
+#define NPTCG_FROM_KERNEL_PARAMETER 2
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_SAL_H */
diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h
new file mode 100644
index 000000000000..d6f57874041d
--- /dev/null
+++ b/arch/ia64/include/asm/scatterlist.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_SCATTERLIST_H
+#define _ASM_IA64_SCATTERLIST_H
+
+/*
+ * Modified 1998-1999, 2001-2002, 2004
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm/types.h>
+
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+ unsigned long sg_magic;
+#endif
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length; /* buffer length */
+
+ dma_addr_t dma_address;
+ unsigned int dma_length;
+};
+
+/*
+ * It used to be that ISA_DMA_THRESHOLD had something to do with the
+ * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
+ * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
+ * tell the block layer (via BLK_BOUNCE_ISA) the maximum physical
+ * address of a page allocated with GFP_DMA. On IA-64,
+ * that's 4GB - 1.
+ */
+#define ISA_DMA_THRESHOLD 0xffffffff
+
+#define sg_dma_len(sg) ((sg)->dma_length)
+#define sg_dma_address(sg) ((sg)->dma_address)
+
+#define ARCH_HAS_SG_CHAIN
+
+#endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
new file mode 100644
index 000000000000..7286e4a9fe84
--- /dev/null
+++ b/arch/ia64/include/asm/sections.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_IA64_SECTIONS_H
+#define _ASM_IA64_SECTIONS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm-generic/sections.h>
+
+extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
+extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
+extern char __start___rse_patchlist[], __end___rse_patchlist[];
+extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
+extern char __start___phys_stack_reg_patchlist[], __end___phys_stack_reg_patchlist[];
+extern char __start_gate_section[];
+extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
+extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
+extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
+extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
+extern char __start_unwind[], __end_unwind[];
+extern char __start_ivt_text[], __end_ivt_text[];
+
+#endif /* _ASM_IA64_SECTIONS_H */
+
diff --git a/arch/ia64/include/asm/segment.h b/arch/ia64/include/asm/segment.h
new file mode 100644
index 000000000000..b89e2b3d648f
--- /dev/null
+++ b/arch/ia64/include/asm/segment.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_IA64_SEGMENT_H
+#define _ASM_IA64_SEGMENT_H
+
+/* Only here because we have some old header files that expect it. */
+
+#endif /* _ASM_IA64_SEGMENT_H */
diff --git a/arch/ia64/include/asm/sembuf.h b/arch/ia64/include/asm/sembuf.h
new file mode 100644
index 000000000000..1340fbc04d3e
--- /dev/null
+++ b/arch/ia64/include/asm/sembuf.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_SEMBUF_H
+#define _ASM_IA64_SEMBUF_H
+
+/*
+ * The semid64_ds structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _ASM_IA64_SEMBUF_H */
diff --git a/arch/ia64/include/asm/serial.h b/arch/ia64/include/asm/serial.h
new file mode 100644
index 000000000000..068be11583df
--- /dev/null
+++ b/arch/ia64/include/asm/serial.h
@@ -0,0 +1,17 @@
+/*
+ * Derived from the i386 version.
+ */
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+/*
+ * All legacy serial ports should be enumerated via ACPI namespace, so
+ * we need not list them here.
+ */
diff --git a/arch/ia64/include/asm/setup.h b/arch/ia64/include/asm/setup.h
new file mode 100644
index 000000000000..4399a44355b3
--- /dev/null
+++ b/arch/ia64/include/asm/setup.h
@@ -0,0 +1,6 @@
+#ifndef __IA64_SETUP_H
+#define __IA64_SETUP_H
+
+#define COMMAND_LINE_SIZE 2048
+
+#endif
diff --git a/arch/ia64/include/asm/shmbuf.h b/arch/ia64/include/asm/shmbuf.h
new file mode 100644
index 000000000000..585002a77acd
--- /dev/null
+++ b/arch/ia64/include/asm/shmbuf.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_IA64_SHMBUF_H
+#define _ASM_IA64_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for IA-64 architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 2 miscellaneous 64-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ __kernel_time_t shm_dtime; /* last detach time */
+ __kernel_time_t shm_ctime; /* last change time */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_IA64_SHMBUF_H */
diff --git a/arch/ia64/include/asm/shmparam.h b/arch/ia64/include/asm/shmparam.h
new file mode 100644
index 000000000000..d07508dc54ae
--- /dev/null
+++ b/arch/ia64/include/asm/shmparam.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_IA64_SHMPARAM_H
+#define _ASM_IA64_SHMPARAM_H
+
+/*
+ * SHMLBA controls minimum alignment at which shared memory segments
+ * get attached. The IA-64 architecture says that there may be a
+ * performance degradation when there are virtual aliases within 1MB.
+ * To reduce the chance of this, we set SHMLBA to 1MB. --davidm 00/12/20
+ */
+#define SHMLBA (1024*1024)
+
+#endif /* _ASM_IA64_SHMPARAM_H */
diff --git a/arch/ia64/include/asm/sigcontext.h b/arch/ia64/include/asm/sigcontext.h
new file mode 100644
index 000000000000..57ff777bcc40
--- /dev/null
+++ b/arch/ia64/include/asm/sigcontext.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_IA64_SIGCONTEXT_H
+#define _ASM_IA64_SIGCONTEXT_H
+
+/*
+ * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/fpu.h>
+
+#define IA64_SC_FLAG_ONSTACK_BIT 0 /* is handler running on signal stack? */
+#define IA64_SC_FLAG_IN_SYSCALL_BIT 1 /* did signal interrupt a syscall? */
+#define IA64_SC_FLAG_FPH_VALID_BIT 2 /* is state in f[32]-f[127] valid? */
+
+#define IA64_SC_FLAG_ONSTACK (1 << IA64_SC_FLAG_ONSTACK_BIT)
+#define IA64_SC_FLAG_IN_SYSCALL (1 << IA64_SC_FLAG_IN_SYSCALL_BIT)
+#define IA64_SC_FLAG_FPH_VALID (1 << IA64_SC_FLAG_FPH_VALID_BIT)
+
+# ifndef __ASSEMBLY__
+
+/*
+ * Note on handling of register backing store: sc_ar_bsp contains the address that would
+ * be found in ar.bsp after executing a "cover" instruction in the context in which the
+ * signal was raised. If signal delivery required switching to an alternate signal stack
+ * (sc_rbs_base is not NULL), the "dirty" partition (as it would exist after executing the
+ * imaginary "cover" instruction) is backed by the *alternate* signal stack, not the
+ * original one. In this case, sc_rbs_base contains the base address of the new register
+ * backing store. The number of registers in the dirty partition can be calculated as:
+ *
+ * ndirty = ia64_rse_num_regs(sc_rbs_base, sc_rbs_base + (sc_loadrs >> 16))
+ *
+ */
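+
+/*
+ * As a typed sketch of the calculation above (ia64_rse_num_regs() comes
+ * from <asm/rse.h>; "sc" is a hypothetical struct sigcontext pointer):
+ *
+ *	unsigned long ndirty =
+ *		ia64_rse_num_regs((unsigned long *) sc->sc_rbs_base,
+ *				  (unsigned long *) (sc->sc_rbs_base +
+ *						     (sc->sc_loadrs >> 16)));
+ */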
+
+struct sigcontext {
+ unsigned long sc_flags; /* see manifest constants above */
+ unsigned long sc_nat; /* bit i == 1 iff scratch reg gr[i] is a NaT */
+ stack_t sc_stack; /* previously active stack */
+
+ unsigned long sc_ip; /* instruction pointer */
+ unsigned long sc_cfm; /* current frame marker */
+ unsigned long sc_um; /* user mask bits */
+ unsigned long sc_ar_rsc; /* register stack configuration register */
+ unsigned long sc_ar_bsp; /* backing store pointer */
+ unsigned long sc_ar_rnat; /* RSE NaT collection register */
+ unsigned long sc_ar_ccv; /* compare and exchange compare value register */
+ unsigned long sc_ar_unat; /* ar.unat of interrupted context */
+ unsigned long sc_ar_fpsr; /* floating-point status register */
+ unsigned long sc_ar_pfs; /* previous function state */
+ unsigned long sc_ar_lc; /* loop count register */
+ unsigned long sc_pr; /* predicate registers */
+ unsigned long sc_br[8]; /* branch registers */
+ /* Note: sc_gr[0] is used as the "uc_link" member of ucontext_t */
+ unsigned long sc_gr[32]; /* general registers (static partition) */
+ struct ia64_fpreg sc_fr[128]; /* floating-point registers */
+
+ unsigned long sc_rbs_base; /* NULL or new base of sighandler's rbs */
+ unsigned long sc_loadrs; /* see description above */
+
+ unsigned long sc_ar25; /* cmp8xchg16 uses this */
+ unsigned long sc_ar26; /* rsvd for scratch use */
+ unsigned long sc_rsvd[12]; /* reserved for future use */
+ /*
+ * The mask must come last so we can increase _NSIG_WORDS
+ * without breaking binary compatibility.
+ */
+ sigset_t sc_mask; /* signal mask to restore after handler returns */
+};
+
+# endif /* __ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGCONTEXT_H */
diff --git a/arch/ia64/include/asm/siginfo.h b/arch/ia64/include/asm/siginfo.h
new file mode 100644
index 000000000000..9294e4b0c8bc
--- /dev/null
+++ b/arch/ia64/include/asm/siginfo.h
@@ -0,0 +1,139 @@
+#ifndef _ASM_IA64_SIGINFO_H
+#define _ASM_IA64_SIGINFO_H
+
+/*
+ * Based on <asm-i386/siginfo.h>.
+ *
+ * Modified 1998-2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+
+#define HAVE_ARCH_SIGINFO_T
+#define HAVE_ARCH_COPY_SIGINFO
+#define HAVE_ARCH_COPY_SIGINFO_TO_USER
+
+#include <asm-generic/siginfo.h>
+
+typedef struct siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+ int __pad0;
+
+ union {
+ int _pad[SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ char _pad[sizeof(__ARCH_SI_UID_T) - sizeof(int)];
+ sigval_t _sigval; /* must overlay ._rt._sigval! */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ uid_t _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ uid_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ clock_t _utime;
+ clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ void __user *_addr; /* faulting insn/memory ref. */
+ int _imm; /* immediate value for "break" */
+ unsigned int _flags; /* see below */
+ unsigned long _isr; /* isr */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ long _band; /* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t;
+
+#define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */
+#define si_flags _sifields._sigfault._flags
+/*
+ * si_isr is valid for SIGILL, SIGFPE, SIGSEGV, SIGBUS, and SIGTRAP provided that
+ * si_code is non-zero and __ISR_VALID is set in si_flags.
+ */
+#define si_isr _sifields._sigfault._isr
+
+/*
+ * Flag values for si_flags:
+ */
+#define __ISR_VALID_BIT 0
+#define __ISR_VALID (1 << __ISR_VALID_BIT)
+
+/*
+ * SIGILL si_codes
+ */
+#define ILL_BADIADDR (__SI_FAULT|9) /* unimplemented instruction address */
+#define __ILL_BREAK (__SI_FAULT|10) /* illegal break */
+#define __ILL_BNDMOD (__SI_FAULT|11) /* bundle-update (modification) in progress */
+#undef NSIGILL
+#define NSIGILL 11
+
+/*
+ * SIGFPE si_codes
+ */
+#define __FPE_DECOVF (__SI_FAULT|9) /* decimal overflow */
+#define __FPE_DECDIV (__SI_FAULT|10) /* decimal division by zero */
+#define __FPE_DECERR (__SI_FAULT|11) /* packed decimal error */
+#define __FPE_INVASC (__SI_FAULT|12) /* invalid ASCII digit */
+#define __FPE_INVDEC (__SI_FAULT|13) /* invalid decimal digit */
+#undef NSIGFPE
+#define NSIGFPE 13
+
+/*
+ * SIGSEGV si_codes
+ */
+#define __SEGV_PSTKOVF (__SI_FAULT|3) /* paragraph stack overflow */
+#undef NSIGSEGV
+#define NSIGSEGV 3
+
+/*
+ * SIGTRAP si_codes
+ */
+#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
+#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */
+#undef NSIGTRAP
+#define NSIGTRAP 4
+
+#ifdef __KERNEL__
+#include <linux/string.h>
+
+static inline void
+copy_siginfo (siginfo_t *to, siginfo_t *from)
+{
+ if (from->si_code < 0)
+ memcpy(to, from, sizeof(siginfo_t));
+ else
+		/* _sigchld is currently the largest known union member */
+ memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld));
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_SIGINFO_H */
diff --git a/arch/ia64/include/asm/signal.h b/arch/ia64/include/asm/signal.h
new file mode 100644
index 000000000000..4f5ca5643cb1
--- /dev/null
+++ b/arch/ia64/include/asm/signal.h
@@ -0,0 +1,160 @@
+#ifndef _ASM_IA64_SIGNAL_H
+#define _ASM_IA64_SIGNAL_H
+
+/*
+ * Modified 1998-2001, 2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * Unfortunately, this file is being included by bits/signal.h in
+ * glibc-2.x. Hence the #ifdef __KERNEL__ ugliness.
+ */
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+#define SA_RESTORER 0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+/*
+ * The minimum stack size needs to be fairly large because we want to
+ * be sure that an app compiled for today's CPUs will continue to run
+ * on all future CPU models. The CPU model matters because the signal
+ * frame needs to have space for the complete machine state, including
+ * all physical stacked registers. The number of physical stacked
+ * registers is CPU model dependent, but given that the width of
+ * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
+ * more than 16KB of space.
+ */
+#if 1
+ /*
+ * This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
+ * in wrong. ;-( To preserve backwards compatibility, we leave the kernel at the
+ * incorrect value and fix libc only.
+ */
+# define MINSIGSTKSZ 131027 /* min. stack size for sigaltstack() */
+#else
+# define MINSIGSTKSZ 131072 /* min. stack size for sigaltstack() */
+#endif
+#define SIGSTKSZ 262144 /* default stack size for sigaltstack() */
+
+#ifdef __KERNEL__
+
+#define _NSIG 64
+#define _NSIG_BPW 64
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/signal.h>
+
+# ifndef __ASSEMBLY__
+
+# include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+typedef unsigned long old_sigset_t;
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+# include <asm/sigcontext.h>
+
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+#endif /* __KERNEL__ */
+
+# endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_SIGNAL_H */
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
new file mode 100644
index 000000000000..12d96e0cd513
--- /dev/null
+++ b/arch/ia64/include/asm/smp.h
@@ -0,0 +1,138 @@
+/*
+ * SMP Support
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P.
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+#ifndef _ASM_IA64_SMP_H
+#define _ASM_IA64_SMP_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+#include <linux/irqreturn.h>
+
+#include <asm/io.h>
+#include <asm/param.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+static inline unsigned int
+ia64_get_lid (void)
+{
+ union {
+ struct {
+ unsigned long reserved : 16;
+ unsigned long eid : 8;
+ unsigned long id : 8;
+ unsigned long ignored : 32;
+ } f;
+ unsigned long bits;
+ } lid;
+
+ lid.bits = ia64_getreg(_IA64_REG_CR_LID);
+ return lid.f.id << 8 | lid.f.eid;
+}
+
+#define hard_smp_processor_id() ia64_get_lid()
+
+#ifdef CONFIG_SMP
+
+#define XTP_OFFSET 0x1e0008
+
+#define SMP_IRQ_REDIRECTION (1 << 0)
+#define SMP_IPI_REDIRECTION (1 << 1)
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+extern struct smp_boot_data {
+ int cpu_count;
+ int cpu_phys_id[NR_CPUS];
+} smp_boot_data __initdata;
+
+extern char no_int_routing __devinitdata;
+
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_core_map[NR_CPUS];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+extern int smp_num_siblings;
+extern void __iomem *ipi_base_addr;
+extern unsigned char smp_int_redirect;
+
+extern volatile int ia64_cpu_to_sapicid[];
+#define cpu_physical_id(i) ia64_cpu_to_sapicid[i]
+
+extern unsigned long ap_wakeup_vector;
+
+/*
+ * Function to map hard smp processor id to logical id. Slow, so don't use this in
+ * performance-critical code.
+ */
+static inline int
+cpu_logical_id (int cpuid)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; ++i)
+ if (cpu_physical_id(i) == cpuid)
+ break;
+ return i;
+}
+
+/*
+ * XTP control functions:
+ * min_xtp : route all interrupts to this CPU
+ * normal_xtp: nominal XTP value
+ * max_xtp : never deliver interrupts to this CPU.
+ */
+
+static inline void
+min_xtp (void)
+{
+ if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+ writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
+}
+
+static inline void
+normal_xtp (void)
+{
+ if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+ writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
+}
+
+static inline void
+max_xtp (void)
+{
+ if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+ writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
+}
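+
+/*
+ * Illustrative usage of the XTP helpers above: bias interrupt routing
+ * away from this CPU around a latency-sensitive region, then restore
+ * nominal routing. The surrounding code is hypothetical.
+ *
+ *	max_xtp();			(stop accepting external interrupts)
+ *	do_latency_sensitive_work();	(hypothetical)
+ *	normal_xtp();			(restore nominal XTP)
+ */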
+
+/* Upping and downing of CPUs */
+extern int __cpu_disable (void);
+extern void __cpu_die (unsigned int cpu);
+extern void cpu_die (void) __attribute__ ((noreturn));
+extern void __init smp_build_cpu_map(void);
+
+extern void __init init_smp_config (void);
+extern void smp_do_timer (struct pt_regs *regs);
+
+extern irqreturn_t handle_IPI(int irq, void *dev_id);
+extern void smp_send_reschedule (int cpu);
+extern void identify_siblings (struct cpuinfo_ia64 *);
+extern int is_multithreading_enabled(void);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
+#else /* CONFIG_SMP */
+
+#define cpu_logical_id(i) 0
+#define cpu_physical_id(i) ia64_get_lid()
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_IA64_SMP_H */
diff --git a/arch/ia64/include/asm/sn/acpi.h b/arch/ia64/include/asm/sn/acpi.h
new file mode 100644
index 000000000000..9ce2801cbd57
--- /dev/null
+++ b/arch/ia64/include/asm/sn/acpi.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_ACPI_H
+#define _ASM_IA64_SN_ACPI_H
+
+#include "acpi/acglobal.h"
+
+extern int sn_acpi_rev;
+#define SN_ACPI_BASE_SUPPORT() (sn_acpi_rev >= 0x20101)
+
+#endif /* _ASM_IA64_SN_ACPI_H */
diff --git a/arch/ia64/include/asm/sn/addrs.h b/arch/ia64/include/asm/sn/addrs.h
new file mode 100644
index 000000000000..e715c794b186
--- /dev/null
+++ b/arch/ia64/include/asm/sn/addrs.h
@@ -0,0 +1,299 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_ADDRS_H
+#define _ASM_IA64_SN_ADDRS_H
+
+#include <asm/percpu.h>
+#include <asm/sn/types.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/pda.h>
+
+/*
+ * Memory/SHUB Address Format:
+ * +-+---------+--+--------------+
+ * |0| NASID |AS| NodeOffset |
+ * +-+---------+--+--------------+
+ *
+ * NASID: (low NASID bit is 0) Memory and SHUB MMRs
+ * AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0
+ * 00: Local Resources and MMR space
+ * Top bit of NodeOffset
+ * 0: Local resources space
+ * node id:
+ * 0: IA64/NT compatibility space
+ * 2: Local MMR Space
+ * 4: Local memory, regardless of local node id
+ * 1: Global MMR space
+ * 01: GET space.
+ * 10: AMO space.
+ * 11: Cacheable memory space.
+ *
+ * NodeOffset: byte offset
+ *
+ *
+ * TIO address format:
+ * +-+----------+--+--------------+
+ * |0| NASID |AS| Nodeoffset |
+ * +-+----------+--+--------------+
+ *
+ * NASID: (low NASID bit is 1) TIO
+ * AS: 2-bit Chiplet Identifier
+ * 00: TIO LB (Indicates TIO MMR access.)
+ * 01: TIO ICE (indicates coretalk space access.)
+ *
+ * NodeOffset: top bit must be set.
+ *
+ *
+ * Note that in both of the above address formats, the low
+ * NASID bit indicates if the reference is to the SHUB or TIO MMRs.
+ */
+
+
+/*
+ * Define basic shift & mask constants for manipulating NASIDs and AS values.
+ */
+#define NASID_BITMASK (sn_hub_info->nasid_bitmask)
+#define NASID_SHIFT (sn_hub_info->nasid_shift)
+#define AS_SHIFT (sn_hub_info->as_shift)
+#define AS_BITMASK 0x3UL
+
+#define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT)
+#define AS_MASK ((u64)AS_BITMASK << AS_SHIFT)
+
+
+/*
+ * AS values. These are the same on both SHUB1 & SHUB2.
+ */
+#define AS_GET_VAL 1UL
+#define AS_AMO_VAL 2UL
+#define AS_CAC_VAL 3UL
+#define AS_GET_SPACE (AS_GET_VAL << AS_SHIFT)
+#define AS_AMO_SPACE (AS_AMO_VAL << AS_SHIFT)
+#define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT)
+
+
+/*
+ * Virtual Mode Local & Global MMR space.
+ */
+#define SH1_LOCAL_MMR_OFFSET 0x8000000000UL
+#define SH2_LOCAL_MMR_OFFSET 0x0200000000UL
+#define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
+#define LOCAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
+#define LOCAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
+
+#define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL
+#define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL
+#define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
+#define GLOBAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
+
+/*
+ * Physical mode addresses
+ */
+#define GLOBAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
+
+
+/*
+ * Clear region & AS bits.
+ */
+#define TO_PHYS_MASK (~(RGN_BITS | AS_MASK))
+
+
+/*
+ * Misc NASID manipulation.
+ */
+#define NASID_SPACE(n) ((u64)(n) << NASID_SHIFT)
+#define REMOTE_ADDR(n,a) (NASID_SPACE(n) | (a))
+#define NODE_OFFSET(x) ((x) & (NODE_ADDRSPACE_SIZE - 1))
+#define NODE_ADDRSPACE_SIZE (1UL << AS_SHIFT)
+#define NASID_GET(x) (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK)
+#define LOCAL_MMR_ADDR(a) (LOCAL_MMR_SPACE | (a))
+#define GLOBAL_MMR_ADDR(n,a) (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
+#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
+#define GLOBAL_CAC_ADDR(n,a) (CAC_BASE | REMOTE_ADDR(n,a))
+#define CHANGE_NASID(n,x) ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
+#define IS_TIO_NASID(n) ((n) & 1)
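+
+/*
+ * Illustrative sketch: the macros above compose to build a virtual
+ * address for a remote node's MMR space. "nasid" and "mmr_offset" are
+ * hypothetical values.
+ *
+ *	volatile u64 *mmr =
+ *		(volatile u64 *) GLOBAL_MMR_ADDR(nasid, mmr_offset);
+ *	u64 v = *mmr;
+ */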
+
+
+/* non-II mmr's start at top of big window space (4G) */
+#define BWIN_TOP 0x0000000100000000UL
+
+/*
+ * general address defines
+ */
+#define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE)
+#define AMO_BASE (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
+#define AMO_PHYS_BASE (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
+#define GET_BASE (PAGE_OFFSET | AS_GET_SPACE)
+
+/*
+ * Convert Memory addresses between various addressing modes.
+ */
+#define TO_PHYS(x) (TO_PHYS_MASK & (x))
+#define TO_CAC(x) (CAC_BASE | TO_PHYS(x))
+#ifdef CONFIG_SGI_SN
+#define TO_AMO(x) (AMO_BASE | TO_PHYS(x))
+#define TO_GET(x) (GET_BASE | TO_PHYS(x))
+#else
+#define TO_AMO(x) ({ BUG(); x; })
+#define TO_GET(x) ({ BUG(); x; })
+#endif
+
+/*
+ * Convert from processor physical address to II/TIO physical address:
+ * II - squeeze out the AS bits
+ * TIO- requires a chiplet id in bits 38-39. For DMA to memory,
+ * the chiplet id is zero. If we implement TIO-TIO dma, we might need
+ * to insert a chiplet id into this macro. However, it is our belief
+ * right now that this chiplet id will be ICE, which is also zero.
+ */
+#define SH1_TIO_PHYS_TO_DMA(x) \
+ ((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
+
+#define SH2_NETWORK_BANK_OFFSET(x) \
+ ((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1))
+
+#define SH2_NETWORK_BANK_SELECT(x) \
+ ((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4))) \
+ >> (sn_hub_info->nasid_shift - 4)) << 36)
+
+#define SH2_NETWORK_ADDRESS(x) \
+ (SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
+
+#define SH2_TIO_PHYS_TO_DMA(x) \
+ (((u64)(NASID_GET(x)) << 40) | SH2_NETWORK_ADDRESS(x))
+
+#define PHYS_TO_TIODMA(x) \
+ (is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
+
+#define PHYS_TO_DMA(x) \
+ ((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
+
+
+/*
+ * Macros to test for address type.
+ */
+#define IS_AMO_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
+#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
+
+
+/*
+ * The following definitions pertain to the IO special address
+ * space. They define the location of the big and little windows
+ * of any given node.
+ */
+#define BWIN_SIZE_BITS 29 /* big window size: 512M */
+#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */
+#define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
+ : RAW_NODE_SWIN_BASE(n, w))
+#define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \
+ ((u64) (w) << TIO_SWIN_SIZE_BITS))
+#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n))
+#define TIO_IO_BASE(n) (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
+#define BWIN_SIZE (1UL << BWIN_SIZE_BITS)
+#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE)
+#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
+#define RAW_NODE_SWIN_BASE(n, w) (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
+#define BWIN_WIDGET_MASK 0x7
+#define BWIN_WINDOWNUM(x) (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+#define SH1_IS_BIG_WINDOW_ADDR(x) ((x) & BWIN_TOP)
+
+#define TIO_BWIN_WINDOW_SELECT_MASK 0x7
+#define TIO_BWIN_WINDOWNUM(x) (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
+
+#define TIO_HWIN_SHIFT_BITS 33
+#define TIO_HWIN(x) (NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
+
+/*
+ * The following definitions pertain to the IO special address
+ * space. They define the location of the big and little windows
+ * of any given node.
+ */
+
+#define SWIN_SIZE_BITS 24
+#define SWIN_WIDGET_MASK 0xF
+
+#define TIO_SWIN_SIZE_BITS 28
+#define TIO_SWIN_SIZE (1UL << TIO_SWIN_SIZE_BITS)
+#define TIO_SWIN_WIDGET_MASK 0x3
+
+/*
+ * Convert smallwindow address to xtalk address.
+ *
+ * 'addr' can be a physical or virtual address, but will be converted
+ * to an Xtalk address in the range 0 -> SWINZ_SIZEMASK.
+ */
+#define SWIN_WIDGETNUM(x) (((x) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+#define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
+
+
+/*
+ * The following macros produce the correct base virtual address for
+ * the hub registers. The REMOTE_HUB_* macros produce
+ * the address for the specified hub's registers. The intent is
+ * that the appropriate PI, MD, NI, or II register would be substituted
+ * for x.
+ *
+ * WARNING:
+ * When certain Hub chip workarounds are defined, it's not sufficient
+ * to dereference the *_HUB_ADDR() macros. You should instead use
+ * HUB_L() and HUB_S() if you must deal with pointers to hub registers.
+ * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
+ * They're always safe.
+ */
+/* Shub1 TIO & MMR addressing macros */
+#define SH1_TIO_IOSPACE_ADDR(n,x) \
+ GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_BWIN_MMR(n,x) \
+ GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_SWIN_MMR(n,x) \
+ (NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
+
+#define SH1_REMOTE_MMR(n,x) \
+ (SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) : \
+ SH1_REMOTE_SWIN_MMR(n,x))
+
+/* Shub1 TIO & MMR addressing macros */
+#define SH2_TIO_IOSPACE_ADDR(n,x) \
+ ((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
+
+#define SH2_REMOTE_MMR(n,x) \
+ GLOBAL_MMR_ADDR(n,x)
+
+
+/* TIO & MMR addressing macros that work on both shub1 & shub2 */
+#define TIO_IOSPACE_ADDR(n,x) \
+ ((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) : \
+ SH2_TIO_IOSPACE_ADDR(n,x)))
+
+#define SH_REMOTE_MMR(n,x) \
+ (is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
+
+#define REMOTE_HUB_ADDR(n,x) \
+ (IS_TIO_NASID(n) ? ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) : \
+ ((volatile u64*)SH_REMOTE_MMR(n,x)))
+
+
+#define HUB_L(x) (*((volatile typeof(*x) *)x))
+#define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d))
+
+#define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a)))
+#define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
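+
+/*
+ * Illustrative sketch of the safe access pattern recommended in the
+ * warning above: go through HUB_L()/HUB_S() (here via the REMOTE_HUB_*
+ * wrappers) rather than dereferencing *_HUB_ADDR() directly. The nasid
+ * and register offset are hypothetical.
+ *
+ *	u64 v = REMOTE_HUB_L(nasid, mmr_offset);
+ *	REMOTE_HUB_S(nasid, mmr_offset, v | 0x1UL);
+ */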
+
+/*
+ * Coretalk address breakdown
+ */
+#define CTALK_NASID_SHFT 40
+#define CTALK_NASID_MASK (0x3FFFULL << CTALK_NASID_SHFT)
+#define CTALK_CID_SHFT 38
+#define CTALK_CID_MASK (0x3ULL << CTALK_CID_SHFT)
+#define CTALK_NODE_OFFSET 0x3FFFFFFFFF
+
+#endif /* _ASM_IA64_SN_ADDRS_H */
diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h
new file mode 100644
index 000000000000..7caa1f44cd95
--- /dev/null
+++ b/arch/ia64/include/asm/sn/arch.h
@@ -0,0 +1,86 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI specific setup.
+ *
+ * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_IA64_SN_ARCH_H
+#define _ASM_IA64_SN_ARCH_H
+
+#include <linux/numa.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sn_cpuid.h>
+
+/*
+ * This is the maximum number of NUMALINK nodes that can be part of a single
+ * SSI kernel. This number includes C-bricks, M-bricks, and TIOs. Nodes in
+ * remote partitions are NOT included in this number.
+ * The number of compact nodes cannot exceed size of a coherency domain.
+ * The purpose of this define is to specify a node count that includes
+ * all C/M/TIO nodes in an SSI system.
+ *
+ * SGI systems can currently support up to 256 C/M nodes plus additional TIO nodes.
+ *
+ * Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
+ * to ACPI3.0, this limit will be removed. The notion of "compact nodes"
+ * should be deleted and TIOs should be included in MAX_NUMNODES.
+ */
+#define MAX_TIO_NODES MAX_NUMNODES
+#define MAX_COMPACT_NODES (MAX_NUMNODES + MAX_TIO_NODES)
+
+/*
+ * Maximum number of nodes in all partitions and in all coherency domains.
+ * This is the total number of nodes accessible in the numalink fabric. It
+ * includes all C & M bricks, plus all TIOs.
+ *
+ * This value is also the value of the maximum number of NASIDs in the numalink
+ * fabric.
+ */
+#define MAX_NUMALINK_NODES 16384
+
+/*
+ * The following defines attributes of the HUB chip. These attributes are
+ * frequently referenced. They are kept in the per-cpu data areas of each cpu.
+ * They are kept together in a struct to minimize cache misses.
+ */
+struct sn_hub_info_s {
+ u8 shub2;
+ u8 nasid_shift;
+ u8 as_shift;
+ u8 shub_1_1_found;
+ u16 nasid_bitmask;
+};
+DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
+#define sn_hub_info (&__get_cpu_var(__sn_hub_info))
+#define is_shub2() (sn_hub_info->shub2)
+#define is_shub1() (sn_hub_info->shub2 == 0)
+
+/*
+ * Use this macro to test if shub 1.1 wars should be enabled
+ */
+#define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found)
+
+
+/*
+ * Compact node ID to nasid mappings kept in the per-cpu data areas of each
+ * cpu.
+ */
+DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
+
+
+extern u8 sn_partition_id;
+extern u8 sn_system_size;
+extern u8 sn_sharing_domain_size;
+extern u8 sn_region_size;
+
+extern void sn_flush_all_caches(long addr, long bytes);
+extern bool sn_cpu_disable_allowed(int cpu);
+
+#endif /* _ASM_IA64_SN_ARCH_H */
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
new file mode 100644
index 000000000000..a0d214f43115
--- /dev/null
+++ b/arch/ia64/include/asm/sn/bte.h
@@ -0,0 +1,233 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+
+#ifndef _ASM_IA64_SN_BTE_H
+#define _ASM_IA64_SN_BTE_H
+
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/types.h>
+#include <asm/sn/shub_mmr.h>
+
+#define IBCT_NOTIFY (0x1UL << 4)
+#define IBCT_ZFIL_MODE (0x1UL << 0)
+
+/* #define BTE_DEBUG */
+/* #define BTE_DEBUG_VERBOSE */
+
+#ifdef BTE_DEBUG
+# define BTE_PRINTK(x) printk x /* Terse */
+# ifdef BTE_DEBUG_VERBOSE
+# define BTE_PRINTKV(x) printk x /* Verbose */
+# else
+# define BTE_PRINTKV(x)
+# endif /* BTE_DEBUG_VERBOSE */
+#else
+# define BTE_PRINTK(x)
+# define BTE_PRINTKV(x)
+#endif /* BTE_DEBUG */
+
+
+/* BTE status register only supports 16 bits for length field */
+#define BTE_LEN_BITS (16)
+#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
+#define BTE_MAX_XFER ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES)
+
+
+/* Define hardware */
+#define BTES_PER_NODE (is_shub2() ? 4 : 2)
+#define MAX_BTES_PER_NODE 4
+
+#define BTE2OFF_CTRL 0
+#define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0)
+#define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0)
+#define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0)
+
+#define BTE_BASE_ADDR(interface) \
+ (is_shub2() ? (interface == 0) ? SH2_BT_ENG_CSR_0 : \
+ (interface == 1) ? SH2_BT_ENG_CSR_1 : \
+ (interface == 2) ? SH2_BT_ENG_CSR_2 : \
+ SH2_BT_ENG_CSR_3 \
+ : (interface == 0) ? IIO_IBLS0 : IIO_IBLS1)
+
+#define BTE_SOURCE_ADDR(base) \
+ (is_shub2() ? base + (BTE2OFF_SRC/8) \
+ : base + (BTEOFF_SRC/8))
+
+#define BTE_DEST_ADDR(base) \
+ (is_shub2() ? base + (BTE2OFF_DEST/8) \
+ : base + (BTEOFF_DEST/8))
+
+#define BTE_CTRL_ADDR(base) \
+ (is_shub2() ? base + (BTE2OFF_CTRL/8) \
+ : base + (BTEOFF_CTRL/8))
+
+#define BTE_NOTIF_ADDR(base) \
+ (is_shub2() ? base + (BTE2OFF_NOTIFY/8) \
+ : base + (BTEOFF_NOTIFY/8))
+
+/* Define hardware modes */
+#define BTE_NOTIFY IBCT_NOTIFY
+#define BTE_NORMAL BTE_NOTIFY
+#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
+/* Use a reserved bit to let the caller specify a wait for any BTE */
+#define BTE_WACQUIRE 0x4000
+/* Use the BTE on the node with the destination memory */
+#define BTE_USE_DEST (BTE_WACQUIRE << 1)
+/* Use any available BTE interface on any node for the transfer */
+#define BTE_USE_ANY (BTE_USE_DEST << 1)
+/* macro to force the IBCT0 value valid */
+#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
+
+#define BTE_ACTIVE (IBLS_BUSY | IBLS_ERROR)
+#define BTE_WORD_AVAILABLE (IBLS_BUSY << 1)
+#define BTE_WORD_BUSY (~BTE_WORD_AVAILABLE)
+
+/*
+ * Some macros to simplify reading.
+ * Start with macros to locate the BTE control registers.
+ */
+#define BTE_LNSTAT_LOAD(_bte) \
+ HUB_L(_bte->bte_base_addr)
+#define BTE_LNSTAT_STORE(_bte, _x) \
+ HUB_S(_bte->bte_base_addr, (_x))
+#define BTE_SRC_STORE(_bte, _x) \
+({ \
+ u64 __addr = ((_x) & ~AS_MASK); \
+ if (is_shub2()) \
+ __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+ HUB_S(_bte->bte_source_addr, __addr); \
+})
+#define BTE_DEST_STORE(_bte, _x) \
+({ \
+ u64 __addr = ((_x) & ~AS_MASK); \
+ if (is_shub2()) \
+ __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+ HUB_S(_bte->bte_destination_addr, __addr); \
+})
+#define BTE_CTRL_STORE(_bte, _x) \
+ HUB_S(_bte->bte_control_addr, (_x))
+#define BTE_NOTIF_STORE(_bte, _x) \
+({ \
+ u64 __addr = ia64_tpa((_x) & ~AS_MASK); \
+ if (is_shub2()) \
+ __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+ HUB_S(_bte->bte_notify_addr, __addr); \
+})
+
+#define BTE_START_TRANSFER(_bte, _len, _mode) \
+ is_shub2() ? BTE_CTRL_STORE(_bte, IBLS_BUSY | (_mode << 24) | _len) \
+ : BTE_LNSTAT_STORE(_bte, _len); \
+ BTE_CTRL_STORE(_bte, _mode)
+
+/* Possible results from bte_copy and bte_unaligned_copy */
+/* The following error codes map into the BTE hardware codes
+ * IIO_ICRB_ECODE_* (in shubio.h). The hardware uses
+ * an error code of 0 (IIO_ICRB_ECODE_DERR), but we want zero
+ * to mean BTE_SUCCESS, so add one (BTEFAIL_OFFSET) to the error
+ * codes to give the following error codes.
+ */
+#define BTEFAIL_OFFSET 1
+
+typedef enum {
+ BTE_SUCCESS, /* 0 is success */
+ BTEFAIL_DIR, /* Directory error due to IIO access*/
+ BTEFAIL_POISON, /* poison error on IO access (write to poison page) */
+ BTEFAIL_WERR, /* Write error (ie WINV to a Read only line) */
+ BTEFAIL_ACCESS, /* access error (protection violation) */
+ BTEFAIL_PWERR, /* Partial Write Error */
+ BTEFAIL_PRERR, /* Partial Read Error */
+ BTEFAIL_TOUT, /* CRB Time out */
+ BTEFAIL_XTERR, /* Incoming xtalk pkt had error bit */
+ BTEFAIL_NOTAVAIL, /* BTE not available */
+} bte_result_t;
+
+#define BTEFAIL_SH2_RESP_SHORT 0x1 /* bit 000001 */
+#define BTEFAIL_SH2_RESP_LONG 0x2 /* bit 000010 */
+#define BTEFAIL_SH2_RESP_DSP 0x4 /* bit 000100 */
+#define BTEFAIL_SH2_RESP_ACCESS 0x8 /* bit 001000 */
+#define BTEFAIL_SH2_CRB_TO 0x10 /* bit 010000 */
+#define BTEFAIL_SH2_NACK_LIMIT 0x20 /* bit 100000 */
+#define BTEFAIL_SH2_ALL 0x3F /* bit 111111 */
+
+#define BTE_ERR_BITS 0x3FUL
+#define BTE_ERR_SHIFT 36
+#define BTE_ERR_MASK (BTE_ERR_BITS << BTE_ERR_SHIFT)
+
+#define BTE_ERROR_RETRY(value) \
+ (is_shub2() ? (value != BTEFAIL_SH2_CRB_TO) \
+ : (value != BTEFAIL_TOUT))
+
+/*
+ * On shub1 BTE_ERR_MASK will always be false, so no need for is_shub2()
+ */
+#define BTE_SHUB2_ERROR(_status) \
+ ((_status & BTE_ERR_MASK) \
+ ? (((_status >> BTE_ERR_SHIFT) & BTE_ERR_BITS) | IBLS_ERROR) \
+ : _status)
+
+#define BTE_GET_ERROR_STATUS(_status) \
+ (BTE_SHUB2_ERROR(_status) & ~IBLS_ERROR)
+
+#define BTE_VALID_SH2_ERROR(value) \
+ ((value >= BTEFAIL_SH2_RESP_SHORT) && (value <= BTEFAIL_SH2_ALL))
+
+/*
+ * Structure defining a bte. An instance of this
+ * structure is created in the nodepda for each
+ * bte on that node (as defined by BTES_PER_NODE).
+ * This structure contains everything necessary
+ * to work with a BTE.
+ */
+struct bteinfo_s {
+ volatile u64 notify ____cacheline_aligned;
+ u64 *bte_base_addr ____cacheline_aligned;
+ u64 *bte_source_addr;
+ u64 *bte_destination_addr;
+ u64 *bte_control_addr;
+ u64 *bte_notify_addr;
+ spinlock_t spinlock;
+ cnodeid_t bte_cnode; /* cnode */
+ int bte_error_count; /* Number of errors encountered */
+ int bte_num; /* 0 --> BTE0, 1 --> BTE1 */
+ int cleanup_active; /* Interface is locked for cleanup */
+ volatile bte_result_t bh_error; /* error while processing */
+ volatile u64 *most_rcnt_na;
+ struct bteinfo_s *btes_to_try[MAX_BTES_PER_NODE];
+};
+
+
+/*
+ * Function prototypes (functions defined in bte.c, used elsewhere)
+ */
+extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
+extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
+extern void bte_error_handler(unsigned long);
+
+#define bte_zero(dest, len, mode, notification) \
+ bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
+
+/*
+ * The following is the preferred way of calling bte_unaligned_copy.
+ * If the copy is fully cache line aligned, then bte_copy is
+ * used instead. Since bte_copy is inlined, this saves a function
+ * call. NOTE: bte_copy is called synchronously and does block
+ * until the transfer is complete. To get the asynchronous
+ * version of bte_copy, you must perform this check yourself.
+ */
+#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
+ (((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) || \
+ (dest & L1_CACHE_MASK)) ? \
+ bte_unaligned_copy(src, dest, len, mode) : \
+ bte_copy(src, dest, len, mode, NULL))
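+
+/*
+ * Illustrative call (a sketch): a blocking transfer between cache-aligned
+ * physical addresses. BTE_WACQUIRE waits for a free interface, and the
+ * NULL notification pointer makes bte_copy() block until the transfer
+ * completes, per the comment above. "src_phys", "dst_phys", "nbytes",
+ * and the error handler are hypothetical.
+ *
+ *	bte_result_t rv = bte_copy(src_phys, dst_phys, nbytes,
+ *				   BTE_NOTIFY | BTE_WACQUIRE, NULL);
+ *	if (rv != BTE_SUCCESS)
+ *		handle_bte_error(rv);
+ */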
+
+
+#endif /* _ASM_IA64_SN_BTE_H */
diff --git a/arch/ia64/include/asm/sn/clksupport.h b/arch/ia64/include/asm/sn/clksupport.h
new file mode 100644
index 000000000000..d340c365a824
--- /dev/null
+++ b/arch/ia64/include/asm/sn/clksupport.h
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * This file contains definitions for accessing a platform-supported high resolution
+ * clock. The clock is monotonically increasing and can be accessed from any node
+ * in the system. The clock is synchronized across nodes - all nodes see the
+ * same value.
+ *
+ * RTC_COUNTER_ADDR - contains the address of the counter
+ *
+ */
+
+#ifndef _ASM_IA64_SN_CLKSUPPORT_H
+#define _ASM_IA64_SN_CLKSUPPORT_H
+
+extern unsigned long sn_rtc_cycles_per_second;
+
+#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))
+
+#define rtc_time() (*RTC_COUNTER_ADDR)
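+
+/*
+ * Illustrative sketch: timing an interval with the RTC. The conversion
+ * uses sn_rtc_cycles_per_second, exported above; overflow handling is
+ * elided and do_something() is hypothetical.
+ *
+ *	long t0 = rtc_time();
+ *	do_something();
+ *	u64 ns = (rtc_time() - t0) * 1000000000UL /
+ *		 sn_rtc_cycles_per_second;
+ */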
+
+#endif /* _ASM_IA64_SN_CLKSUPPORT_H */
diff --git a/arch/ia64/include/asm/sn/geo.h b/arch/ia64/include/asm/sn/geo.h
new file mode 100644
index 000000000000..f083c9434066
--- /dev/null
+++ b/arch/ia64/include/asm/sn/geo.h
@@ -0,0 +1,132 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_GEO_H
+#define _ASM_IA64_SN_GEO_H
+
+/* The geoid_t implementation below is based loosely on the pcfg_t
+ implementation in sys/SN/promcfg.h. */
+
+/* Type declarations */
+
+/* Size of a geoid_t structure (must be before decl. of geoid_u) */
+#define GEOID_SIZE 8 /* Would 16 be better? The size can
+ be different on different platforms. */
+
+#define MAX_SLOTS 0xf /* slots per module */
+#define MAX_SLABS 0xf /* slabs per slot */
+
+typedef unsigned char geo_type_t;
+
+/* Fields common to all substructures */
+typedef struct geo_common_s {
+ moduleid_t module; /* The module (box) this h/w lives in */
+ geo_type_t type; /* What type of h/w is named by this geoid_t */
+ slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */
+ slotid_t slot:4; /* slot (Blade), 0 .. 15 within module */
+} geo_common_t;
+
+/* Additional fields for particular types of hardware */
+typedef struct geo_node_s {
+ geo_common_t common; /* No additional fields needed */
+} geo_node_t;
+
+typedef struct geo_rtr_s {
+ geo_common_t common; /* No additional fields needed */
+} geo_rtr_t;
+
+typedef struct geo_iocntl_s {
+ geo_common_t common; /* No additional fields needed */
+} geo_iocntl_t;
+
+typedef struct geo_pcicard_s {
+ geo_iocntl_t common;
+ char bus; /* Bus/widget number */
+ char slot; /* PCI slot number */
+} geo_pcicard_t;
+
+/* Subcomponents of a node */
+typedef struct geo_cpu_s {
+ geo_node_t node;
+ char slice; /* Which CPU on the node */
+} geo_cpu_t;
+
+typedef struct geo_mem_s {
+ geo_node_t node;
+ char membus; /* The memory bus on the node */
+ char memslot; /* The memory slot on the bus */
+} geo_mem_t;
+
+
+typedef union geoid_u {
+ geo_common_t common;
+ geo_node_t node;
+ geo_iocntl_t iocntl;
+ geo_pcicard_t pcicard;
+ geo_rtr_t rtr;
+ geo_cpu_t cpu;
+ geo_mem_t mem;
+ char padsize[GEOID_SIZE];
+} geoid_t;
+
+
+/* Preprocessor macros */
+
+#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad:
+ module/001c07/slab/5/node/memory/2/slot/4 */
+
+/* Values for geo_type_t */
+#define GEO_TYPE_INVALID 0
+#define GEO_TYPE_MODULE 1
+#define GEO_TYPE_NODE 2
+#define GEO_TYPE_RTR 3
+#define GEO_TYPE_IOCNTL 4
+#define GEO_TYPE_IOCARD 5
+#define GEO_TYPE_CPU 6
+#define GEO_TYPE_MEM 7
+#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)
+
+/* Parameter for hwcfg_format_geoid_compt() */
+#define GEO_COMPT_MODULE 1
+#define GEO_COMPT_SLAB 2
+#define GEO_COMPT_IOBUS 3
+#define GEO_COMPT_IOSLOT 4
+#define GEO_COMPT_CPU 5
+#define GEO_COMPT_MEMBUS 6
+#define GEO_COMPT_MEMSLOT 7
+
+#define GEO_INVALID_STR "<invalid>"
+
+#define INVALID_NASID ((nasid_t)-1)
+#define INVALID_CNODEID ((cnodeid_t)-1)
+#define INVALID_PNODEID ((pnodeid_t)-1)
+#define INVALID_SLAB (slabid_t)-1
+#define INVALID_SLOT (slotid_t)-1
+#define INVALID_MODULE ((moduleid_t)-1)
+
+static inline slabid_t geo_slab(geoid_t g)
+{
+ return (g.common.type == GEO_TYPE_INVALID) ?
+ INVALID_SLAB : g.common.slab;
+}
+
+static inline slotid_t geo_slot(geoid_t g)
+{
+ return (g.common.type == GEO_TYPE_INVALID) ?
+ INVALID_SLOT : g.common.slot;
+}
+
+static inline moduleid_t geo_module(geoid_t g)
+{
+ return (g.common.type == GEO_TYPE_INVALID) ?
+ INVALID_MODULE : g.common.module;
+}
+
+extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
+
+#endif /* _ASM_IA64_SN_GEO_H */
diff --git a/arch/ia64/include/asm/sn/intr.h b/arch/ia64/include/asm/sn/intr.h
new file mode 100644
index 000000000000..e0487aa97418
--- /dev/null
+++ b/arch/ia64/include/asm/sn/intr.h
@@ -0,0 +1,68 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_INTR_H
+#define _ASM_IA64_SN_INTR_H
+
+#include <linux/rcupdate.h>
+#include <asm/sn/types.h>
+
+#define SGI_UART_VECTOR 0xe9
+
+/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
+#define SGI_XPC_ACTIVATE 0x30
+#define SGI_II_ERROR 0x31
+#define SGI_XBOW_ERROR 0x32
+#define SGI_PCIASIC_ERROR 0x33
+#define SGI_ACPI_SCI_INT 0x34
+#define SGI_TIOCA_ERROR 0x35
+#define SGI_TIO_ERROR 0x36
+#define SGI_TIOCX_ERROR 0x37
+#define SGI_MMTIMER_VECTOR 0x38
+#define SGI_XPC_NOTIFY 0xe7
+
+#define IA64_SN2_FIRST_DEVICE_VECTOR 0x3c
+#define IA64_SN2_LAST_DEVICE_VECTOR 0xe6
+
+#define SN2_IRQ_RESERVED 0x1
+#define SN2_IRQ_CONNECTED 0x2
+#define SN2_IRQ_SHARED 0x4
+
+/* The SN PROM irq struct */
+struct sn_irq_info {
+ struct sn_irq_info *irq_next; /* deprecated DO NOT USE */
+ short irq_nasid; /* Nasid IRQ is assigned to */
+ int irq_slice; /* slice IRQ is assigned to */
+ int irq_cpuid; /* kernel logical cpuid */
+ int irq_irq; /* the IRQ number */
+ int irq_int_bit; /* Bridge interrupt pin */
+ /* <0 means MSI */
+ u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */
+ int irq_bridge_type;/* pciio asic type (pciio.h) */
+ void *irq_bridge; /* bridge generating irq */
+ void *irq_pciioinfo; /* associated pciio_info_t */
+ int irq_last_intr; /* For Shub lb lost intr WAR */
+ int irq_cookie; /* unique cookie */
+ int irq_flags; /* flags */
+ int irq_share_cnt; /* num devices sharing IRQ */
+ struct list_head list; /* list of sn_irq_info structs */
+ struct rcu_head rcu; /* rcu callback list */
+};
+
+extern void sn_send_IPI_phys(int, long, int, int);
+extern u64 sn_intr_alloc(nasid_t, int,
+ struct sn_irq_info *,
+ int, nasid_t, int);
+extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
+extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
+extern void sn_set_err_irq_affinity(unsigned int);
+extern struct list_head **sn_irq_lh;
+
+#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
+
+#endif /* _ASM_IA64_SN_INTR_H */
diff --git a/arch/ia64/include/asm/sn/io.h b/arch/ia64/include/asm/sn/io.h
new file mode 100644
index 000000000000..41c73a735628
--- /dev/null
+++ b/arch/ia64/include/asm/sn/io.h
@@ -0,0 +1,274 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_SN_IO_H
+#define _ASM_SN_IO_H
+#include <linux/compiler.h>
+#include <asm/intrinsics.h>
+
+extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
+extern void __sn_mmiowb(void); /* Forward definition */
+
+extern int num_cnodes;
+
+#define __sn_mf_a() ia64_mfa()
+
+extern void sn_dma_flush(unsigned long);
+
+#define __sn_inb ___sn_inb
+#define __sn_inw ___sn_inw
+#define __sn_inl ___sn_inl
+#define __sn_outb ___sn_outb
+#define __sn_outw ___sn_outw
+#define __sn_outl ___sn_outl
+#define __sn_readb ___sn_readb
+#define __sn_readw ___sn_readw
+#define __sn_readl ___sn_readl
+#define __sn_readq ___sn_readq
+#define __sn_readb_relaxed ___sn_readb_relaxed
+#define __sn_readw_relaxed ___sn_readw_relaxed
+#define __sn_readl_relaxed ___sn_readl_relaxed
+#define __sn_readq_relaxed ___sn_readq_relaxed
+
+/*
+ * Convenience macros for setting/clearing bits using the above accessors
+ */
+
+#define __sn_setq_relaxed(addr, val) \
+ writeq((__sn_readq_relaxed(addr) | (val)), (addr))
+#define __sn_clrq_relaxed(addr, val) \
+ writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
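+
+/*
+ * Illustrative usage sketch ("mmr" is a hypothetical MMR mapping):
+ *
+ *	u64 __iomem *mmr = <some memory-mapped register>;
+ *
+ *	__sn_setq_relaxed(mmr, 1UL << 3);	set bit 3
+ *	__sn_clrq_relaxed(mmr, 1UL << 3);	clear bit 3
+ */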
+
+/*
+ * The following routines are SN platform specific, called when
+ * a reference is made to the inX/outX set of macros. The SN
+ * platform inX macros ensure that posted DMA writes on the
+ * Bridge are flushed.
+ *
+ * The routines should be self-explanatory.
+ */
+
+static inline unsigned int
+___sn_inb (unsigned long port)
+{
+ volatile unsigned char *addr;
+ unsigned char ret = -1;
+
+ if ((addr = sn_io_addr(port))) {
+ ret = *addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ }
+ return ret;
+}
+
+static inline unsigned int
+___sn_inw (unsigned long port)
+{
+ volatile unsigned short *addr;
+ unsigned short ret = -1;
+
+ if ((addr = sn_io_addr(port))) {
+ ret = *addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ }
+ return ret;
+}
+
+static inline unsigned int
+___sn_inl (unsigned long port)
+{
+ volatile unsigned int *addr;
+ unsigned int ret = -1;
+
+ if ((addr = sn_io_addr(port))) {
+ ret = *addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ }
+ return ret;
+}
+
+static inline void
+___sn_outb (unsigned char val, unsigned long port)
+{
+ volatile unsigned char *addr;
+
+ if ((addr = sn_io_addr(port))) {
+ *addr = val;
+ __sn_mmiowb();
+ }
+}
+
+static inline void
+___sn_outw (unsigned short val, unsigned long port)
+{
+ volatile unsigned short *addr;
+
+ if ((addr = sn_io_addr(port))) {
+ *addr = val;
+ __sn_mmiowb();
+ }
+}
+
+static inline void
+___sn_outl (unsigned int val, unsigned long port)
+{
+ volatile unsigned int *addr;
+
+ if ((addr = sn_io_addr(port))) {
+ *addr = val;
+ __sn_mmiowb();
+ }
+}
+
+/*
+ * The following routines are SN platform specific, called when
+ * a reference is made to the readX/writeX set of macros. The SN
+ * platform readX macros ensure that posted DMA writes on the
+ * Bridge are flushed.
+ *
+ * The routines should be self-explanatory.
+ */
+
+static inline unsigned char
+___sn_readb (const volatile void __iomem *addr)
+{
+ unsigned char val;
+
+ val = *(volatile unsigned char __force *)addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ return val;
+}
+
+static inline unsigned short
+___sn_readw (const volatile void __iomem *addr)
+{
+ unsigned short val;
+
+ val = *(volatile unsigned short __force *)addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ return val;
+}
+
+static inline unsigned int
+___sn_readl (const volatile void __iomem *addr)
+{
+ unsigned int val;
+
+ val = *(volatile unsigned int __force *)addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ return val;
+}
+
+static inline unsigned long
+___sn_readq (const volatile void __iomem *addr)
+{
+ unsigned long val;
+
+ val = *(volatile unsigned long __force *)addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ return val;
+}
+
+/*
+ * For generic and SN2 kernels, we have a set of fast access
+ * PIO macros. These macros are provided on the SN platform
+ * because the normal inX and readX macros perform the
+ * additional task of flushing posted DMA requests on the Bridge.
+ *
+ * These routines should be self-explanatory.
+ */
+
+static inline unsigned int
+sn_inb_fast (unsigned long port)
+{
+ volatile unsigned char *addr = (unsigned char *)port;
+ unsigned char ret;
+
+ ret = *addr;
+ __sn_mf_a();
+ return ret;
+}
+
+static inline unsigned int
+sn_inw_fast (unsigned long port)
+{
+ volatile unsigned short *addr = (unsigned short *)port;
+ unsigned short ret;
+
+ ret = *addr;
+ __sn_mf_a();
+ return ret;
+}
+
+static inline unsigned int
+sn_inl_fast (unsigned long port)
+{
+ volatile unsigned int *addr = (unsigned int *)port;
+ unsigned int ret;
+
+ ret = *addr;
+ __sn_mf_a();
+ return ret;
+}
+
+static inline unsigned char
+___sn_readb_relaxed (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short
+___sn_readw_relaxed (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned int
+___sn_readl_relaxed (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned int __force *) addr;
+}
+
+static inline unsigned long
+___sn_readq_relaxed (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned long __force *) addr;
+}
+
+struct pci_dev;
+
+static inline int
+sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
+{
+
+ if (vchan > 1) {
+ return -1;
+ }
+
+ if (!(*addr >> 32)) /* Using a mask here would be cleaner */
+ return 0; /* but this generates better code */
+
+ if (vchan == 1) {
+ /* Set Bit 57 */
+ *addr |= (1UL << 57);
+ } else {
+ /* Clear Bit 57 */
+ *addr &= ~(1UL << 57);
+ }
+
+ return 0;
+}
+
+#endif /* _ASM_SN_IO_H */
diff --git a/arch/ia64/include/asm/sn/ioc3.h b/arch/ia64/include/asm/sn/ioc3.h
new file mode 100644
index 000000000000..95ed6cc83cf1
--- /dev/null
+++ b/arch/ia64/include/asm/sn/ioc3.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ */
+#ifndef IA64_SN_IOC3_H
+#define IA64_SN_IOC3_H
+
+/* serial port register map */
+struct ioc3_serialregs {
+ uint32_t sscr;
+ uint32_t stpir;
+ uint32_t stcir;
+ uint32_t srpir;
+ uint32_t srcir;
+ uint32_t srtr;
+ uint32_t shadow;
+};
+
+/* SUPERIO uart register map */
+struct ioc3_uartregs {
+ char iu_lcr;
+ union {
+ char iir; /* read only */
+ char fcr; /* write only */
+ } u3;
+ union {
+ char ier; /* DLAB == 0 */
+ char dlm; /* DLAB == 1 */
+ } u2;
+ union {
+ char rbr; /* read only, DLAB == 0 */
+ char thr; /* write only, DLAB == 0 */
+ char dll; /* DLAB == 1 */
+ } u1;
+ char iu_scr;
+ char iu_msr;
+ char iu_lsr;
+ char iu_mcr;
+};
+
+#define iu_rbr u1.rbr
+#define iu_thr u1.thr
+#define iu_dll u1.dll
+#define iu_ier u2.ier
+#define iu_dlm u2.dlm
+#define iu_iir u3.iir
+#define iu_fcr u3.fcr
+
+struct ioc3_sioregs {
+ char fill[0x170];
+ struct ioc3_uartregs uartb;
+ struct ioc3_uartregs uarta;
+};
+
+/* PCI IO/mem space register map */
+struct ioc3 {
+ uint32_t pci_id;
+ uint32_t pci_scr;
+ uint32_t pci_rev;
+ uint32_t pci_lat;
+ uint32_t pci_addr;
+ uint32_t pci_err_addr_l;
+ uint32_t pci_err_addr_h;
+
+ uint32_t sio_ir;
+ /* these registers are read-only for general kernel code. To
+ * modify them use the functions in ioc3.c
+ */
+ uint32_t sio_ies;
+ uint32_t sio_iec;
+ uint32_t sio_cr;
+ uint32_t int_out;
+ uint32_t mcr;
+ uint32_t gpcr_s;
+ uint32_t gpcr_c;
+ uint32_t gpdr;
+ uint32_t gppr[9];
+ char fill[0x4c];
+
+ /* serial port registers */
+ uint32_t sbbr_h;
+ uint32_t sbbr_l;
+
+ struct ioc3_serialregs port_a;
+ struct ioc3_serialregs port_b;
+ char fill1[0x1ff10];
+ /* superio registers */
+ struct ioc3_sioregs sregs;
+};
+
+/* These don't exist on the ioc3 serial card... */
+#define eier fill1[8]
+#define eisr fill1[4]
+
+#define PCI_LAT 0xc /* Latency Timer */
+#define PCI_SCR_DROP_MODE_EN 0x00008000 /* drop pios on parity err */
+#define UARTA_BASE 0x178
+#define UARTB_BASE 0x170
+
+
+/* bitmasks for serial RX status byte */
+#define RXSB_OVERRUN 0x01 /* char(s) lost */
+#define RXSB_PAR_ERR 0x02 /* parity error */
+#define RXSB_FRAME_ERR 0x04 /* framing error */
+#define RXSB_BREAK 0x08 /* break character */
+#define RXSB_CTS 0x10 /* state of CTS */
+#define RXSB_DCD 0x20 /* state of DCD */
+#define RXSB_MODEM_VALID 0x40 /* DCD, CTS and OVERRUN are valid */
+#define RXSB_DATA_VALID 0x80 /* FRAME_ERR PAR_ERR & BREAK valid */
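+
+/*
+ * Illustrative usage sketch: the two _VALID bits gate which of the
+ * other status bits can be trusted for a given RX status byte "sb"
+ * (a hypothetical local variable):
+ *
+ *	if ((sb & RXSB_DATA_VALID) && (sb & RXSB_FRAME_ERR))
+ *		handle a framing error;
+ *	if ((sb & RXSB_MODEM_VALID) && (sb & RXSB_DCD))
+ *		carrier is up;
+ */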
+
+/* bitmasks for serial TX control byte */
+#define TXCB_INT_WHEN_DONE 0x20 /* interrupt after this byte is sent */
+#define TXCB_INVALID 0x00 /* byte is invalid */
+#define TXCB_VALID 0x40 /* byte is valid */
+#define TXCB_MCR 0x80 /* data<7:0> to modem cntrl register */
+#define TXCB_DELAY 0xc0 /* delay data<7:0> mSec */
+
+/* bitmasks for SBBR_L */
+#define SBBR_L_SIZE 0x00000001 /* 0 1KB rings, 1 4KB rings */
+
+/* bitmasks for SSCR_<A:B> */
+#define SSCR_RX_THRESHOLD 0x000001ff /* hiwater mark */
+#define SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */
+#define SSCR_HFC_EN 0x00020000 /* h/w flow cntrl enabled */
+#define SSCR_RX_RING_DCD 0x00040000 /* postRX record on delta-DCD */
+#define SSCR_RX_RING_CTS 0x00080000 /* postRX record on delta-CTS */
+#define SSCR_HIGH_SPD 0x00100000 /* 4X speed */
+#define SSCR_DIAG 0x00200000 /* bypass clock divider */
+#define SSCR_RX_DRAIN 0x08000000 /* drain RX buffer to memory */
+#define SSCR_DMA_EN 0x10000000 /* enable ring buffer DMA */
+#define SSCR_DMA_PAUSE 0x20000000 /* pause DMA */
+#define SSCR_PAUSE_STATE 0x40000000 /* set when PAUSE takes effect*/
+#define SSCR_RESET 0x80000000 /* reset DMA channels */
+
+/* all producer/consumer pointers are the same bitfield */
+#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
+#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
+#define PROD_CONS_PTR_OFF 3
+
+/* bitmasks for SRCIR_<A:B> */
+#define SRCIR_ARM 0x80000000 /* arm RX timer */
+
+/* bitmasks for SHADOW_<A:B> */
+#define SHADOW_DR 0x00000001 /* data ready */
+#define SHADOW_OE 0x00000002 /* overrun error */
+#define SHADOW_PE 0x00000004 /* parity error */
+#define SHADOW_FE 0x00000008 /* framing error */
+#define SHADOW_BI 0x00000010 /* break interrupt */
+#define SHADOW_THRE 0x00000020 /* transmit holding reg empty */
+#define SHADOW_TEMT 0x00000040 /* transmit shift reg empty */
+#define SHADOW_RFCE 0x00000080 /* char in RX fifo has error */
+#define SHADOW_DCTS 0x00010000 /* delta clear to send */
+#define SHADOW_DDCD 0x00080000 /* delta data carrier detect */
+#define SHADOW_CTS 0x00100000 /* clear to send */
+#define SHADOW_DCD 0x00800000 /* data carrier detect */
+#define SHADOW_DTR 0x01000000 /* data terminal ready */
+#define SHADOW_RTS 0x02000000 /* request to send */
+#define SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */
+#define SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */
+#define SHADOW_LOOP 0x10000000 /* loopback enabled */
+
+/* bitmasks for SRTR_<A:B> */
+#define SRTR_CNT 0x00000fff /* reload value for RX timer */
+#define SRTR_CNT_VAL 0x0fff0000 /* current value of RX timer */
+#define SRTR_CNT_VAL_SHIFT 16
+#define SRTR_HZ 16000 /* SRTR clock frequency */
+
+/* bitmasks for SIO_IR, SIO_IEC and SIO_IES */
+#define SIO_IR_SA_TX_MT 0x00000001 /* Serial port A TX empty */
+#define SIO_IR_SA_RX_FULL 0x00000002 /* port A RX buf full */
+#define SIO_IR_SA_RX_HIGH 0x00000004 /* port A RX hiwat */
+#define SIO_IR_SA_RX_TIMER 0x00000008 /* port A RX timeout */
+#define SIO_IR_SA_DELTA_DCD 0x00000010 /* port A delta DCD */
+#define SIO_IR_SA_DELTA_CTS 0x00000020 /* port A delta CTS */
+#define SIO_IR_SA_INT 0x00000040 /* port A pass-thru intr */
+#define SIO_IR_SA_TX_EXPLICIT 0x00000080 /* port A explicit TX thru */
+#define SIO_IR_SA_MEMERR 0x00000100 /* port A PCI error */
+#define SIO_IR_SB_TX_MT 0x00000200
+#define SIO_IR_SB_RX_FULL 0x00000400
+#define SIO_IR_SB_RX_HIGH 0x00000800
+#define SIO_IR_SB_RX_TIMER 0x00001000
+#define SIO_IR_SB_DELTA_DCD 0x00002000
+#define SIO_IR_SB_DELTA_CTS 0x00004000
+#define SIO_IR_SB_INT 0x00008000
+#define SIO_IR_SB_TX_EXPLICIT 0x00010000
+#define SIO_IR_SB_MEMERR 0x00020000
+#define SIO_IR_PP_INT 0x00040000 /* P port pass-thru intr */
+#define SIO_IR_PP_INTA 0x00080000 /* PP context A thru */
+#define SIO_IR_PP_INTB 0x00100000 /* PP context B thru */
+#define SIO_IR_PP_MEMERR 0x00200000 /* PP PCI error */
+#define SIO_IR_KBD_INT 0x00400000 /* kbd/mouse intr */
+#define SIO_IR_RT_INT 0x08000000 /* RT output pulse */
+#define SIO_IR_GEN_INT1 0x10000000 /* RT input pulse */
+#define SIO_IR_GEN_INT_SHIFT 28
+
+/* per device interrupt masks */
+#define SIO_IR_SA (SIO_IR_SA_TX_MT | \
+ SIO_IR_SA_RX_FULL | \
+ SIO_IR_SA_RX_HIGH | \
+ SIO_IR_SA_RX_TIMER | \
+ SIO_IR_SA_DELTA_DCD | \
+ SIO_IR_SA_DELTA_CTS | \
+ SIO_IR_SA_INT | \
+ SIO_IR_SA_TX_EXPLICIT | \
+ SIO_IR_SA_MEMERR)
+
+#define SIO_IR_SB (SIO_IR_SB_TX_MT | \
+ SIO_IR_SB_RX_FULL | \
+ SIO_IR_SB_RX_HIGH | \
+ SIO_IR_SB_RX_TIMER | \
+ SIO_IR_SB_DELTA_DCD | \
+ SIO_IR_SB_DELTA_CTS | \
+ SIO_IR_SB_INT | \
+ SIO_IR_SB_TX_EXPLICIT | \
+ SIO_IR_SB_MEMERR)
+
+#define SIO_IR_PP (SIO_IR_PP_INT | SIO_IR_PP_INTA | \
+ SIO_IR_PP_INTB | SIO_IR_PP_MEMERR)
+#define SIO_IR_RT (SIO_IR_RT_INT | SIO_IR_GEN_INT1)
+
+/* bitmasks for SIO_CR */
+#define SIO_CR_CMD_PULSE_SHIFT 15
+#define SIO_CR_SER_A_BASE_SHIFT 1
+#define SIO_CR_SER_B_BASE_SHIFT 8
+#define SIO_CR_ARB_DIAG 0x00380000 /* cur !enet PCI request (ro) */
+#define SIO_CR_ARB_DIAG_TXA 0x00000000
+#define SIO_CR_ARB_DIAG_RXA 0x00080000
+#define SIO_CR_ARB_DIAG_TXB 0x00100000
+#define SIO_CR_ARB_DIAG_RXB 0x00180000
+#define SIO_CR_ARB_DIAG_PP 0x00200000
+#define SIO_CR_ARB_DIAG_IDLE 0x00400000 /* 0 -> active request (ro) */
+
+/* defs for some of the generic I/O pins */
+#define GPCR_PHY_RESET 0x20 /* pin is output to PHY reset */
+#define GPCR_UARTB_MODESEL 0x40 /* pin is output to port B mode sel */
+#define GPCR_UARTA_MODESEL 0x80 /* pin is output to port A mode sel */
+
+#define GPPR_PHY_RESET_PIN 5 /* GIO pin controlling phy reset */
+#define GPPR_UARTB_MODESEL_PIN 6 /* GIO pin cntrling uartb modeselect */
+#define GPPR_UARTA_MODESEL_PIN 7 /* GIO pin cntrling uarta modeselect */
+
+#endif /* IA64_SN_IOC3_H */
diff --git a/arch/ia64/include/asm/sn/klconfig.h b/arch/ia64/include/asm/sn/klconfig.h
new file mode 100644
index 000000000000..bcbf209d63be
--- /dev/null
+++ b/arch/ia64/include/asm/sn/klconfig.h
@@ -0,0 +1,246 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/klconfig.h>.
+ *
+ * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_IA64_SN_KLCONFIG_H
+#define _ASM_IA64_SN_KLCONFIG_H
+
+/*
+ * The KLCONFIG structures store info about the various BOARDs found
+ * during Hardware Discovery. In addition, it stores info about the
+ * components found on the BOARDs.
+ */
+
+typedef s32 klconf_off_t;
+
+
+/* Functions/macros needed to use this structure */
+
+typedef struct kl_config_hdr {
+ char pad[20];
+ klconf_off_t ch_board_info; /* the link list of boards */
+ char pad0[88];
+} kl_config_hdr_t;
+
+
+#define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off)))
+
+/*
+ * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
+ * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to
+ * the LOCAL/current NODE. REMOTE means it is attached to a different
+ * node. (TBD - Need a way to treat ROUTER boards.)
+ *
+ * There are 2 different structures to represent these boards -
+ * lboard - Local board, rboard - remote board. These 2 structures
+ * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
+ * Figure below). The first byte of the rboard or lboard structure
+ * is used to find out its type - no unions are used.
+ * If it is a lboard, then the config info of this board will be found
+ * on the local node. (LOCAL NODE BASE + offset value gives a pointer to
+ * the structure.)
+ * If it is a rboard, the local structure contains the node number
+ * and the offset of the beginning of the LINKED LIST on the remote node.
+ * The details of the hardware on a remote node can be built locally,
+ * if required, by reading the LINKED LIST on the remote node and
+ * ignoring all the rboards on that node.
+ *
+ * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the
+ * First board info on the remote node. The remote node list is
+ * traversed as the local list, using the REMOTE BASE ADDRESS and not
+ * the local base address and ignoring all rboard values.
+ *
+ *
+ KLCONFIG
+
+ +------------+ +------------+ +------------+ +------------+
+ | lboard | +-->| lboard | +-->| rboard | +-->| lboard |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | board info | | | board info | | |errinfo,bptr| | | board info |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | offset |--+ | offset |--+ | offset |--+ |offset=NULL |
+ +------------+ +------------+ +------------+ +------------+
+
+
+ +------------+
+ | board info |
+ +------------+ +--------------------------------+
+ | compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
+ +------------+ +--------------------------------+
+ | compt 2 |--+
+ +------------+ | +--------------------------------+
+ | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
+ +------------+ +--------------------------------+
+ | errinfo |--+
+ +------------+ | +--------------------------------+
+ +--->|r/l brd errinfo,compt err flags |
+ +--------------------------------+
+
+ *
+ * Each BOARD consists of COMPONENTs and the BOARD structure has
+ * pointers (offsets) to its COMPONENT structure.
+ * The COMPONENT structure has version info, size and speed info, revision,
+ * error info and the NIC info. This structure can accommodate any
+ * BOARD with arbitrary COMPONENT composition.
+ *
+ * The ERRORINFO part of each BOARD has error information
+ * that describes errors about the BOARD itself. It also has flags to
+ * indicate the COMPONENT(s) on the board that have errors. The error
+ * information specific to the COMPONENT is present in the respective
+ * COMPONENT structure.
+ *
+ * The ERRORINFO structure is also treated like a COMPONENT, i.e. the
+ * BOARD has pointers (offsets) to the ERRORINFO structure. The rboard
+ * structure also has a pointer to the ERRORINFO structure. This is
+ * the place to store ERRORINFO about a REMOTE NODE, if the HUB on
+ * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where
+ * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
+ * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info
+ * which is present on the REMOTE NODE. (TBD)
+ * REMOTE ERRINFO can be stored on any of the nearest nodes
+ * or on all the nearest nodes. (TBD)
+ * Like BOARD structures, REMOTE ERRINFO structures can be built locally
+ * using the rboard errinfo pointer.
+ *
+ * In order to get useful information from this data organization, a set of
+ * interface routines is provided (TBD). The important thing to remember while
+ * manipulating the structures, is that, the NODE number information should
+ * be used. If the NODE is non-zero (remote) then each offset should
+ * be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR.
+ * This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
+ *
+ * Note that these structures do not provide much info about connectivity.
+ * That info will be part of HWGRAPH, which is an extension of the cfg_t
+ * data structure. (ref IP27prom/cfg.h) It has to be extended to include
+ * the IO part of the Network(TBD).
+ *
+ * The data structures below define the above concepts.
+ */
+
+
+/*
+ * BOARD classes
+ */
+
+#define KLCLASS_MASK 0xf0
+#define KLCLASS_NONE 0x00
+#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
+#define KLCLASS_CPU KLCLASS_NODE
+#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
+ and the non-graphics widget boards */
+#define KLCLASS_ROUTER 0x30 /* Router board */
+#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
+ so that we can record error info */
+#define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */
+#define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */
+
+#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
+
+
+/*
+ * board types
+ */
+
+#define KLTYPE_MASK 0x0f
+#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
+
+#define KLTYPE_SNIA (KLCLASS_CPU | 0x1)
+#define KLTYPE_TIO (KLCLASS_CPU | 0x2)
+
+#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
+#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
+#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
+
+#define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
+
+#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
+#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4)
+#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6)
+#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7)
+#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8)
+#define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9)
+#define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa)
+#define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb)
+#define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc)
+#define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd)
+
+
+/*
+ * board structures
+ */
+
+#define MAX_COMPTS_PER_BRD 24
+
+typedef struct lboard_s {
+ klconf_off_t brd_next_any; /* Next BOARD */
+ unsigned char struct_type; /* type of structure, local or remote */
+ unsigned char brd_type; /* type+class */
+ unsigned char brd_sversion; /* version of this structure */
+ unsigned char brd_brevision; /* board revision */
+ unsigned char brd_promver; /* board prom version, if any */
+ unsigned char brd_flags; /* Enabled, Disabled etc */
+ unsigned char brd_slot; /* slot number */
+ unsigned short brd_debugsw; /* Debug switches */
+ geoid_t brd_geoid; /* geo id */
+ partid_t brd_partition; /* Partition number */
+ unsigned short brd_diagval; /* diagnostic value */
+ unsigned short brd_diagparm; /* diagnostic parameter */
+ unsigned char brd_inventory; /* inventory history */
+ unsigned char brd_numcompts; /* Number of components */
+ nic_t brd_nic; /* Number in CAN */
+ nasid_t brd_nasid; /* passed parameter */
+ klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
+ klconf_off_t brd_errinfo; /* Board's error information */
+ struct lboard_s *brd_parent; /* Logical parent for this brd */
+ char pad0[4];
+ unsigned char brd_confidence; /* confidence that the board is bad */
+ nasid_t brd_owner; /* who owns this board */
+ unsigned char brd_nic_flags; /* To handle 8 more NICs */
+ char pad1[24]; /* future expansion */
+ char brd_name[32];
+ nasid_t brd_next_same_host; /* host of next brd w/same nasid */
+ klconf_off_t brd_next_same; /* Next BOARD with same nasid */
+} lboard_t;
+
+/*
+ * Generic info structure. This stores common info about a
+ * component.
+ */
+
+typedef struct klinfo_s { /* Generic info */
+ unsigned char struct_type; /* type of this structure */
+ unsigned char struct_version; /* version of this structure */
+ unsigned char flags; /* Enabled, disabled etc */
+ unsigned char revision; /* component revision */
+ unsigned short diagval; /* result of diagnostics */
+ unsigned short diagparm; /* diagnostic parameter */
+ unsigned char inventory; /* previous inventory status */
+ unsigned short partid; /* widget part number */
+ nic_t nic; /* Must be aligned properly */
+ unsigned char physid; /* physical id of component */
+ unsigned int virtid; /* virtual id as seen by system */
+ unsigned char widid; /* Widget id - if applicable */
+ nasid_t nasid; /* node number - from parent */
+ char pad1; /* pad out structure. */
+ char pad2; /* pad out structure. */
+ void *data;
+ klconf_off_t errinfo; /* component specific errors */
+ unsigned short pad3; /* pci fields have moved over to */
+ unsigned short pad4; /* klbri_t */
+} klinfo_t ;
+
+
+static inline lboard_t *find_lboard_next(lboard_t * brd)
+{
+ if (brd && brd->brd_next_any)
+ return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any);
+ return NULL;
+}
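+
+/*
+ * Illustrative usage sketch: walking the whole board list from a
+ * hypothetical starting board "brd":
+ *
+ *	while (brd) {
+ *		inspect brd->brd_type, brd->brd_geoid, ...;
+ *		brd = find_lboard_next(brd);
+ *	}
+ */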
+
+#endif /* _ASM_IA64_SN_KLCONFIG_H */
diff --git a/arch/ia64/include/asm/sn/l1.h b/arch/ia64/include/asm/sn/l1.h
new file mode 100644
index 000000000000..344bf44bb356
--- /dev/null
+++ b/arch/ia64/include/asm/sn/l1.h
@@ -0,0 +1,51 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#ifndef _ASM_IA64_SN_L1_H
+#define _ASM_IA64_SN_L1_H
+
+/* brick type response codes */
+#define L1_BRICKTYPE_PX 0x23 /* # */
+#define L1_BRICKTYPE_PE 0x25 /* % */
+#define L1_BRICKTYPE_N_p0 0x26 /* & */
+#define L1_BRICKTYPE_IP45 0x34 /* 4 */
+#define L1_BRICKTYPE_IP41 0x35 /* 5 */
+#define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */
+#define L1_BRICKTYPE_IX 0x3d /* = */
+#define L1_BRICKTYPE_IP34 0x61 /* a */
+#define L1_BRICKTYPE_GA 0x62 /* b */
+#define L1_BRICKTYPE_C 0x63 /* c */
+#define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */
+#define L1_BRICKTYPE_I 0x69 /* i */
+#define L1_BRICKTYPE_N 0x6e /* n */
+#define L1_BRICKTYPE_OPUS 0x6f /* o */
+#define L1_BRICKTYPE_P 0x70 /* p */
+#define L1_BRICKTYPE_R 0x72 /* r */
+#define L1_BRICKTYPE_CHI_CG 0x76 /* v */
+#define L1_BRICKTYPE_X 0x78 /* x */
+#define L1_BRICKTYPE_X2 0x79 /* y */
+#define L1_BRICKTYPE_SA 0x5e /* ^ */
+#define L1_BRICKTYPE_PA 0x6a /* j */
+#define L1_BRICKTYPE_IA 0x6b /* k */
+#define L1_BRICKTYPE_ATHENA 0x2b /* + */
+#define L1_BRICKTYPE_DAYTONA 0x7a /* z */
+#define L1_BRICKTYPE_1932 0x2c /* . */
+#define L1_BRICKTYPE_191010 0x2e /* , */
+
+/* board type response codes */
+#define L1_BOARDTYPE_IP69 0x0100 /* CA */
+#define L1_BOARDTYPE_IP63 0x0200 /* CB */
+#define L1_BOARDTYPE_BASEIO 0x0300 /* IB */
+#define L1_BOARDTYPE_PCIE2SLOT 0x0400 /* IC */
+#define L1_BOARDTYPE_PCIX3SLOT 0x0500 /* ID */
+#define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600 /* IE */
+#define L1_BOARDTYPE_ABACUS 0x0700 /* AB */
+#define L1_BOARDTYPE_DAYTONA 0x0800 /* AD */
+#define L1_BOARDTYPE_INVAL (-1) /* invalid board type */
+
+#endif /* _ASM_IA64_SN_L1_H */
diff --git a/arch/ia64/include/asm/sn/leds.h b/arch/ia64/include/asm/sn/leds.h
new file mode 100644
index 000000000000..66cf8c4d92c9
--- /dev/null
+++ b/arch/ia64/include/asm/sn/leds.h
@@ -0,0 +1,33 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_LEDS_H
+#define _ASM_IA64_SN_LEDS_H
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/shub_mmr.h>
+
+#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
+#define LED_CPU_SHIFT 16
+
+#define LED_CPU_HEARTBEAT 0x01
+#define LED_CPU_ACTIVITY 0x02
+#define LED_ALWAYS_SET 0x00
+
+/*
+ * Basic macros for flashing the LEDS on an SGI SN.
+ */
+
+static __inline__ void
+set_led_bits(u8 value, u8 mask)
+{
+ pda->led_state = (pda->led_state & ~mask) | (value & mask);
+ *pda->led_address = (short) pda->led_state;
+}
+
+#endif /* _ASM_IA64_SN_LEDS_H */
+
diff --git a/arch/ia64/include/asm/sn/module.h b/arch/ia64/include/asm/sn/module.h
new file mode 100644
index 000000000000..734e980ece2f
--- /dev/null
+++ b/arch/ia64/include/asm/sn/module.h
@@ -0,0 +1,127 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_MODULE_H
+#define _ASM_IA64_SN_MODULE_H
+
+/* parameter for format_module_id() */
+#define MODULE_FORMAT_BRIEF 1
+#define MODULE_FORMAT_LONG 2
+#define MODULE_FORMAT_LCD 3
+
+/*
+ * Module id format
+ *
+ * 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int)
+ * 15-8 Brick type (8-bit ascii character)
+ * 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int)
+ *
+ */
+
+/*
+ * Macros for getting the brick type
+ */
+#define MODULE_BTYPE_MASK 0xff00
+#define MODULE_BTYPE_SHFT 8
+#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
+#define MODULE_BT_TO_CHAR(_b) ((char)(_b))
+#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
+
+/*
+ * Macros for getting the rack ID.
+ */
+#define MODULE_RACK_MASK 0xffff0000
+#define MODULE_RACK_SHFT 16
+#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
+
+/*
+ * Macros for getting the brick position
+ */
+#define MODULE_BPOS_MASK 0x00ff
+#define MODULE_BPOS_SHFT 0
+#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
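+
+/*
+ * Illustrative decode sketch (the module id value is hypothetical):
+ * 0x006b6902 is rack 0x006b, brick type 'i' (0x69), bay 2.
+ *
+ *	moduleid_t m = 0x006b6902;
+ *
+ *	MODULE_GET_RACK(m);	gives 0x006b
+ *	MODULE_GET_BTCHAR(m);	gives 'i'
+ *	MODULE_GET_BPOS(m);	gives 2
+ */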
+
+/*
+ * Macros for encoding and decoding rack IDs
+ * A rack number consists of three parts:
+ * class (0==CPU/mixed, 1==I/O), group, number
+ *
+ * Rack number is stored just as it is displayed on the screen:
+ * a 3-decimal-digit number.
+ */
+#define RACK_CLASS_DVDR 100
+#define RACK_GROUP_DVDR 10
+#define RACK_NUM_DVDR 1
+
+#define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \
+ (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
+
+#define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR)
+#define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \
+ RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
+#define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \
+ RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \
+ RACK_GROUP_DVDR) / RACK_NUM_DVDR)
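+
+/*
+ * Worked example: rack id 107 is class 1 (I/O), group 0, number 7,
+ * since 107 = 1*100 + 0*10 + 7*1.
+ *
+ *	RACK_CREATE_RACKID(1, 0, 7)	gives 107
+ *	RACK_GET_CLASS(107)		gives 1
+ *	RACK_GET_GROUP(107)		gives 0
+ *	RACK_GET_NUM(107)		gives 7
+ */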
+
+/*
+ * Macros for encoding and decoding rack IDs
+ * A rack number consists of three parts:
+ * class 1 bit, 0==CPU/mixed, 1==I/O
+ * group 2 bits for CPU/mixed, 3 bits for I/O
+ * number 3 bits for CPU/mixed, 2 bits for I/O (1 based)
+ */
+#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2)
+#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3)
+
+#define RACK_CLASS_MASK(_r) 0x20
+#define RACK_CLASS_SHFT(_r) 5
+#define RACK_ADD_CLASS(_r, _c) \
+ ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
+
+#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r)
+#define RACK_GROUP_MASK(_r) \
+ ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
+#define RACK_ADD_GROUP(_r, _g) \
+ ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
+
+#define RACK_NUM_SHFT(_r) 0
+#define RACK_NUM_MASK(_r) \
+ ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
+#define RACK_ADD_NUM(_r, _n) \
+ ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
+
+
+/*
+ * Brick type definitions
+ */
+#define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */
+
+extern char brick_types[];
+
+#define MODULE_CBRICK 0
+#define MODULE_RBRICK 1
+#define MODULE_IBRICK 2
+#define MODULE_KBRICK 3
+#define MODULE_XBRICK 4
+#define MODULE_DBRICK 5
+#define MODULE_PBRICK 6
+#define MODULE_NBRICK 7
+#define MODULE_PEBRICK 8
+#define MODULE_PXBRICK 9
+#define MODULE_IXBRICK 10
+#define MODULE_CGBRICK 11
+#define MODULE_OPUSBRICK 12
+#define MODULE_SABRICK 13 /* TIO BringUp Brick */
+#define MODULE_IABRICK 14
+#define MODULE_PABRICK 15
+#define MODULE_GABRICK 16
+#define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */
+
+extern void format_module_id(char *, moduleid_t, int);
+
+#endif /* _ASM_IA64_SN_MODULE_H */
diff --git a/arch/ia64/include/asm/sn/mspec.h b/arch/ia64/include/asm/sn/mspec.h
new file mode 100644
index 000000000000..c1d3c50c3223
--- /dev/null
+++ b/arch/ia64/include/asm/sn/mspec.h
@@ -0,0 +1,59 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_MSPEC_H
+#define _ASM_IA64_SN_MSPEC_H
+
+#define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */
+
+#define FETCHOP_LOAD 0
+#define FETCHOP_INCREMENT 8
+#define FETCHOP_DECREMENT 16
+#define FETCHOP_CLEAR 24
+
+#define FETCHOP_STORE 0
+#define FETCHOP_AND 24
+#define FETCHOP_OR 32
+
+#define FETCHOP_CLEAR_CACHE 56
+
+#define FETCHOP_LOAD_OP(addr, op) ( \
+ *(volatile long *)((char*) (addr) + (op)))
+
+#define FETCHOP_STORE_OP(addr, op, x) ( \
+ *(volatile long *)((char*) (addr) + (op)) = (long) (x))
+
+#ifdef __KERNEL__
+
+/*
+ * Each Atomic Memory Operation (amo, formerly known as fetchop)
+ * variable is 64 bytes long. The first 8 bytes are used. The
+ * remaining 56 bytes are unaddressable due to the operation taking
+ * that portion of the address.
+ *
+ * NOTE: The amo structure _MUST_ be placed in either the first or second
+ * half of the cache line. The cache line _MUST NOT_ be used for anything
+ * other than additional amo entries. This is because there are two
+ * addresses which reference the same physical cache line. One will
+ * be a cached entry with the memory type bits all set. This address
+ * may be loaded into processor cache. The amo will be referenced
+ * uncached via the special memory type. If any portion of the
+ * cached cache-line is modified, when that line is flushed, it will
+ * overwrite the uncached value in physical memory and lead to
+ * inconsistency.
+ */
+struct amo {
+ u64 variable;
+ u64 unused[7];
+};
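+
+/*
+ * Illustrative usage sketch (obtaining the uncached mapping of an amo
+ * variable is platform code and is assumed here): the FETCHOP_*_OP
+ * macros encode the operation in the low address bits, so reading at
+ * offset FETCHOP_INCREMENT returns the old value and increments the
+ * variable as a side effect.
+ *
+ *	struct amo *v = <uncached mapping of an amo variable>;
+ *	long old;
+ *
+ *	old = FETCHOP_LOAD_OP(&v->variable, FETCHOP_INCREMENT);
+ *	FETCHOP_STORE_OP(&v->variable, FETCHOP_STORE, 0);
+ */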
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_SN_MSPEC_H */
diff --git a/arch/ia64/include/asm/sn/nodepda.h b/arch/ia64/include/asm/sn/nodepda.h
new file mode 100644
index 000000000000..ee118b901de4
--- /dev/null
+++ b/arch/ia64/include/asm/sn/nodepda.h
@@ -0,0 +1,82 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_NODEPDA_H
+#define _ASM_IA64_SN_NODEPDA_H
+
+
+#include <asm/irq.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/bte.h>
+
+/*
+ * NUMA Node-Specific Data structures are defined in this file.
+ * In particular, this is the location of the node PDA.
+ * A pointer to the right node PDA is saved in each CPU PDA.
+ */
+
+/*
+ * Node-specific data structure.
+ *
+ * One of these structures is allocated on each node of a NUMA system.
+ *
+ * This structure provides a convenient way of keeping together
+ * all per-node data structures.
+ */
+struct phys_cpuid {
+ short nasid;
+ char subnode;
+ char slice;
+};
+
+struct nodepda_s {
+ void *pdinfo; /* Platform-dependent per-node info */
+
+ /*
+ * The BTEs on this node are shared by the local cpus
+ */
+ struct bteinfo_s bte_if[MAX_BTES_PER_NODE]; /* Virtual Interface */
+ struct timer_list bte_recovery_timer;
+ spinlock_t bte_recovery_lock;
+
+ /*
+ * Array of pointers to the nodepdas for each node.
+ */
+ struct nodepda_s *pernode_pdaindr[MAX_COMPACT_NODES];
+
+ /*
+ * Array of physical cpu identifiers. Indexed by cpuid.
+ */
+ struct phys_cpuid phys_cpuid[NR_CPUS];
+ spinlock_t ptc_lock ____cacheline_aligned_in_smp;
+};
+
+typedef struct nodepda_s nodepda_t;
+
+/*
+ * Access Functions for node PDA.
+ * Since there is one nodepda for each node, we need a convenient mechanism
+ * to access these nodepdas without cluttering code with #ifdefs.
+ * The next set of definitions provides this.
+ * Routines are expected to use
+ *
+ * sn_nodepda - to access node PDA for the node on which code is running
+ * NODEPDA(cnodeid) - to access node PDA for cnodeid
+ */
+
+DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
+#define sn_nodepda (__get_cpu_var(__sn_nodepda))
+#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])
+
+/*
+ * Check if given a compact node id the corresponding node has all the
+ * cpus disabled.
+ */
+#define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0)
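+
+/*
+ * Illustrative usage sketch: visiting the node PDA of every node that
+ * has at least one enabled cpu.
+ *
+ *	cnodeid_t cnode;
+ *
+ *	for_each_online_node(cnode)
+ *		if (!is_headless_node(cnode))
+ *			use NODEPDA(cnode)->pdinfo, ...;
+ */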
+
+#endif /* _ASM_IA64_SN_NODEPDA_H */
diff --git a/arch/ia64/include/asm/sn/pcibr_provider.h b/arch/ia64/include/asm/sn/pcibr_provider.h
new file mode 100644
index 000000000000..da205b7cdaac
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pcibr_provider.h
@@ -0,0 +1,150 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibus_provider_defs.h>
+
+/* Workarounds */
+#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
+
+#define BUSTYPE_MASK 0x1
+
+/* Macros given a pcibus structure */
+#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
+#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
+ asic == PCIIO_ASIC_TYPE_TIOCP)
+#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
+#define IS_TIOCP_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_TIOCP)
+
+
+/*
+ * The different PCI Bridge types supported on the SGI Altix platforms
+ */
+#define PCIBR_BRIDGETYPE_UNKNOWN -1
+#define PCIBR_BRIDGETYPE_PIC 2
+#define PCIBR_BRIDGETYPE_TIOCP 3
+
+/*
+ * Bridge 64bit Direct Map Attributes
+ */
+#define PCI64_ATTR_PREF (1ull << 59)
+#define PCI64_ATTR_PREC (1ull << 58)
+#define PCI64_ATTR_VIRTUAL (1ull << 57)
+#define PCI64_ATTR_BAR (1ull << 56)
+#define PCI64_ATTR_SWAP (1ull << 55)
+#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
+
+#define PCI32_LOCAL_BASE 0
+#define PCI32_MAPPED_BASE 0x40000000
+#define PCI32_DIRECT_BASE 0x80000000
+
+#define IS_PCI32_MAPPED(x) ((u64)(x) < PCI32_DIRECT_BASE && \
+ (u64)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI32_DIRECT(x) ((u64)(x) >= PCI32_MAPPED_BASE)
+
+
+/*
+ * Bridge PMU Address Translation Entry Attributes
+ */
+#define PCI32_ATE_V (0x1 << 0)
+#define PCI32_ATE_CO (0x1 << 1) /* PIC ASIC ONLY */
+#define PCI32_ATE_PIO (0x1 << 1) /* TIOCP ASIC ONLY */
+#define PCI32_ATE_MSI (0x1 << 2)
+#define PCI32_ATE_PREF (0x1 << 3)
+#define PCI32_ATE_BAR (0x1 << 4)
+#define PCI32_ATE_ADDR_SHFT 12
+
+#define MINIMAL_ATES_REQUIRED(addr, size) \
+ (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
+
+#define MINIMAL_ATE_FLAG(addr, size) \
+ (MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0)
+
+/* bit 29 of the pci address is the SWAP bit */
+#define ATE_SWAPSHIFT 29
+#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
+#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
+
+/*
+ * I/O page size
+ */
+#if PAGE_SIZE < 16384
+#define IOPFNSHIFT 12 /* 4K per mapped page */
+#else
+#define IOPFNSHIFT 14 /* 16K per mapped page */
+#endif
+
+#define IOPGSIZE (1 << IOPFNSHIFT)
+#define IOPG(x) ((x) >> IOPFNSHIFT)
+#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
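+
+/*
+ * Worked example (4K I/O pages assumed, addresses hypothetical): a
+ * 0x100-byte mapping at page offset 0xe00 fits in one I/O page, so
+ * MINIMAL_ATES_REQUIRED(0xe00, 0x100) is true; the same mapping at
+ * offset 0xf80 spills into the next page (0xf80 + 0xff = 0x107f), so
+ * MINIMAL_ATES_REQUIRED(0xf80, 0x100) is false.
+ */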
+
+#define PCIBR_DEV_SWAP_DIR (1ull << 19)
+#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
+
+/*
+ * PMU resources.
+ */
+struct ate_resource{
+ u64 *ate;
+ u64 num_ate;
+ u64 lowest_free_index;
+};
+
+struct pcibus_info {
+ struct pcibus_bussoft pbi_buscommon; /* common header */
+ u32 pbi_moduleid;
+ short pbi_bridge_type;
+ short pbi_bridge_mode;
+
+ struct ate_resource pbi_int_ate_resource;
+ u64 pbi_int_ate_size;
+
+ u64 pbi_dir_xbase;
+ char pbi_hub_xid;
+
+ u64 pbi_devreg[8];
+
+ u32 pbi_valid_devices;
+ u32 pbi_enabled_devices;
+
+ spinlock_t pbi_lock;
+};
+
+extern int pcibr_init_provider(void);
+extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
+extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
+
+/*
+ * prototypes for the bridge asic register access routines in pcibr_reg.c
+ */
+extern void pcireg_control_bit_clr(struct pcibus_info *, u64);
+extern void pcireg_control_bit_set(struct pcibus_info *, u64);
+extern u64 pcireg_tflush_get(struct pcibus_info *);
+extern u64 pcireg_intr_status_get(struct pcibus_info *);
+extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, u64);
+extern void pcireg_intr_enable_bit_set(struct pcibus_info *, u64);
+extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64);
+extern void pcireg_force_intr_set(struct pcibus_info *, int);
+extern u64 pcireg_wrb_flush_get(struct pcibus_info *, int);
+extern void pcireg_int_ate_set(struct pcibus_info *, int, u64);
+extern u64 __iomem * pcireg_int_ate_addr(struct pcibus_info *, int);
+extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
+extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
+extern int pcibr_ate_alloc(struct pcibus_info *, int);
+extern void pcibr_ate_free(struct pcibus_info *, int);
+extern void ate_write(struct pcibus_info *, int, int, u64);
+extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
+ void *resp, char **ssdt);
+extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
+ int action, void *resp);
+extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
+#endif
diff --git a/arch/ia64/include/asm/sn/pcibus_provider_defs.h b/arch/ia64/include/asm/sn/pcibus_provider_defs.h
new file mode 100644
index 000000000000..8f7c83d0f6d3
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pcibus_provider_defs.h
@@ -0,0 +1,68 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+
+/*
+ * SN pci asic types. Do not ever renumber these or reuse values. The
+ * values must agree with what prom thinks they are.
+ */
+
+#define PCIIO_ASIC_TYPE_UNKNOWN 0
+#define PCIIO_ASIC_TYPE_PPB 1
+#define PCIIO_ASIC_TYPE_PIC 2
+#define PCIIO_ASIC_TYPE_TIOCP 3
+#define PCIIO_ASIC_TYPE_TIOCA 4
+#define PCIIO_ASIC_TYPE_TIOCE 5
+
+#define PCIIO_ASIC_MAX_TYPES 6
+
+/*
+ * Common pciio bus provider data. There should be one of these as the
+ * first field in any pciio based provider soft structure (e.g. pcibr_soft
+ * tioca_soft, etc).
+ */
+
+struct pcibus_bussoft {
+ u32 bs_asic_type; /* chipset type */
+ u32 bs_xid; /* xwidget id */
+ u32 bs_persist_busnum; /* Persistent Bus Number */
+ u32 bs_persist_segment; /* Segment Number */
+ u64 bs_legacy_io; /* legacy io pio addr */
+ u64 bs_legacy_mem; /* legacy mem pio addr */
+ u64 bs_base; /* widget base */
+ struct xwidget_info *bs_xwidget_info;
+};
+
+struct pci_controller;
+/*
+ * SN pci bus indirection
+ */
+
+struct sn_pcibus_provider {
+ dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
+ dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
+ void (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
+ void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
+ void (*force_interrupt)(struct sn_irq_info *);
+ void (*target_interrupt)(struct sn_irq_info *);
+};
+
+/*
+ * Flags used by the map interfaces
+ * bits 3:0 specifies format of passed in address
+ * bit 4 specifies that address is to be used for MSI
+ */
+
+#define SN_DMA_ADDRTYPE(x) ((x) & 0xf)
+#define SN_DMA_ADDR_PHYS 1 /* address is phys memory */
+#define SN_DMA_ADDR_XIO 2 /* address is an xio address */
+#define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */
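+
+/*
+ * Illustrative usage sketch: composing and decomposing the map flags.
+ *
+ *	int flags = SN_DMA_ADDR_PHYS | SN_DMA_MSI;
+ *
+ *	SN_DMA_ADDRTYPE(flags)	gives SN_DMA_ADDR_PHYS
+ *	flags & SN_DMA_MSI	nonzero, address is to be used for MSI
+ */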
+
+extern struct sn_pcibus_provider *sn_pci_provider[];
+#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
diff --git a/arch/ia64/include/asm/sn/pcidev.h b/arch/ia64/include/asm/sn/pcidev.h
new file mode 100644
index 000000000000..1c2382cea807
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pcidev.h
@@ -0,0 +1,85 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
+#define _ASM_IA64_SN_PCI_PCIDEV_H
+
+#include <linux/pci.h>
+
+/*
+ * In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to
+ * the pcidev_info structs for all devices under a controller, we keep a
+ * list of pcidev_info under pci_controller->platform_data.
+ */
+struct sn_platform_data {
+ void *provider_soft;
+ struct list_head pcidev_info;
+};
+
+#define SN_PLATFORM_DATA(busdev) \
+ ((struct sn_platform_data *)(PCI_CONTROLLER(busdev)->platform_data))
+
+#define SN_PCIDEV_INFO(dev) sn_pcidev_info_get(dev)
+
+/*
+ * Given a pci_bus, return the sn pcibus_bussoft struct. Note that
+ * this only works for root busses, not for busses represented by PPB's.
+ */
+
+#define SN_PCIBUS_BUSSOFT(pci_bus) \
+ ((struct pcibus_bussoft *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
+
+#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
+ ((struct pcibus_info *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
+/*
+ * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
+ * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
+ * to possible PPB's in the path.
+ */
+
+#define SN_PCIDEV_BUSSOFT(pci_dev) \
+ (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
+
+#define SN_PCIDEV_BUSPROVIDER(pci_dev) \
+ (SN_PCIDEV_INFO(pci_dev)->pdi_provider)
+
+#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
+#define PCIIO_SLOT_NONE 255
+#define PCIIO_FUNC_NONE 255
+#define PCIIO_VENDOR_ID_NONE (-1)
+
+struct pcidev_info {
+ u64 pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
+ u64 pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
+
+ struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
+ struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
+ struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
+
+ struct sn_irq_info *pdi_sn_irq_info;
+ struct sn_pcibus_provider *pdi_provider; /* sn pci ops */
+ struct pci_dev *host_pci_dev; /* host bus link */
+ struct list_head pdi_list; /* List of pcidev_info */
+};
+
+extern void sn_irq_fixup(struct pci_dev *pci_dev,
+ struct sn_irq_info *sn_irq_info);
+extern void sn_irq_unfixup(struct pci_dev *pci_dev);
+extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
+extern void sn_bus_fixup(struct pci_bus *);
+extern void sn_acpi_bus_fixup(struct pci_bus *);
+extern void sn_common_bus_fixup(struct pci_bus *, struct pcibus_bussoft *);
+extern void sn_bus_store_sysdata(struct pci_dev *dev);
+extern void sn_bus_free_sysdata(void);
+extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
+extern void sn_io_slot_fixup(struct pci_dev *);
+extern void sn_acpi_slot_fixup(struct pci_dev *);
+extern void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *,
+ struct sn_irq_info *);
+extern void sn_pci_unfixup_slot(struct pci_dev *dev);
+extern void sn_irq_lh_init(void);
+#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
diff --git a/arch/ia64/include/asm/sn/pda.h b/arch/ia64/include/asm/sn/pda.h
new file mode 100644
index 000000000000..1c5108d44d8b
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pda.h
@@ -0,0 +1,69 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PDA_H
+#define _ASM_IA64_SN_PDA_H
+
+#include <linux/cache.h>
+#include <asm/percpu.h>
+#include <asm/system.h>
+
+
+/*
+ * CPU-specific data structure.
+ *
+ * One of these structures is allocated for each cpu of a NUMA system.
+ *
+ * This structure provides a convenient way of keeping together
+ * all SN per-cpu data structures.
+ */
+
+typedef struct pda_s {
+
+ /*
+ * Support for SN LEDs
+ */
+ volatile short *led_address;
+ u8 led_state;
+ u8 hb_state; /* supports blinking heartbeat leds */
+ unsigned int hb_count;
+
+ unsigned int idle_flag;
+
+ volatile unsigned long *bedrock_rev_id;
+ volatile unsigned long *pio_write_status_addr;
+ unsigned long pio_write_status_val;
+ volatile unsigned long *pio_shub_war_cam_addr;
+
+ unsigned long sn_in_service_ivecs[4];
+ int sn_lb_int_war_ticks;
+ int sn_last_irq;
+ int sn_first_irq;
+} pda_t;
+
+
+#define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+
+/*
+ * PDA
+ * Per-cpu private data area for each cpu. The PDA is located immediately after
+ * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
+ * cpu but only a small amount of the page is actually used. We put the SNIA PDA
+ * in the same page as the cpu_data area. Note that there is a check in the setup
+ * code to verify that we don't overflow the page.
+ *
+ * It seems like we should cache-line align the pda so that any changes in the
+ * size of the cpu_data area don't change cache layout. Should we align to a 32, 64,
+ * 128 or 512 byte boundary? Each has merits. For now, pick 128, but this should be
+ * revisited later.
+ */
+DECLARE_PER_CPU(struct pda_s, pda_percpu);
+
+#define pda (&__ia64_per_cpu_var(pda_percpu))
+
+#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
+
+#endif /* _ASM_IA64_SN_PDA_H */
diff --git a/arch/ia64/include/asm/sn/pic.h b/arch/ia64/include/asm/sn/pic.h
new file mode 100644
index 000000000000..5f9da5fd6e56
--- /dev/null
+++ b/arch/ia64/include/asm/sn/pic.h
@@ -0,0 +1,261 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PIC_H
+#define _ASM_IA64_SN_PCI_PIC_H
+
+/*
+ * PIC AS DEVICE ZERO
+ * ------------------
+ *
+ * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
+ * be designated as 'device 0'. That is a departure from earlier SGI
+ * PCI bridges. Because of that we use config space 1 to access the
+ * config space of the first actual PCI device on the bus.
+ * Here's what the PIC manual says:
+ *
+ * The current PCI-X bus specification now defines that the parent
+ * host's bus bridge (PIC for example) must be device 0 on bus 0. PIC
+ * reduced the total number of devices from 8 to 4 and removed the
+ * device registers and windows, now only supporting devices 0,1,2, and
+ * 3. PIC did leave all 8 configuration space windows. The reason was
+ * there was nothing to gain by removing them. Herein lies the problem.
+ * The device numbering we do using 0 through 3 is unrelated to the device
+ * numbering which PCI-X requires in configuration space. In the past we
+ * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc.
+ * PCI-X requires we start at 1, not 0, and currently the PX brick
+ * does associate our:
+ *
+ * device 0 with configuration space window 1,
+ * device 1 with configuration space window 2,
+ * device 2 with configuration space window 3,
+ * device 3 with configuration space window 4.
+ *
+ * The net effect is that all config space accesses are off-by-one in
+ * relation to other per-slot accesses on the PIC.
+ * Here is a table that shows some of that:
+ *
+ * Internal Slot#
+ * |
+ * | 0 1 2 3
+ * ----------|---------------------------------------
+ * config | 0x21000 0x22000 0x23000 0x24000
+ * |
+ * even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
+ * |
+ * odd rrb | n/a 0[1] n/a 1[1]
+ * |
+ * int dev | 00 01 10 11
+ * |
+ * ext slot# | 1 2 3 4
+ * ----------|---------------------------------------
+ */
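+
+/*
+ * Illustrative sketch of the off-by-one (values from the table above):
+ * internal device n uses configuration space window n+1, i.e. the
+ * p_type0_cfg_dev[] entry at index n+1 in struct pic below. For
+ * internal slot 0, config space starts at 0x21000:
+ *
+ *	&pic->p_type0_cfg_dev[0 + 1]	is at 0x020000 + 1 * 0x1000
+ */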
+
+#define PIC_ATE_TARGETID_SHFT 8
+#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL
+#define PIC_PCI64_ATTR_TARG_SHFT 60
+
+
+/*****************************************************************************
+ *********************** PIC MMR structure mapping ***************************
+ *****************************************************************************/
+
+/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
+ * of a 64-bit register. When writing PIC registers, always write the
+ * entire 64 bits.
+ */
+
+struct pic {
+
+ /* 0x000000-0x00FFFF -- Local Registers */
+
+ /* 0x000000-0x000057 -- Standard Widget Configuration */
+ u64 p_wid_id; /* 0x000000 */
+ u64 p_wid_stat; /* 0x000008 */
+ u64 p_wid_err_upper; /* 0x000010 */
+ u64 p_wid_err_lower; /* 0x000018 */
+ #define p_wid_err p_wid_err_lower
+ u64 p_wid_control; /* 0x000020 */
+ u64 p_wid_req_timeout; /* 0x000028 */
+ u64 p_wid_int_upper; /* 0x000030 */
+ u64 p_wid_int_lower; /* 0x000038 */
+ #define p_wid_int p_wid_int_lower
+ u64 p_wid_err_cmdword; /* 0x000040 */
+ u64 p_wid_llp; /* 0x000048 */
+ u64 p_wid_tflush; /* 0x000050 */
+
+ /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
+ u64 p_wid_aux_err; /* 0x000058 */
+ u64 p_wid_resp_upper; /* 0x000060 */
+ u64 p_wid_resp_lower; /* 0x000068 */
+ #define p_wid_resp p_wid_resp_lower
+ u64 p_wid_tst_pin_ctrl; /* 0x000070 */
+ u64 p_wid_addr_lkerr; /* 0x000078 */
+
+ /* 0x000080-0x00008F -- PMU & MAP */
+ u64 p_dir_map; /* 0x000080 */
+ u64 _pad_000088; /* 0x000088 */
+
+ /* 0x000090-0x00009F -- SSRAM */
+ u64 p_map_fault; /* 0x000090 */
+ u64 _pad_000098; /* 0x000098 */
+
+ /* 0x0000A0-0x0000AF -- Arbitration */
+ u64 p_arb; /* 0x0000A0 */
+ u64 _pad_0000A8; /* 0x0000A8 */
+
+ /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+ u64 p_ate_parity_err; /* 0x0000B0 */
+ u64 _pad_0000B8; /* 0x0000B8 */
+
+ /* 0x0000C0-0x0000FF -- PCI/GIO */
+ u64 p_bus_timeout; /* 0x0000C0 */
+ u64 p_pci_cfg; /* 0x0000C8 */
+ u64 p_pci_err_upper; /* 0x0000D0 */
+ u64 p_pci_err_lower; /* 0x0000D8 */
+ #define p_pci_err p_pci_err_lower
+ u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
+
+ /* 0x000100-0x0001FF -- Interrupt */
+ u64 p_int_status; /* 0x000100 */
+ u64 p_int_enable; /* 0x000108 */
+ u64 p_int_rst_stat; /* 0x000110 */
+ u64 p_int_mode; /* 0x000118 */
+ u64 p_int_device; /* 0x000120 */
+ u64 p_int_host_err; /* 0x000128 */
+ u64 p_int_addr[8]; /* 0x0001{30,,,68} */
+ u64 p_err_int_view; /* 0x000170 */
+ u64 p_mult_int; /* 0x000178 */
+ u64 p_force_always[8]; /* 0x0001{80,,,B8} */
+ u64 p_force_pin[8]; /* 0x0001{C0,,,F8} */
+
+ /* 0x000200-0x000298 -- Device */
+ u64 p_device[4]; /* 0x0002{00,,,18} */
+ u64 _pad_000220[4]; /* 0x0002{20,,,38} */
+ u64 p_wr_req_buf[4]; /* 0x0002{40,,,58} */
+ u64 _pad_000260[4]; /* 0x0002{60,,,78} */
+ u64 p_rrb_map[2]; /* 0x0002{80,,,88} */
+ #define p_even_resp p_rrb_map[0] /* 0x000280 */
+ #define p_odd_resp p_rrb_map[1] /* 0x000288 */
+ u64 p_resp_status; /* 0x000290 */
+ u64 p_resp_clear; /* 0x000298 */
+
+ u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
+
+ /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+ struct {
+ u64 upper; /* 0x0003{00,,,F0} */
+ u64 lower; /* 0x0003{08,,,F8} */
+ } p_buf_addr_match[16];
+
+ /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+ struct {
+ u64 flush_w_touch; /* 0x000{400,,,5C0} */
+ u64 flush_wo_touch; /* 0x000{408,,,5C8} */
+ u64 inflight; /* 0x000{410,,,5D0} */
+ u64 prefetch; /* 0x000{418,,,5D8} */
+ u64 total_pci_retry; /* 0x000{420,,,5E0} */
+ u64 max_pci_retry; /* 0x000{428,,,5E8} */
+ u64 max_latency; /* 0x000{430,,,5F0} */
+ u64 clear_all; /* 0x000{438,,,5F8} */
+ } p_buf_count[8];
+
+
+ /* 0x000600-0x0009FF -- PCI/X registers */
+ u64 p_pcix_bus_err_addr; /* 0x000600 */
+ u64 p_pcix_bus_err_attr; /* 0x000608 */
+ u64 p_pcix_bus_err_data; /* 0x000610 */
+ u64 p_pcix_pio_split_addr; /* 0x000618 */
+ u64 p_pcix_pio_split_attr; /* 0x000620 */
+ u64 p_pcix_dma_req_err_attr; /* 0x000628 */
+ u64 p_pcix_dma_req_err_addr; /* 0x000630 */
+ u64 p_pcix_timeout; /* 0x000638 */
+
+ u64 _pad_000640[120]; /* 0x000{640,,,9F8} */
+
+ /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+ struct {
+ u64 p_buf_addr; /* 0x000{A00,,,AF0} */
+	    u64	p_buf_attr;		/* 0x000{A08,,,AF8} */
+ } p_pcix_read_buf_64[16];
+
+ struct {
+ u64 p_buf_addr; /* 0x000{B00,,,BE0} */
+ u64 p_buf_attr; /* 0x000{B08,,,BE8} */
+ u64 p_buf_valid; /* 0x000{B10,,,BF0} */
+ u64 __pad1; /* 0x000{B18,,,BF8} */
+ } p_pcix_write_buf_64[8];
+
+ /* End of Local Registers -- Start of Address Map space */
+
+ char _pad_000c00[0x010000 - 0x000c00];
+
+ /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
+ u64 p_int_ate_ram[1024]; /* 0x010000-0x011fff */
+
+ /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
+ u64 p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */
+
+ char _pad_014000[0x18000 - 0x014000];
+
+ /* 0x18000-0x197F8 -- PIC Write Request Ram */
+ u64 p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
+ u64 p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
+ u64 p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
+
+ char _pad_019800[0x20000 - 0x019800];
+
+ /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
+ union {
+ u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
+ u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
+ u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
+ u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
+ union {
+ u8 c[0x100 / 1];
+ u16 s[0x100 / 2];
+ u32 l[0x100 / 4];
+ u64 d[0x100 / 8];
+ } f[8];
+ } p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */
+
+ /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+ union {
+ u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
+ u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
+ u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
+ u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
+ union {
+ u8 c[0x100 / 1];
+ u16 s[0x100 / 2];
+ u32 l[0x100 / 4];
+ u64 d[0x100 / 8];
+ } f[8];
+ } p_type1_cfg; /* 0x028000-0x029000 */
+
+ char _pad_029000[0x030000-0x029000];
+
+ /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+ union {
+ u8 c[8 / 1];
+ u16 s[8 / 2];
+ u32 l[8 / 4];
+ u64 d[8 / 8];
+ } p_pci_iack; /* 0x030000-0x030007 */
+
+ char _pad_030007[0x040000-0x030008];
+
+	/* 0x040000-0x040007 -- PCIX Special Cycle */
+ union {
+ u8 c[8 / 1];
+ u16 s[8 / 2];
+ u32 l[8 / 4];
+ u64 d[8 / 8];
+ } p_pcix_cycle; /* 0x040000-0x040007 */
+};
+
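+/*
+ * Usage sketch (illustrative only; not part of the original header):
+ * per the PIC WAR above, even an update aimed at bits [31:0] of a PIC
+ * register must read and write all 64 bits.  Real code would go through
+ * the kernel's MMIO accessors rather than plain loads and stores.
+ */
+static inline void pic_update_wid_control_low32(struct pic *p, u32 low)
+{
+	u64 val = p->p_wid_control;		/* read the whole register */
+
+	val = (val & ~0xffffffffUL) | low;	/* modify [31:0] only */
+	p->p_wid_control = val;			/* write back all 64 bits */
+}
+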
+#endif /* _ASM_IA64_SN_PCI_PIC_H */
diff --git a/arch/ia64/include/asm/sn/rw_mmr.h b/arch/ia64/include/asm/sn/rw_mmr.h
new file mode 100644
index 000000000000..2d78f4c5a45e
--- /dev/null
+++ b/arch/ia64/include/asm/sn/rw_mmr.h
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_RW_MMR_H
+#define _ASM_IA64_SN_RW_MMR_H
+
+
+/*
+ * This file defines routines that access MMRs via uncached physical addresses.
+ * pio_phys_read_mmr - read an MMR
+ * pio_phys_write_mmr - write an MMR
+ * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ * Second MMR will be skipped if address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses
+ * i.e., 0x80000....
+ */
+
+
+extern long pio_phys_read_mmr(volatile long *mmr);
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
+
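+/*
+ * Usage sketch (illustrative only): 'mmr_phys' is a caller-supplied MMR
+ * physical address; setting bit 63 (the 0x8000... prefix noted above)
+ * forms the uncached physical alias these routines expect.
+ */
+static inline void example_pio_write_mmr(unsigned long mmr_phys, long val)
+{
+	volatile long *mmr = (volatile long *)(0x8000000000000000UL | mmr_phys);
+
+	pio_phys_write_mmr(mmr, val);
+}
+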
+#endif /* _ASM_IA64_SN_RW_MMR_H */
diff --git a/arch/ia64/include/asm/sn/shub_mmr.h b/arch/ia64/include/asm/sn/shub_mmr.h
new file mode 100644
index 000000000000..7de1d1d4b71a
--- /dev/null
+++ b/arch/ia64/include/asm/sn/shub_mmr.h
@@ -0,0 +1,502 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SHUB_MMR_H
+#define _ASM_IA64_SN_SHUB_MMR_H
+
+/* ==================================================================== */
+/* Register "SH_IPI_INT" */
+/* SHub Inter-Processor Interrupt Registers */
+/* ==================================================================== */
+#define SH1_IPI_INT __IA64_UL_CONST(0x0000000110000380)
+#define SH2_IPI_INT __IA64_UL_CONST(0x0000000010000380)
+
+/* SH_IPI_INT_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_IPI_INT_TYPE_SHFT 0
+#define SH_IPI_INT_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
+
+/* SH_IPI_INT_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_IPI_INT_AGT_SHFT 3
+#define SH_IPI_INT_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
+
+/* SH_IPI_INT_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_IPI_INT_PID_SHFT 4
+#define SH_IPI_INT_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
+
+/* SH_IPI_INT_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_IPI_INT_BASE_SHFT 21
+#define SH_IPI_INT_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
+
+/* SH_IPI_INT_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_IPI_INT_IDX_SHFT 52
+#define SH_IPI_INT_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
+
+/* SH_IPI_INT_SEND */
+/* Description: Send Interrupt Message to PI; this generates a pulse */
+#define SH_IPI_INT_SEND_SHFT 63
+#define SH_IPI_INT_SEND_MASK __IA64_UL_CONST(0x8000000000000000)
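+
+/*
+ * Sketch (illustrative only): composing an SH_IPI_INT value from the
+ * fields above; 'pid' and 'vector' are caller-supplied, and setting the
+ * SEND bit generates the pulse that dispatches the interrupt.
+ */
+static inline unsigned long sh_ipi_int_value(unsigned long pid,
+					     unsigned long vector)
+{
+	return (0UL << SH_IPI_INT_TYPE_SHFT) |	/* type 0 = INT */
+	       (pid << SH_IPI_INT_PID_SHFT) |
+	       (vector << SH_IPI_INT_IDX_SHFT) |
+	       (1UL << SH_IPI_INT_SEND_SHFT);
+}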
+
+/* ==================================================================== */
+/* Register "SH_EVENT_OCCURRED" */
+/* SHub Interrupt Event Occurred */
+/* ==================================================================== */
+#define SH1_EVENT_OCCURRED __IA64_UL_CONST(0x0000000110010000)
+#define SH1_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000110010008)
+#define SH2_EVENT_OCCURRED __IA64_UL_CONST(0x0000000010010000)
+#define SH2_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000010010008)
+
+/* ==================================================================== */
+/* Register "SH_PI_CAM_CONTROL" */
+/* CRB CAM MMR Access Control */
+/* ==================================================================== */
+#define SH1_PI_CAM_CONTROL __IA64_UL_CONST(0x0000000120050300)
+
+/* ==================================================================== */
+/* Register "SH_SHUB_ID" */
+/* SHub ID Number */
+/* ==================================================================== */
+#define SH1_SHUB_ID __IA64_UL_CONST(0x0000000110060580)
+#define SH1_SHUB_ID_REVISION_SHFT 28
+#define SH1_SHUB_ID_REVISION_MASK __IA64_UL_CONST(0x00000000f0000000)
+
+/* ==================================================================== */
+/* Register "SH_RTC" */
+/* Real-time Clock */
+/* ==================================================================== */
+#define SH1_RTC __IA64_UL_CONST(0x00000001101c0000)
+#define SH2_RTC __IA64_UL_CONST(0x00000002101c0000)
+#define SH_RTC_MASK __IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/* Register "SH_PIO_WRITE_STATUS_0|1" */
+/*                 PIO Write Status for CPU 0 & 1 (2 & 3 on SHub2)     */
+/* ==================================================================== */
+#define SH1_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000120070200)
+#define SH1_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000120070280)
+#define SH2_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000020070200)
+#define SH2_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000020070280)
+#define SH2_PIO_WRITE_STATUS_2 __IA64_UL_CONST(0x0000000020070300)
+#define SH2_PIO_WRITE_STATUS_3 __IA64_UL_CONST(0x0000000020070380)
+
+/* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */
+/* Description: Deadlock response detected */
+#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT 1
+#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK \
+ __IA64_UL_CONST(0x0000000000000002)
+
+/* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */
+/* Description: Count of currently pending PIO writes */
+#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT 56
+#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK \
+ __IA64_UL_CONST(0x3f00000000000000)
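+
+/*
+ * Sketch (illustrative only): extracting the pending PIO write count
+ * from a status value with the shift/mask pair above.
+ */
+static inline unsigned long sh_pio_pending_write_count(unsigned long ws)
+{
+	return (ws & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) >>
+	       SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT;
+}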
+
+/* ==================================================================== */
+/* Register "SH_PIO_WRITE_STATUS_0_ALIAS" */
+/* ==================================================================== */
+#define SH1_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000120070208)
+#define SH2_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000020070208)
+
+/* ==================================================================== */
+/* Register "SH_EVENT_OCCURRED" */
+/* SHub Interrupt Event Occurred */
+/* ==================================================================== */
+/* SH_EVENT_OCCURRED_UART_INT */
+/* Description: Pending Junk Bus UART Interrupt */
+#define SH_EVENT_OCCURRED_UART_INT_SHFT 20
+#define SH_EVENT_OCCURRED_UART_INT_MASK __IA64_UL_CONST(0x0000000000100000)
+
+/* SH_EVENT_OCCURRED_IPI_INT */
+/* Description: Pending IPI Interrupt */
+#define SH_EVENT_OCCURRED_IPI_INT_SHFT 28
+#define SH_EVENT_OCCURRED_IPI_INT_MASK __IA64_UL_CONST(0x0000000010000000)
+
+/* SH_EVENT_OCCURRED_II_INT0 */
+/* Description: Pending II 0 Interrupt */
+#define SH_EVENT_OCCURRED_II_INT0_SHFT 29
+#define SH_EVENT_OCCURRED_II_INT0_MASK __IA64_UL_CONST(0x0000000020000000)
+
+/* SH_EVENT_OCCURRED_II_INT1 */
+/* Description: Pending II 1 Interrupt */
+#define SH_EVENT_OCCURRED_II_INT1_SHFT 30
+#define SH_EVENT_OCCURRED_II_INT1_MASK __IA64_UL_CONST(0x0000000040000000)
+
+/* SH2_EVENT_OCCURRED_EXTIO_INT2 */
+/* Description: Pending SHUB 2 EXT IO INT2 */
+#define SH2_EVENT_OCCURRED_EXTIO_INT2_SHFT 33
+#define SH2_EVENT_OCCURRED_EXTIO_INT2_MASK __IA64_UL_CONST(0x0000000200000000)
+
+/* SH2_EVENT_OCCURRED_EXTIO_INT3 */
+/* Description: Pending SHUB 2 EXT IO INT3 */
+#define SH2_EVENT_OCCURRED_EXTIO_INT3_SHFT 34
+#define SH2_EVENT_OCCURRED_EXTIO_INT3_MASK __IA64_UL_CONST(0x0000000400000000)
+
+#define SH_ALL_INT_MASK \
+	(SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \
+	 SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK | \
+	 SH2_EVENT_OCCURRED_EXTIO_INT2_MASK | \
+	 SH2_EVENT_OCCURRED_EXTIO_INT3_MASK)
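+
+/*
+ * Sketch (illustrative only): testing a value read back from
+ * SH_EVENT_OCCURRED against one of the per-event masks above.
+ */
+static inline int sh_ipi_event_pending(unsigned long event_occurred)
+{
+	return (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) != 0;
+}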
+
+
+/* ==================================================================== */
+/* LEDS */
+/* ==================================================================== */
+#define SH1_REAL_JUNK_BUS_LED0 0x7fed00000UL
+#define SH1_REAL_JUNK_BUS_LED1 0x7fed10000UL
+#define SH1_REAL_JUNK_BUS_LED2 0x7fed20000UL
+#define SH1_REAL_JUNK_BUS_LED3 0x7fed30000UL
+
+#define SH2_REAL_JUNK_BUS_LED0 0xf0000000UL
+#define SH2_REAL_JUNK_BUS_LED1 0xf0010000UL
+#define SH2_REAL_JUNK_BUS_LED2 0xf0020000UL
+#define SH2_REAL_JUNK_BUS_LED3 0xf0030000UL
+
+/* ==================================================================== */
+/* Register "SH1_PTC_0" */
+/*                 Purge Translation Cache Message Configuration Information */
+/* ==================================================================== */
+#define SH1_PTC_0 __IA64_UL_CONST(0x00000001101a0000)
+
+/* SH1_PTC_0_A */
+/* Description: Type */
+#define SH1_PTC_0_A_SHFT 0
+
+/* SH1_PTC_0_PS */
+/* Description: Page Size */
+#define SH1_PTC_0_PS_SHFT 2
+
+/* SH1_PTC_0_RID */
+/* Description: Region ID */
+#define SH1_PTC_0_RID_SHFT 8
+
+/* SH1_PTC_0_START */
+/* Description: Start */
+#define SH1_PTC_0_START_SHFT 63
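+
+/*
+ * Sketch (illustrative only): composing a SH1_PTC_0 purge request from
+ * the fields above; 'ps' (page size) and 'rid' (region id) are
+ * caller-supplied, and setting START launches the purge.  A real purge
+ * would also fill in the type field (SH1_PTC_0_A_SHFT) as required.
+ */
+static inline unsigned long sh1_ptc_0_value(unsigned long ps, unsigned long rid)
+{
+	return (ps << SH1_PTC_0_PS_SHFT) | (rid << SH1_PTC_0_RID_SHFT) |
+	       (1UL << SH1_PTC_0_START_SHFT);
+}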
+
+/* ==================================================================== */
+/* Register "SH1_PTC_1" */
+/*                 Purge Translation Cache Message Configuration Information */
+/* ==================================================================== */
+#define SH1_PTC_1 __IA64_UL_CONST(0x00000001101a0080)
+
+/* SH1_PTC_1_START */
+/* Description: PTC_1 Start */
+#define SH1_PTC_1_START_SHFT 63
+
+/* ==================================================================== */
+/* Register "SH2_PTC" */
+/*                 Purge Translation Cache Message Configuration Information */
+/* ==================================================================== */
+#define SH2_PTC __IA64_UL_CONST(0x0000000170000000)
+
+/* SH2_PTC_A */
+/* Description: Type */
+#define SH2_PTC_A_SHFT 0
+
+/* SH2_PTC_PS */
+/* Description: Page Size */
+#define SH2_PTC_PS_SHFT 2
+
+/* SH2_PTC_RID */
+/* Description: Region ID */
+#define SH2_PTC_RID_SHFT 4
+
+/* SH2_PTC_START */
+/* Description: Start */
+#define SH2_PTC_START_SHFT 63
+
+/* SH2_PTC_ADDR_RID */
+/* Description: Region ID */
+#define SH2_PTC_ADDR_SHFT 4
+#define SH2_PTC_ADDR_MASK __IA64_UL_CONST(0x1ffffffffffff000)
+
+/* ==================================================================== */
+/* Register "SH_RTC1_INT_CONFIG" */
+/* SHub RTC 1 Interrupt Config Registers */
+/* ==================================================================== */
+
+#define SH1_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000110001480)
+#define SH2_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000010001480)
+#define SH_RTC1_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
+#define SH_RTC1_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC1_INT_CONFIG_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_RTC1_INT_CONFIG_TYPE_SHFT 0
+#define SH_RTC1_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
+
+/* SH_RTC1_INT_CONFIG_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_RTC1_INT_CONFIG_AGT_SHFT 3
+#define SH_RTC1_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
+
+/* SH_RTC1_INT_CONFIG_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_RTC1_INT_CONFIG_PID_SHFT 4
+#define SH_RTC1_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
+
+/* SH_RTC1_INT_CONFIG_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_RTC1_INT_CONFIG_BASE_SHFT 21
+#define SH_RTC1_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
+
+/* SH_RTC1_INT_CONFIG_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_RTC1_INT_CONFIG_IDX_SHFT 52
+#define SH_RTC1_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
+
+/* ==================================================================== */
+/* Register "SH_RTC1_INT_ENABLE" */
+/* SHub RTC 1 Interrupt Enable Registers */
+/* ==================================================================== */
+
+#define SH1_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000110001500)
+#define SH2_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000010001500)
+#define SH_RTC1_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
+#define SH_RTC1_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC1_INT_ENABLE_RTC1_ENABLE */
+/* Description: Enable RTC 1 Interrupt */
+#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT 0
+#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK \
+ __IA64_UL_CONST(0x0000000000000001)
+
+/* ==================================================================== */
+/* Register "SH_RTC2_INT_CONFIG" */
+/* SHub RTC 2 Interrupt Config Registers */
+/* ==================================================================== */
+
+#define SH1_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000110001580)
+#define SH2_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000010001580)
+#define SH_RTC2_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
+#define SH_RTC2_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC2_INT_CONFIG_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_RTC2_INT_CONFIG_TYPE_SHFT 0
+#define SH_RTC2_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
+
+/* SH_RTC2_INT_CONFIG_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_RTC2_INT_CONFIG_AGT_SHFT 3
+#define SH_RTC2_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
+
+/* SH_RTC2_INT_CONFIG_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_RTC2_INT_CONFIG_PID_SHFT 4
+#define SH_RTC2_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
+
+/* SH_RTC2_INT_CONFIG_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_RTC2_INT_CONFIG_BASE_SHFT 21
+#define SH_RTC2_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
+
+/* SH_RTC2_INT_CONFIG_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_RTC2_INT_CONFIG_IDX_SHFT 52
+#define SH_RTC2_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
+
+/* ==================================================================== */
+/* Register "SH_RTC2_INT_ENABLE" */
+/* SHub RTC 2 Interrupt Enable Registers */
+/* ==================================================================== */
+
+#define SH1_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000110001600)
+#define SH2_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000010001600)
+#define SH_RTC2_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
+#define SH_RTC2_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC2_INT_ENABLE_RTC2_ENABLE */
+/* Description: Enable RTC 2 Interrupt */
+#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT 0
+#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK \
+ __IA64_UL_CONST(0x0000000000000001)
+
+/* ==================================================================== */
+/* Register "SH_RTC3_INT_CONFIG" */
+/* SHub RTC 3 Interrupt Config Registers */
+/* ==================================================================== */
+
+#define SH1_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000110001680)
+#define SH2_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000010001680)
+#define SH_RTC3_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
+#define SH_RTC3_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC3_INT_CONFIG_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_RTC3_INT_CONFIG_TYPE_SHFT 0
+#define SH_RTC3_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
+
+/* SH_RTC3_INT_CONFIG_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_RTC3_INT_CONFIG_AGT_SHFT 3
+#define SH_RTC3_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
+
+/* SH_RTC3_INT_CONFIG_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_RTC3_INT_CONFIG_PID_SHFT 4
+#define SH_RTC3_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
+
+/* SH_RTC3_INT_CONFIG_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_RTC3_INT_CONFIG_BASE_SHFT 21
+#define SH_RTC3_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
+
+/* SH_RTC3_INT_CONFIG_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_RTC3_INT_CONFIG_IDX_SHFT 52
+#define SH_RTC3_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
+
+/* ==================================================================== */
+/* Register "SH_RTC3_INT_ENABLE" */
+/* SHub RTC 3 Interrupt Enable Registers */
+/* ==================================================================== */
+
+#define SH1_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000110001700)
+#define SH2_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000010001700)
+#define SH_RTC3_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
+#define SH_RTC3_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC3_INT_ENABLE_RTC3_ENABLE */
+/* Description: Enable RTC 3 Interrupt */
+#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT 0
+#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK \
+ __IA64_UL_CONST(0x0000000000000001)
+
+/* SH_EVENT_OCCURRED_RTC1_INT */
+/* Description: Pending RTC 1 Interrupt */
+#define SH_EVENT_OCCURRED_RTC1_INT_SHFT 24
+#define SH_EVENT_OCCURRED_RTC1_INT_MASK __IA64_UL_CONST(0x0000000001000000)
+
+/* SH_EVENT_OCCURRED_RTC2_INT */
+/* Description: Pending RTC 2 Interrupt */
+#define SH_EVENT_OCCURRED_RTC2_INT_SHFT 25
+#define SH_EVENT_OCCURRED_RTC2_INT_MASK __IA64_UL_CONST(0x0000000002000000)
+
+/* SH_EVENT_OCCURRED_RTC3_INT */
+/* Description: Pending RTC 3 Interrupt */
+#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26
+#define SH_EVENT_OCCURRED_RTC3_INT_MASK __IA64_UL_CONST(0x0000000004000000)
+
+/* ==================================================================== */
+/* Register "SH_IPI_ACCESS" */
+/* CPU interrupt Access Permission Bits */
+/* ==================================================================== */
+
+#define SH1_IPI_ACCESS __IA64_UL_CONST(0x0000000110060480)
+#define SH2_IPI_ACCESS0 __IA64_UL_CONST(0x0000000010060c00)
+#define SH2_IPI_ACCESS1 __IA64_UL_CONST(0x0000000010060c80)
+#define SH2_IPI_ACCESS2 __IA64_UL_CONST(0x0000000010060d00)
+#define SH2_IPI_ACCESS3 __IA64_UL_CONST(0x0000000010060d80)
+
+/* ==================================================================== */
+/* Register "SH_INT_CMPB" */
+/* RTC Compare Value for Processor B */
+/* ==================================================================== */
+
+#define SH1_INT_CMPB __IA64_UL_CONST(0x00000001101b0080)
+#define SH2_INT_CMPB __IA64_UL_CONST(0x00000000101b0080)
+#define SH_INT_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
+#define SH_INT_CMPB_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_INT_CMPB_REAL_TIME_CMPB */
+/* Description: Real Time Clock Compare */
+#define SH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
+#define SH_INT_CMPB_REAL_TIME_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/* Register "SH_INT_CMPC" */
+/* RTC Compare Value for Processor C */
+/* ==================================================================== */
+
+#define SH1_INT_CMPC __IA64_UL_CONST(0x00000001101b0100)
+#define SH2_INT_CMPC __IA64_UL_CONST(0x00000000101b0100)
+#define SH_INT_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
+#define SH_INT_CMPC_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_INT_CMPC_REAL_TIME_CMPC */
+/* Description: Real Time Clock Compare */
+#define SH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define SH_INT_CMPC_REAL_TIME_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/* Register "SH_INT_CMPD" */
+/* RTC Compare Value for Processor D */
+/* ==================================================================== */
+
+#define SH1_INT_CMPD __IA64_UL_CONST(0x00000001101b0180)
+#define SH2_INT_CMPD __IA64_UL_CONST(0x00000000101b0180)
+#define SH_INT_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
+#define SH_INT_CMPD_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_INT_CMPD_REAL_TIME_CMPD */
+/* Description: Real Time Clock Compare */
+#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define SH_INT_CMPD_REAL_TIME_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */
+/* privilege vector for acc=0 */
+/* ==================================================================== */
+#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100030300)
+
+/* ==================================================================== */
+/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */
+/* privilege vector for acc=0 */
+/* ==================================================================== */
+#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100050300)
+
+/* ==================================================================== */
+/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
+/* and SHUB2 that it makes sense to define a generic name for the MMR. */
+/* It is acceptable to use (for example) SH_IPI_INT to reference the   */
+/* IPI MMR. The value of SH_IPI_INT is determined at runtime based     */
+/* on the type of the SHUB. Do not use these #defines in performance */
+/* critical code or loops - there is a small performance penalty. */
+/* ==================================================================== */
+#define shubmmr(a,b) (is_shub2() ? a##2_##b : a##1_##b)
+
+#define SH_REAL_JUNK_BUS_LED0 shubmmr(SH, REAL_JUNK_BUS_LED0)
+#define SH_IPI_INT shubmmr(SH, IPI_INT)
+#define SH_EVENT_OCCURRED shubmmr(SH, EVENT_OCCURRED)
+#define SH_EVENT_OCCURRED_ALIAS shubmmr(SH, EVENT_OCCURRED_ALIAS)
+#define SH_RTC shubmmr(SH, RTC)
+#define SH_RTC1_INT_CONFIG shubmmr(SH, RTC1_INT_CONFIG)
+#define SH_RTC1_INT_ENABLE shubmmr(SH, RTC1_INT_ENABLE)
+#define SH_RTC2_INT_CONFIG shubmmr(SH, RTC2_INT_CONFIG)
+#define SH_RTC2_INT_ENABLE shubmmr(SH, RTC2_INT_ENABLE)
+#define SH_RTC3_INT_CONFIG shubmmr(SH, RTC3_INT_CONFIG)
+#define SH_RTC3_INT_ENABLE shubmmr(SH, RTC3_INT_ENABLE)
+#define SH_INT_CMPB shubmmr(SH, INT_CMPB)
+#define SH_INT_CMPC shubmmr(SH, INT_CMPC)
+#define SH_INT_CMPD shubmmr(SH, INT_CMPD)
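+
+/*
+ * Example (illustrative only): generic code can use the runtime-selected
+ * names directly; on a SHUB2 system SH_RTC evaluates to SH2_RTC.
+ */
+static inline unsigned long sh_rtc_mmr(void)
+{
+	return SH_RTC;	/* SH1_RTC or SH2_RTC, picked via is_shub2() */
+}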
+
+/* ========================================================================== */
+/* Register "SH2_BT_ENG_CSR_0" */
+/* Engine 0 Control and Status Register */
+/* ========================================================================== */
+
+#define SH2_BT_ENG_CSR_0 __IA64_UL_CONST(0x0000000030040000)
+#define SH2_BT_ENG_SRC_ADDR_0 __IA64_UL_CONST(0x0000000030040080)
+#define SH2_BT_ENG_DEST_ADDR_0 __IA64_UL_CONST(0x0000000030040100)
+#define SH2_BT_ENG_NOTIF_ADDR_0 __IA64_UL_CONST(0x0000000030040180)
+
+/* ========================================================================== */
+/* BTE interfaces 1-3 */
+/* ========================================================================== */
+
+#define SH2_BT_ENG_CSR_1 __IA64_UL_CONST(0x0000000030050000)
+#define SH2_BT_ENG_CSR_2 __IA64_UL_CONST(0x0000000030060000)
+#define SH2_BT_ENG_CSR_3 __IA64_UL_CONST(0x0000000030070000)
+
+#endif /* _ASM_IA64_SN_SHUB_MMR_H */
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
new file mode 100644
index 000000000000..22a6f18a5313
--- /dev/null
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -0,0 +1,3358 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SHUBIO_H
+#define _ASM_IA64_SN_SHUBIO_H
+
+#define HUB_WIDGET_ID_MAX 0xf
+#define IIO_NUM_ITTES 7
+#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
+
+#define IIO_WID 0x00400000 /* Crosstalk Widget Identification */
+ /* This register is also accessible from
+ * Crosstalk at address 0x0. */
+#define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */
+#define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */
+#define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */
+#define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */
+#define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */
+#define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */
+#define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */
+#define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */
+#define IIO_ILLR 0x00400130 /* IO LLP Log Register */
+#define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */
+
+#define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */
+#define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */
+
+#define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */
+#define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */
+
+#define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */
+#define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */
+#define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */
+#define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */
+#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */
+#define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */
+#define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */
+
+#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */
+#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */
+#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */
+#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */
+#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */
+#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */
+#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */
+#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */
+#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */
+
+#define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */
+#define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */
+#define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */
+#define IIO_IECLR 0x004001F8 /* IO Error Clear Register */
+#define IIO_IBCR 0x00400200 /* IO BTE Control Register */
+
+#define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */
+#define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */
+
+#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */
+
+#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */
+#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */
+
+#define IIO_ISLAPR		0x00400230	/* IO SXB Local Access Protection Register */
+#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */
+
+#define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */
+#define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */
+#define IIO_IWC 0x00400250 /* IO Wrapper Control Register */
+#define IIO_IWS 0x00400258 /* IO Wrapper Status Register */
+#define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */
+
+#define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */
+
+#define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */
+#define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */
+#define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */
+#define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */
+#define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */
+#define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */
+#define IIO_IPRTE6_A 0x00400338 /* IO PIO Read Address Table Entry 6, Part A */
+#define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */
+
+#define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */
+#define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */
+#define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */
+#define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */
+#define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */
+#define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */
+#define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */
+#define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */
+
+#define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */
+#define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */
+#define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */
+#define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */
+#define IIO_ICMR 0x004003A8 /* IO CRB Management Register */
+#define IIO_ICCR 0x004003B0 /* IO CRB Control Register */
+#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */
+#define IIO_ICTP		0x004003C0	/* IO CRB Timeout Prescaler */
+
+#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */
+#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */
+#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */
+#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */
+#define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */
+
+#define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */
+#define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */
+#define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */
+#define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */
+#define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */
+
+#define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */
+#define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */
+#define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */
+#define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */
+#define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */
+
+#define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */
+#define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */
+#define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */
+#define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */
+#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */
+
+#define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */
+#define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */
+#define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */
+#define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */
+#define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */
+
+#define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */
+#define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */
+#define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */
+#define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */
+#define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */
+
+#define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */
+#define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */
+#define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */
+#define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */
+#define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */
+
+#define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */
+#define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */
+#define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */
+#define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */
+#define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */
+
+#define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */
+#define IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */
+#define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */
+#define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */
+#define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */
+
+#define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */
+#define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */
+#define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */
+#define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */
+#define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */
+
+#define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */
+#define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */
+#define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */
+#define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */
+#define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */
+
+#define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */
+#define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */
+#define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */
+#define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */
+#define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */
+
+#define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */
+#define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */
+#define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */
+#define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */
+#define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */
+
+#define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */
+#define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */
+#define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */
+#define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */
+#define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */
+
+#define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */
+#define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */
+#define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */
+#define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */
+#define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */
+
+#define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */
+#define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */
+#define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */
+
+#define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */
+
+#define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */
+#define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */
+#define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */
+#define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */
+#define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */
+#define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */
+#define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */
+#define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */
+#define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */
+#define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */
+#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */
+#define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */
+
+#define IIO_IPCR 0x00430000 /* IO Performance Control */
+#define IIO_IPPR 0x00430008 /* IO Performance Profiling */
+
+/************************************************************************
+ * *
+ * Description: This register echoes some information from the *
+ * LB_REV_ID register. It is available through Crosstalk as described *
+ * above. The REV_NUM and MFG_NUM fields receive their values from *
+ * the REVISION and MANUFACTURER fields in the LB_REV_ID register. *
+ * The PART_NUM field's value is the Crosstalk device ID number that *
+ * Steve Miller assigned to the SHub chip. *
+ * *
+ ************************************************************************/
+
+typedef union ii_wid_u {
+ u64 ii_wid_regval;
+ struct {
+ u64 w_rsvd_1:1;
+ u64 w_mfg_num:11;
+ u64 w_part_num:16;
+ u64 w_rev_num:4;
+ u64 w_rsvd:32;
+ } ii_wid_fld_s;
+} ii_wid_u_t;
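+
+/*
+ * Sketch (illustrative only): the union layout above lets a raw MMR
+ * value be decoded through the bitfield view; the same pattern applies
+ * to every ii_*_u union in this file.
+ */
+static inline unsigned int ii_wid_rev_num(u64 regval)
+{
+	ii_wid_u_t wid;
+
+	wid.ii_wid_regval = regval;	/* load the raw 64-bit view */
+	return wid.ii_wid_fld_s.w_rev_num;
+}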
+
+/************************************************************************
+ * *
+ * The fields in this register are set upon detection of an error *
+ * and cleared by various mechanisms, as explained in the *
+ * description. *
+ * *
+ ************************************************************************/
+
+typedef union ii_wstat_u {
+ u64 ii_wstat_regval;
+ struct {
+ u64 w_pending:4;
+ u64 w_xt_crd_to:1;
+ u64 w_xt_tail_to:1;
+ u64 w_rsvd_3:3;
+ u64 w_tx_mx_rty:1;
+ u64 w_rsvd_2:6;
+ u64 w_llp_tx_cnt:8;
+ u64 w_rsvd_1:8;
+ u64 w_crazy:1;
+ u64 w_rsvd:31;
+ } ii_wstat_fld_s;
+} ii_wstat_u_t;
+
+/************************************************************************
+ * *
+ * Description: This is a read-write enabled register. It controls *
+ * various aspects of the Crosstalk flow control. *
+ * *
+ ************************************************************************/
+
+typedef union ii_wcr_u {
+ u64 ii_wcr_regval;
+ struct {
+ u64 w_wid:4;
+ u64 w_tag:1;
+ u64 w_rsvd_1:8;
+ u64 w_dst_crd:3;
+ u64 w_f_bad_pkt:1;
+ u64 w_dir_con:1;
+ u64 w_e_thresh:5;
+ u64 w_rsvd:41;
+ } ii_wcr_fld_s;
+} ii_wcr_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register's value is a bit vector that guards *
+ * access to local registers within the II as well as to external *
+ * Crosstalk widgets. Each bit in the register corresponds to a *
+ * particular region in the system; a region consists of one, two or *
+ * four nodes (depending on the value of the REGION_SIZE field in the *
+ * LB_REV_ID register, which is documented in Section 8.3.1.1). The *
+ * protection provided by this register applies to PIO read *
+ * operations as well as PIO write operations. The II will perform a *
+ * PIO read or write request only if the bit for the requestor's *
+ * region is set; otherwise, the II will not perform the requested *
+ * operation and will return an error response. When a PIO read or *
+ * write request targets an external Crosstalk widget, then not only *
+ * must the bit for the requestor's region be set in the ILAPR, but *
+ * also the target widget's bit in the IOWA register must be set in *
+ * order for the II to perform the requested operation; otherwise, *
+ * the II will return an error response. Hence, the protection *
+ * provided by the IOWA register supplements the protection provided *
+ * by the ILAPR for requests that target external Crosstalk widgets. *
+ * This register itself can be accessed only by the nodes whose *
+ * region ID bits are enabled in this same register. It can also be *
+ * accessed through the IAlias space by the local processors. *
+ * The reset value of this register allows access by all nodes. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilapr_u {
+ u64 ii_ilapr_regval;
+ struct {
+ u64 i_region:64;
+ } ii_ilapr_fld_s;
+} ii_ilapr_u_t;
+
+/************************************************************************
+ * *
+ * Description: A write to this register of the 64-bit value *
+ * "SGIrules" in ASCII, will cause the bit in the ILAPR register *
+ * corresponding to the region of the requestor to be set (allow *
+ * access). A write of any other value will be ignored. Access *
+ * protection for this register is "SGIrules". *
+ * This register can also be accessed through the IAlias space. *
+ * However, this access will not change the access permissions in the *
+ * ILAPR. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilapo_u {
+ u64 ii_ilapo_regval;
+ struct {
+ u64 i_io_ovrride:64;
+ } ii_ilapo_fld_s;
+} ii_ilapo_u_t;
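+
+/*
+ * Sketch (illustrative only): on little-endian ia64 the unlock value
+ * described above is the eight ASCII bytes of "SGIrules" packed
+ * LSB-first into a 64-bit word.
+ */
+static inline u64 ii_ilapo_unlock_value(void)
+{
+	return 0x73656c7572494753UL;	/* "SGIrules", 'S' in the low byte */
+}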
+
+/************************************************************************
+ * *
+ * This register qualifies all the PIO and Graphics writes launched *
+ * from the SHUB towards a widget. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iowa_u {
+ u64 ii_iowa_regval;
+ struct {
+ u64 i_w0_oac:1;
+ u64 i_rsvd_1:7;
+ u64 i_wx_oac:8;
+ u64 i_rsvd:48;
+ } ii_iowa_fld_s;
+} ii_iowa_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register qualifies all the requests launched *
+ * from a widget towards the Shub. This register is intended to be *
+ * used by software in case of misbehaving widgets. *
+ * *
+ * *
+ ************************************************************************/
+
+typedef union ii_iiwa_u {
+ u64 ii_iiwa_regval;
+ struct {
+ u64 i_w0_iac:1;
+ u64 i_rsvd_1:7;
+ u64 i_wx_iac:8;
+ u64 i_rsvd:48;
+ } ii_iiwa_fld_s;
+} ii_iiwa_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register qualifies all the operations launched *
+ * from a widget towards the SHub. It allows individual access *
+ * control for up to 8 devices per widget. A device refers to *
+ * individual DMA master hosted by a widget. *
+ * The bits in each field of this register are cleared by the Shub *
+ * upon detection of an error which requires the device to be *
+ * disabled. These fields assume that 0 <= TNUM <= 7 (i.e., Bridge-    *
+ * centric                                                             *
+ * Crosstalk). Whether or not a device has access rights to this *
+ * Shub is determined by an AND of the device enable bit in the *
+ * appropriate field of this register and the corresponding bit in *
+ * the Wx_IAC field (for the widget which this device belongs to). *
+ * The bits in this field are set by writing a 1 to them. Incoming *
+ * replies from Crosstalk are not subject to this access control *
+ * mechanism. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iidem_u {
+ u64 ii_iidem_regval;
+ struct {
+ u64 i_w8_dxs:8;
+ u64 i_w9_dxs:8;
+ u64 i_wa_dxs:8;
+ u64 i_wb_dxs:8;
+ u64 i_wc_dxs:8;
+ u64 i_wd_dxs:8;
+ u64 i_we_dxs:8;
+ u64 i_wf_dxs:8;
+ } ii_iidem_fld_s;
+} ii_iidem_u_t;
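+
+/*
+ * Sketch (illustrative only): per the description above, a device has
+ * access only if both its IIDEM enable bit and its widget's Wx_IAC bit
+ * are set; the exact bit numbering here is an assumption.
+ */
+static inline int iio_w8_dev_has_access(ii_iidem_u_t dem, ii_iiwa_u_t iwa,
+					int dev)
+{
+	return ((dem.ii_iidem_fld_s.i_w8_dxs >> dev) & 1) &&
+	       (iwa.ii_iiwa_fld_s.i_wx_iac & 1);	/* widget 8 assumed */
+}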
+
+/************************************************************************
+ * *
+ * This register contains the various programmable fields necessary *
+ * for controlling and observing the LLP signals. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilcsr_u {
+ u64 ii_ilcsr_regval;
+ struct {
+ u64 i_nullto:6;
+ u64 i_rsvd_4:2;
+ u64 i_wrmrst:1;
+ u64 i_rsvd_3:1;
+ u64 i_llp_en:1;
+ u64 i_bm8:1;
+ u64 i_llp_stat:2;
+ u64 i_remote_power:1;
+ u64 i_rsvd_2:1;
+ u64 i_maxrtry:10;
+ u64 i_d_avail_sel:2;
+ u64 i_rsvd_1:4;
+ u64 i_maxbrst:10;
+		u64	i_rsvd:22;
+ } ii_ilcsr_fld_s;
+} ii_ilcsr_u_t;
+
+/************************************************************************
+ * *
+ * This is simply a status register that monitors the LLP error        *
+ * rate. *
+ * *
+ ************************************************************************/
+
+typedef union ii_illr_u {
+ u64 ii_illr_regval;
+ struct {
+ u64 i_sn_cnt:16;
+ u64 i_cb_cnt:16;
+ u64 i_rsvd:32;
+ } ii_illr_fld_s;
+} ii_illr_u_t;
+
+/************************************************************************
+ * *
+ * Description: All II-detected non-BTE error interrupts are *
+ * specified via this register. *
+ * NOTE: The PI interrupt register address is hardcoded in the II. If *
+ * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI *
+ * packet) to address offset 0x0180_0090 within the local register *
+ * address space of PI0 on the node specified by the NODE field. If *
+ * PI_ID==1, then the II sends the interrupt request to address *
+ * offset 0x01A0_0090 within the local register address space of PI1 *
+ * on the node specified by the NODE field. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iidsr_u {
+ u64 ii_iidsr_regval;
+ struct {
+ u64 i_level:8;
+ u64 i_pi_id:1;
+ u64 i_node:11;
+ u64 i_rsvd_3:4;
+ u64 i_enable:1;
+ u64 i_rsvd_2:3;
+ u64 i_int_sent:2;
+ u64 i_rsvd_1:2;
+ u64 i_pi0_forward_int:1;
+ u64 i_pi1_forward_int:1;
+ u64 i_rsvd:30;
+ } ii_iidsr_fld_s;
+} ii_iidsr_u_t;
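+
+/*
+ * Sketch (illustrative only): per the note above, PI_ID selects which
+ * processor interface's hardcoded register offset receives the
+ * interrupt request.
+ */
+static inline u64 ii_iidsr_pi_reg_offset(ii_iidsr_u_t dsr)
+{
+	return dsr.ii_iidsr_fld_s.i_pi_id ? 0x01a00090UL : 0x01800090UL;
+}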
+
+/************************************************************************
+ * *
+ * There are two instances of this register. This register is used *
+ * for matching up the incoming responses from the graphics widget to *
+ * the processor that initiated the graphics operation. The *
+ * write-responses are converted to graphics credits and returned to *
+ * the processor so that the processor interface can manage the flow *
+ * control. *
+ * *
+ ************************************************************************/
+
+typedef union ii_igfx0_u {
+ u64 ii_igfx0_regval;
+ struct {
+ u64 i_w_num:4;
+ u64 i_pi_id:1;
+ u64 i_n_num:12;
+ u64 i_p_num:1;
+ u64 i_rsvd:46;
+ } ii_igfx0_fld_s;
+} ii_igfx0_u_t;
+
+/************************************************************************
+ * *
+ * There are two instances of this register. This register is used *
+ * for matching up the incoming responses from the graphics widget to *
+ * the processor that initiated the graphics operation. The *
+ * write-responses are converted to graphics credits and returned to *
+ * the processor so that the processor interface can manage the flow *
+ * control. *
+ * *
+ ************************************************************************/
+
+typedef union ii_igfx1_u {
+ u64 ii_igfx1_regval;
+ struct {
+ u64 i_w_num:4;
+ u64 i_pi_id:1;
+ u64 i_n_num:12;
+ u64 i_p_num:1;
+ u64 i_rsvd:46;
+ } ii_igfx1_fld_s;
+} ii_igfx1_u_t;
+
+/************************************************************************
+ * *
+ * There are two instances of this register. These registers are       *
+ * used as scratch registers for software use. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iscr0_u {
+ u64 ii_iscr0_regval;
+ struct {
+ u64 i_scratch:64;
+ } ii_iscr0_fld_s;
+} ii_iscr0_u_t;
+
+/************************************************************************
+ * *
+ * There are two instances of this register. These registers are       *
+ * used as scratch registers for software use. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iscr1_u {
+ u64 ii_iscr1_regval;
+ struct {
+ u64 i_scratch:64;
+ } ii_iscr1_fld_s;
+} ii_iscr1_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space         *
+ * addressable by the SHub is thus the lower 16 GBytes per widget      *
+ * (M-mode), only 7/32nds of this space can be accessed.               *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
+ * Crosstalk space addressable by the Shub is thus the lower 8 GBytes  *
+ * per widget (N-mode), only 7/32nds of this space can be accessed.    *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte1_u {
+ u64 ii_itte1_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte1_fld_s;
+} ii_itte1_u_t;
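+
+/*
+ * Sketch (illustrative only, M-mode): forming Crosstalk[33:0] from the
+ * 5-bit OFFSET field and SysAD[28:0] as described above; the same
+ * decoding applies to all seven ITTE registers.
+ */
+static inline u64 ii_itte_xtalk_addr(ii_itte1_u_t itte, u64 sysad)
+{
+	return ((u64)itte.ii_itte1_fld_s.i_offset << 29) |
+	       (sysad & 0x1fffffffUL);
+}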
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space         *
+ * addressable by the Shub is thus the lower 16 GBytes per widget      *
+ * (M-mode), only 7/32nds of this space can be accessed.               *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
+ * Crosstalk space addressable by the Shub is thus the lower 8 GBytes  *
+ * per widget (N-mode), only 7/32nds of this space can be accessed.    *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte2_u {
+ u64 ii_itte2_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte2_fld_s;
+} ii_itte2_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space         *
+ * addressable by the Shub is thus the lower 16 GBytes per widget      *
+ * (M-mode), only 7/32nds of this space can be accessed.               *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
+ * Crosstalk space addressable by the SHub is thus the lower 8 GBytes  *
+ * per widget (N-mode), only 7/32nds of this space can be accessed.    *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte3_u {
+ u64 ii_itte3_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte3_fld_s;
+} ii_itte3_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a SHub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space         *
+ * addressable by the SHub is thus the lower 16 GBytes per widget      *
+ * (M-mode), only 7/32nds of this space can be accessed.               *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
+ * Crosstalk space addressable by the SHub is thus the lower 8 GBytes  *
+ * per widget (N-mode), only 7/32nds of this space can be accessed.    *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte4_u {
+ u64 ii_itte4_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte4_fld_s;
+} ii_itte4_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a SHub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space         *
+ * addressable by the Shub is thus the lower 16 GBytes per widget      *
+ * (M-mode), only 7/32nds of this space can be accessed.               *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk  *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum  *
+ * Crosstalk space addressable by the Shub is thus the lower 8 GBytes  *
+ * per widget (N-mode), only 7/32nds of this space can be accessed.    *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte5_u {
+ u64 ii_itte5_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte5_fld_s;
+} ii_itte5_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. The maximum Crosstalk space addressable     *
+ * by the SHub is thus the lower 16 GBytes per widget (M-mode), but   *
+ * only 7/32nds of this space can be accessed.                        *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the SHub is thus the lower          *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be    *
+ * accessed.                                                          *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte6_u {
+ u64 ii_itte6_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte6_fld_s;
+} ii_itte6_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. The maximum Crosstalk space addressable     *
+ * by the SHub is thus the lower 16 GBytes per widget (M-mode), but   *
+ * only 7/32nds of this space can be accessed.                        *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the SHub is thus the lower *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be    *
+ * accessed.                                                          *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte7_u {
+ u64 ii_itte7_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte7_fld_s;
+} ii_itte7_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register.                                      *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ *                                                                    *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprb0_u {
+ u64 ii_iprb0_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprb0_fld_s;
+} ii_iprb0_u_t;
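+
+/*
+ * Hedged sketch (not from the original header): per the SPUR_WR note
+ * above, software clears SPUR_WR by rewriting IPRB0, which corrects
+ * the C field and captures the new value in the internal compare
+ * register.  The MMIO access itself is omitted; this only derives the
+ * value to write back.
+ */
+static inline u64 ii_iprb0_spur_wr_fixup(u64 regval, u64 credits)
+{
+	ii_iprb0_u_t prb;
+
+	prb.ii_iprb0_regval = regval;
+	if (prb.ii_iprb0_fld_s.i_spur_wr)
+		prb.ii_iprb0_fld_s.i_c = credits; /* desired credit count */
+	return prb.ii_iprb0_regval;
+}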
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register.                                      *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ *                                                                    *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprb8_u {
+ u64 ii_iprb8_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprb8_fld_s;
+} ii_iprb8_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register.                                      *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ *                                                                    *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprb9_u {
+ u64 ii_iprb9_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprb9_fld_s;
+} ii_iprb9_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register.                                      *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprba_u {
+ u64 ii_iprba_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprba_fld_s;
+} ii_iprba_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register.                                      *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ *                                                                    *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbb_u {
+ u64 ii_iprbb_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprbb_fld_s;
+} ii_iprbb_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register.                                      *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ *                                                                    *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbc_u {
+ u64 ii_iprbc_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprbc_fld_s;
+} ii_iprbc_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register.                                      *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ *                                                                    *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbd_u {
+ u64 ii_iprbd_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprbd_fld_s;
+} ii_iprbd_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register.                                      *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ *                                                                    *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbe_u {
+ u64 ii_iprbe_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprbe_fld_s;
+} ii_iprbe_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow.         *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register.                                      *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ *                                                                    *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbf_u {
+ u64 ii_iprbf_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+	} ii_iprbf_fld_s;
+} ii_iprbf_u_t;
+
+/************************************************************************
+ * *
+ * This register specifies the timeout value to use for monitoring *
+ * Crosstalk credits which are used outbound to Crosstalk. An *
+ * internal counter called the Crosstalk Credit Timeout Counter *
+ * increments every 128 II clocks. The counter starts counting *
+ * anytime the credit count drops below a threshold, and resets to *
+ * zero (stops counting) anytime the credit count is at or above the *
+ * threshold. The threshold is 1 credit in direct connect mode and 2 *
+ * in Crossbow connect mode. When the internal Crosstalk Credit *
+ * Timeout Counter reaches the value programmed in this register, a *
+ * Crosstalk Credit Timeout has occurred. The internal counter is not *
+ * readable from software, and stops counting at its maximum value, *
+ * so it cannot cause more than one interrupt. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ixcc_u {
+ u64 ii_ixcc_regval;
+ struct {
+ u64 i_time_out:26;
+ u64 i_rsvd:38;
+ } ii_ixcc_fld_s;
+} ii_ixcc_u_t;
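+
+/*
+ * Sketch (not in the original header): the internal Crosstalk Credit
+ * Timeout Counter ticks once per 128 II clocks, so the programmed
+ * TIME_OUT value corresponds to roughly this many II clocks.
+ */
+static inline u64 ii_ixcc_timeout_clocks(ii_ixcc_u_t ixcc)
+{
+	return (u64)ixcc.ii_ixcc_fld_s.i_time_out * 128;
+}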
+
+/************************************************************************
+ * *
+ * Description: This register qualifies all the PIO and DMA *
+ * operations launched from widget 0 towards the SHub. In *
+ * addition, it also qualifies accesses by the BTE streams. *
+ * The bits in each field of this register are cleared by the SHub *
+ * upon detection of an error which requires widget 0 or the BTE *
+ * streams to be terminated. Whether or not widget x has access *
+ * rights to this SHub is determined by an AND of the device *
+ * enable bit in the appropriate field of this register and bit 0 in *
+ * the Wx_IAC field. The bits in this field are set by writing a 1 to *
+ * them. Incoming replies from Crosstalk are not subject to this *
+ * access control mechanism. *
+ * *
+ ************************************************************************/
+
+typedef union ii_imem_u {
+ u64 ii_imem_regval;
+ struct {
+ u64 i_w0_esd:1;
+ u64 i_rsvd_3:3;
+ u64 i_b0_esd:1;
+ u64 i_rsvd_2:3;
+ u64 i_b1_esd:1;
+ u64 i_rsvd_1:3;
+ u64 i_clr_precise:1;
+ u64 i_rsvd:51;
+ } ii_imem_fld_s;
+} ii_imem_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register specifies the timeout value to use for *
+ * monitoring Crosstalk tail flits coming into the Shub in the *
+ * TAIL_TO field. An internal counter associated with this register *
+ * is incremented every 128 II internal clocks (7 bits). The counter *
+ * starts counting anytime a header micropacket is received and stops *
+ * counting (and resets to zero) any time a micropacket with a Tail *
+ * bit is received. Once the counter reaches the threshold value *
+ * programmed in this register, it generates an interrupt to the *
+ * processor that is programmed into the IIDSR. The counter saturates *
+ * (does not roll over) at its maximum value, so it cannot cause *
+ * another interrupt until after it is cleared. *
+ * The register also contains the Read Response Timeout values. The *
+ * Prescalar is 23 bits, and counts II clocks. An internal counter *
+ * increments on every II clock and when it reaches the value in the *
+ * Prescalar field, all IPRTE registers with their valid bits set *
+ * have their Read Response timers bumped. Whenever any of them match *
+ * the value in the RRSP_TO field, a Read Response Timeout has *
+ * occurred, and error handling occurs as described in the Error *
+ * Handling section of this document. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ixtt_u {
+ u64 ii_ixtt_regval;
+ struct {
+ u64 i_tail_to:26;
+ u64 i_rsvd_1:6;
+ u64 i_rrsp_ps:23;
+ u64 i_rrsp_to:5;
+ u64 i_rsvd:4;
+ } ii_ixtt_fld_s;
+} ii_ixtt_u_t;
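+
+/*
+ * Sketch (not in the original header): both timeouts above scale with
+ * the II clock.  TAIL_TO ticks once per 128 II clocks; the Read
+ * Response timers are bumped once per RRSP_PS II clocks, so a Read
+ * Response Timeout fires after about RRSP_PS * RRSP_TO II clocks.
+ */
+static inline u64 ii_ixtt_tail_timeout_clocks(ii_ixtt_u_t ixtt)
+{
+	return (u64)ixtt.ii_ixtt_fld_s.i_tail_to * 128;
+}
+
+static inline u64 ii_ixtt_rrsp_timeout_clocks(ii_ixtt_u_t ixtt)
+{
+	return (u64)ixtt.ii_ixtt_fld_s.i_rrsp_ps *
+	       ixtt.ii_ixtt_fld_s.i_rrsp_to;
+}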
+
+/************************************************************************
+ * *
+ * Writing a 1 to the fields of this register clears the appropriate *
+ * error bits in other areas of SHub. Note that when the *
+ * E_PRB_x bits are used to clear error bits in PRB registers, *
+ * SPUR_RD and SPUR_WR may persist, because they require additional *
+ * action to clear them. See the IPRBx and IXSS Register *
+ * specifications. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ieclr_u {
+ u64 ii_ieclr_regval;
+ struct {
+ u64 i_e_prb_0:1;
+ u64 i_rsvd:7;
+ u64 i_e_prb_8:1;
+ u64 i_e_prb_9:1;
+ u64 i_e_prb_a:1;
+ u64 i_e_prb_b:1;
+ u64 i_e_prb_c:1;
+ u64 i_e_prb_d:1;
+ u64 i_e_prb_e:1;
+ u64 i_e_prb_f:1;
+ u64 i_e_crazy:1;
+ u64 i_e_bte_0:1;
+ u64 i_e_bte_1:1;
+ u64 i_reserved_1:10;
+ u64 i_spur_rd_hdr:1;
+ u64 i_cam_intr_to:1;
+ u64 i_cam_overflow:1;
+ u64 i_cam_read_miss:1;
+ u64 i_ioq_rep_underflow:1;
+ u64 i_ioq_req_underflow:1;
+ u64 i_ioq_rep_overflow:1;
+ u64 i_ioq_req_overflow:1;
+ u64 i_iiq_rep_overflow:1;
+ u64 i_iiq_req_overflow:1;
+ u64 i_ii_xn_rep_cred_overflow:1;
+ u64 i_ii_xn_req_cred_overflow:1;
+ u64 i_ii_xn_invalid_cmd:1;
+ u64 i_xn_ii_invalid_cmd:1;
+ u64 i_reserved_2:21;
+ } ii_ieclr_fld_s;
+} ii_ieclr_u_t;
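+
+/*
+ * Sketch (not in the original header): compose an IECLR value that
+ * clears the PRB error state for widget 8 and BTE stream 0 by writing
+ * ones to the corresponding fields, as described above.
+ */
+static inline u64 ii_ieclr_prb8_bte0(void)
+{
+	ii_ieclr_u_t clr;
+
+	clr.ii_ieclr_regval = 0;
+	clr.ii_ieclr_fld_s.i_e_prb_8 = 1;
+	clr.ii_ieclr_fld_s.i_e_bte_0 = 1;
+	return clr.ii_ieclr_regval;
+}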
+
+/************************************************************************
+ * *
+ * This register controls both BTEs. SOFT_RESET is intended for *
+ * recovery after an error. COUNT controls the total number of CRBs *
+ * that both BTEs (combined) can use, which affects total BTE *
+ * bandwidth. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibcr_u {
+ u64 ii_ibcr_regval;
+ struct {
+ u64 i_count:4;
+ u64 i_rsvd_1:4;
+ u64 i_soft_reset:1;
+ u64 i_rsvd:55;
+ } ii_ibcr_fld_s;
+} ii_ibcr_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the header of a spurious read response *
+ * received from Crosstalk. A spurious read response is defined as a *
+ * read response received by II from a widget for which (1) the SIDN *
+ * has a value between 1 and 7, inclusive (II never sends requests    *
+ * to these widgets), (2) there is no valid IPRTE register which      *
+ * corresponds to the TNUM, or (3) the widget indicated in SIDN is    *
+ * not the same as the widget recorded in the IPRTE register          *
+ * referenced by the TNUM. If this condition is true, and if the      *
+ * IXSS[VALID] bit is clear, then the header of the spurious read     *
+ * response is captured in IXSM and IXSS, and IXSS[VALID] is set.     *
+ * The errant header is thereby captured, and no further spurious     *
+ * read responses are captured until IXSS[VALID] is cleared by        *
+ * setting the appropriate bit in IECLR. Every time a spurious read   *
+ * response is detected, the SPUR_RD bit of the PRB corresponding to  *
+ * the incoming message's SIDN field is set. This always happens,     *
+ * regardless of whether a header is captured. The programmer should  *
+ * check IXSM[SIDN] to determine which widget sent the spurious       *
+ * response, because there may be more than one SPUR_RD bit set in    *
+ * the PRB registers. The widget indicated by IXSM[SIDN] was the      *
+ * first spurious read response to be received since the last time    *
+ * IXSS[VALID] was clear. The SPUR_RD bit of the corresponding PRB    *
+ * will be set. Any SPUR_RD bits in other PRB registers indicate      *
+ * spurious messages from other widgets which were detected after     *
+ * the header was captured.                                           *
+ * *
+ ************************************************************************/
+
+typedef union ii_ixsm_u {
+ u64 ii_ixsm_regval;
+ struct {
+ u64 i_byte_en:32;
+ u64 i_reserved:1;
+ u64 i_tag:3;
+ u64 i_alt_pactyp:4;
+ u64 i_bo:1;
+ u64 i_error:1;
+ u64 i_vbpm:1;
+ u64 i_gbr:1;
+ u64 i_ds:2;
+ u64 i_ct:1;
+ u64 i_tnum:5;
+ u64 i_pactyp:4;
+ u64 i_sidn:4;
+ u64 i_didn:4;
+ } ii_ixsm_fld_s;
+} ii_ixsm_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the sideband bits of a spurious read *
+ * response received from Crosstalk. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ixss_u {
+ u64 ii_ixss_regval;
+ struct {
+ u64 i_sideband:8;
+ u64 i_rsvd:55;
+ u64 i_valid:1;
+ } ii_ixss_fld_s;
+} ii_ixss_u_t;
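+
+/*
+ * Sketch (not in the original header): the check described above for
+ * a captured spurious read response.  Returns the offending widget
+ * from IXSM[SIDN] when IXSS[VALID] is set, or -1 when nothing was
+ * captured; clearing VALID via IECLR is left to the caller.
+ */
+static inline int ii_spurious_read_widget(ii_ixss_u_t ixss,
+					  ii_ixsm_u_t ixsm)
+{
+	if (!ixss.ii_ixss_fld_s.i_valid)
+		return -1;
+	return (int)ixsm.ii_ixsm_fld_s.i_sidn;
+}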
+
+/************************************************************************
+ * *
+ * This register enables software to access the II LLP's test port. *
+ * Refer to the LLP 2.5 documentation for an explanation of the test *
+ * port. Software can write to this register to program the values *
+ * for the control fields (TestErrCapture, TestClear, TestFlit, *
+ * TestMask and TestSeed). Similarly, software can read from this *
+ * register to obtain the values of the test port's status outputs *
+ * (TestCBerr, TestValid and TestData). *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilct_u {
+ u64 ii_ilct_regval;
+ struct {
+ u64 i_test_seed:20;
+ u64 i_test_mask:8;
+ u64 i_test_data:20;
+ u64 i_test_valid:1;
+ u64 i_test_cberr:1;
+ u64 i_test_flit:3;
+ u64 i_test_clear:1;
+ u64 i_test_err_capture:1;
+ u64 i_rsvd:9;
+ } ii_ilct_fld_s;
+} ii_ilct_u_t;
+
+/************************************************************************
+ * *
+ * If the II detects an illegal incoming Duplonet packet (request or *
+ * reply) when VALID==0 in the IIEPH1 register, then it saves the *
+ * contents of the packet's header flit in the IIEPH1 and IIEPH2 *
+ * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit, *
+ * and assigns a value to the ERR_TYPE field which indicates the *
+ * specific nature of the error. The II recognizes four different *
+ * types of errors: short request packets (ERR_TYPE==2), short reply *
+ * packets (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long *
+ * reply packets (ERR_TYPE==5). The encodings for these types of *
+ * errors were chosen to be consistent with the same types of errors *
+ * indicated by the ERR_TYPE field in the LB_ERROR_HDR1 register (in *
+ * the LB unit). If the II detects an illegal incoming Duplonet *
+ * packet when VALID==1 in the IIEPH1 register, then it merely sets *
+ * the OVERRUN bit to indicate that a subsequent error has happened, *
+ * and does nothing further. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iieph1_u {
+ u64 ii_iieph1_regval;
+ struct {
+ u64 i_command:7;
+ u64 i_rsvd_5:1;
+ u64 i_suppl:14;
+ u64 i_rsvd_4:1;
+ u64 i_source:14;
+ u64 i_rsvd_3:1;
+ u64 i_err_type:4;
+ u64 i_rsvd_2:4;
+ u64 i_overrun:1;
+ u64 i_rsvd_1:3;
+ u64 i_valid:1;
+ u64 i_rsvd:13;
+ } ii_iieph1_fld_s;
+} ii_iieph1_u_t;
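+
+/*
+ * Sketch (not in the original header): decode the four ERR_TYPE
+ * encodings enumerated above.
+ */
+static inline const char *ii_iieph1_err_name(ii_iieph1_u_t eph1)
+{
+	switch (eph1.ii_iieph1_fld_s.i_err_type) {
+	case 2: return "short request packet";
+	case 3: return "short reply packet";
+	case 4: return "long request packet";
+	case 5: return "long reply packet";
+	default: return "unknown";
+	}
+}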
+
+/************************************************************************
+ * *
+ * This register holds the Address field from the header flit of an *
+ * incoming erroneous Duplonet packet, along with the tail bit which *
+ * accompanied this header flit. This register is essentially an *
+ * extension of IIEPH1. Two registers were necessary because the 64 *
+ * bits available in only a single register were insufficient to *
+ * capture the entire header flit of an erroneous packet. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iieph2_u {
+ u64 ii_iieph2_regval;
+ struct {
+ u64 i_rsvd_0:3;
+ u64 i_address:47;
+ u64 i_rsvd_1:10;
+ u64 i_tail:1;
+ u64 i_rsvd:3;
+ } ii_iieph2_fld_s;
+} ii_iieph2_u_t;
+
+/******************************/
+
+/************************************************************************
+ * *
+ * This register's value is a bit vector that guards access from SXBs *
+ * to local registers within the II as well as to external Crosstalk *
+ * widgets.                                                           *
+ * *
+ ************************************************************************/
+
+typedef union ii_islapr_u {
+ u64 ii_islapr_regval;
+ struct {
+ u64 i_region:64;
+ } ii_islapr_fld_s;
+} ii_islapr_u_t;
+
+/************************************************************************
+ * *
+ * A write to this register of the 56-bit value "Pup+Bun" will cause *
+ * the bit in the ISLAPR register corresponding to the region of the *
+ * requestor to be set (access allowed).                              *
+ * *
+ ************************************************************************/
+
+typedef union ii_islapo_u {
+ u64 ii_islapo_regval;
+ struct {
+ u64 i_io_sbx_ovrride:56;
+ u64 i_rsvd:8;
+ } ii_islapo_fld_s;
+} ii_islapo_u_t;
+
+/************************************************************************
+ * *
+ * Determines how long the wrapper will wait after an interrupt is    *
+ * initially issued from the II before it times out the outstanding *
+ * interrupt and drops it from the interrupt queue. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iwi_u {
+ u64 ii_iwi_regval;
+ struct {
+ u64 i_prescale:24;
+ u64 i_rsvd:8;
+ u64 i_timeout:8;
+ u64 i_rsvd1:8;
+ u64 i_intrpt_retry_period:8;
+ u64 i_rsvd2:8;
+ } ii_iwi_fld_s;
+} ii_iwi_u_t;
+
+/************************************************************************
+ * *
+ * Logs errors which have occurred in the II wrapper. The errors are  *
+ * cleared by writing to the IECLR register. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iwel_u {
+ u64 ii_iwel_regval;
+ struct {
+ u64 i_intr_timed_out:1;
+ u64 i_rsvd:7;
+ u64 i_cam_overflow:1;
+ u64 i_cam_read_miss:1;
+ u64 i_rsvd1:2;
+ u64 i_ioq_rep_underflow:1;
+ u64 i_ioq_req_underflow:1;
+ u64 i_ioq_rep_overflow:1;
+ u64 i_ioq_req_overflow:1;
+ u64 i_iiq_rep_overflow:1;
+ u64 i_iiq_req_overflow:1;
+ u64 i_rsvd2:6;
+ u64 i_ii_xn_rep_cred_over_under:1;
+ u64 i_ii_xn_req_cred_over_under:1;
+ u64 i_rsvd3:6;
+ u64 i_ii_xn_invalid_cmd:1;
+ u64 i_xn_ii_invalid_cmd:1;
+ u64 i_rsvd4:30;
+ } ii_iwel_fld_s;
+} ii_iwel_u_t;
+
+/************************************************************************
+ * *
+ * Controls the II wrapper. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iwc_u {
+ u64 ii_iwc_regval;
+ struct {
+ u64 i_dma_byte_swap:1;
+ u64 i_rsvd:3;
+ u64 i_cam_read_lines_reset:1;
+ u64 i_rsvd1:3;
+ u64 i_ii_xn_cred_over_under_log:1;
+ u64 i_rsvd2:19;
+ u64 i_xn_rep_iq_depth:5;
+ u64 i_rsvd3:3;
+ u64 i_xn_req_iq_depth:5;
+ u64 i_rsvd4:3;
+ u64 i_iiq_depth:6;
+ u64 i_rsvd5:12;
+ u64 i_force_rep_cred:1;
+ u64 i_force_req_cred:1;
+ } ii_iwc_fld_s;
+} ii_iwc_u_t;
+
+/************************************************************************
+ * *
+ * Status in the II wrapper. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iws_u {
+ u64 ii_iws_regval;
+ struct {
+ u64 i_xn_rep_iq_credits:5;
+ u64 i_rsvd:3;
+ u64 i_xn_req_iq_credits:5;
+ u64 i_rsvd1:51;
+ } ii_iws_fld_s;
+} ii_iws_u_t;
+
+/************************************************************************
+ * *
+ * Masks errors in the IWEL register. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iweim_u {
+ u64 ii_iweim_regval;
+ struct {
+ u64 i_intr_timed_out:1;
+ u64 i_rsvd:7;
+ u64 i_cam_overflow:1;
+ u64 i_cam_read_miss:1;
+ u64 i_rsvd1:2;
+ u64 i_ioq_rep_underflow:1;
+ u64 i_ioq_req_underflow:1;
+ u64 i_ioq_rep_overflow:1;
+ u64 i_ioq_req_overflow:1;
+ u64 i_iiq_rep_overflow:1;
+ u64 i_iiq_req_overflow:1;
+ u64 i_rsvd2:6;
+ u64 i_ii_xn_rep_cred_overflow:1;
+ u64 i_ii_xn_req_cred_overflow:1;
+ u64 i_rsvd3:6;
+ u64 i_ii_xn_invalid_cmd:1;
+ u64 i_xn_ii_invalid_cmd:1;
+ u64 i_rsvd4:30;
+ } ii_iweim_fld_s;
+} ii_iweim_u_t;
+
+/************************************************************************
+ * *
+ * A write to this register causes a particular field in the *
+ * corresponding widget's PRB entry to be adjusted up or down by 1. *
+ * This counter should be used when recovering from error and reset *
+ * conditions. Note that software would be capable of causing *
+ * inadvertent overflow or underflow of these counters. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ipca_u {
+ u64 ii_ipca_regval;
+ struct {
+ u64 i_wid:4;
+ u64 i_adjust:1;
+ u64 i_rsvd_1:3;
+ u64 i_field:2;
+ u64 i_rsvd:54;
+ } ii_ipca_fld_s;
+} ii_ipca_u_t;
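+
+/*
+ * Sketch (not in the original header): compose an IPCA write that
+ * adjusts one PRB counter field for widget `wid`.  The ADJUST
+ * polarity and FIELD encodings are not documented here, so these
+ * arguments are passed through verbatim rather than assumed.
+ */
+static inline u64 ii_ipca_compose(u64 wid, u64 adjust, u64 field)
+{
+	ii_ipca_u_t ipca;
+
+	ipca.ii_ipca_regval = 0;
+	ipca.ii_ipca_fld_s.i_wid = wid;
+	ipca.ii_ipca_fld_s.i_adjust = adjust;
+	ipca.ii_ipca_fld_s.i_field = field;
+	return ipca.ii_ipca_regval;
+}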
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte0a_u {
+ u64 ii_iprte0a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte0a_fld_s;
+} ii_iprte0a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte1a_u {
+ u64 ii_iprte1a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte1a_fld_s;
+} ii_iprte1a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte2a_u {
+ u64 ii_iprte2a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte2a_fld_s;
+} ii_iprte2a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte3a_u {
+ u64 ii_iprte3a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte3a_fld_s;
+} ii_iprte3a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte4a_u {
+ u64 ii_iprte4a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte4a_fld_s;
+} ii_iprte4a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte5a_u {
+ u64 ii_iprte5a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte5a_fld_s;
+} ii_iprte5a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte6a_u {
+ u64 ii_iprte6a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte6a_fld_s;
+} ii_iprte6a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte7a_u {
+ u64 ii_iprte7a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+	} ii_iprte7a_fld_s;
+} ii_iprte7a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte0b_u {
+ u64 ii_iprte0b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte0b_fld_s;
+} ii_iprte0b_u_t;
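+
+/*
+ * Sketch (not in the original header): an IPRTE entry is split across
+ * an "a" register (widget, timeout count, valid) and a "b" register
+ * (address, initiator, source), so recovering the state of an
+ * outstanding PIO Read means reading both halves, e.g.:
+ */
+static inline int ii_iprte0_pending(ii_iprte0a_u_t a, ii_iprte0b_u_t b,
+				    u64 *addr)
+{
+	if (!a.ii_iprte0a_fld_s.i_vld)
+		return 0;		/* entry is free */
+	*addr = b.ii_iprte0b_fld_s.i_address;
+	return 1;
+}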
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte1b_u {
+ u64 ii_iprte1b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte1b_fld_s;
+} ii_iprte1b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte2b_u {
+ u64 ii_iprte2b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte2b_fld_s;
+} ii_iprte2b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte3b_u {
+ u64 ii_iprte3b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte3b_fld_s;
+} ii_iprte3b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte4b_u {
+ u64 ii_iprte4b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte4b_fld_s;
+} ii_iprte4b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte5b_u {
+ u64 ii_iprte5b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte5b_fld_s;
+} ii_iprte5b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte6b_u {
+ u64 ii_iprte6b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte6b_fld_s;
+} ii_iprte6b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprte7b_u {
+ u64 ii_iprte7b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte7b_fld_s;
+} ii_iprte7b_u_t;
+
+/************************************************************************
+ * *
+ * Description: SHub II contains a feature, absent in the Hub, that   *
+ * automatically cleans up after a Read Response timeout, including   *
+ * deallocation of the IPRTE and recovery of IBuf space. The          *
+ * inclusion of this register in SHub is for backward compatibility.  *
+ * A write to this register causes an entry from the table of *
+ * outstanding PIO Read Requests to be freed and returned to the *
+ * stack of free entries. This register is used in handling the *
+ * timeout errors that result in a PIO Reply never returning from *
+ * Crosstalk. *
+ * Note that this register does not affect the contents of the IPRTE *
+ * registers. The Valid bits in those registers have to be *
+ * specifically turned off by software. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ipdr_u {
+ u64 ii_ipdr_regval;
+ struct {
+ u64 i_te:3;
+ u64 i_rsvd_1:1;
+ u64 i_pnd:1;
+ u64 i_init_rpcnt:1;
+ u64 i_rsvd:58;
+ } ii_ipdr_fld_s;
+} ii_ipdr_u_t;
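+
+/*
+ * Sketch (not in the original header): the value written to IPDR to
+ * free PIO Read table entry `te`.  As noted above, the IPRTE Valid
+ * bit must still be cleared by software separately.
+ */
+static inline u64 ii_ipdr_compose(u64 te)
+{
+	ii_ipdr_u_t ipdr;
+
+	ipdr.ii_ipdr_regval = 0;
+	ipdr.ii_ipdr_fld_s.i_te = te;
+	return ipdr.ii_ipdr_regval;
+}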
+
+/************************************************************************
+ * *
+ * A write to this register causes a CRB entry to be returned to the *
+ * queue of free CRBs. The entry should have previously been cleared *
+ * (mark bit) via backdoor access to the pertinent CRB entry. This *
+ * register is used in the last step of handling the errors that are *
+ * captured and marked in CRB entries. Briefly: 1) first error for *
+ * DMA write from a particular device, and first error for a *
+ * particular BTE stream, lead to a marked CRB entry, and processor *
+ * interrupt, 2) software reads the error information captured in the *
+ * CRB entry, and presumably takes some corrective action, 3) *
+ * software clears the mark bit, and finally 4) software writes to *
+ * the ICDR register to return the CRB entry to the list of free CRB *
+ * entries. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icdr_u {
+ u64 ii_icdr_regval;
+ struct {
+ u64 i_crb_num:4;
+ u64 i_pnd:1;
+ u64 i_rsvd:59;
+ } ii_icdr_fld_s;
+} ii_icdr_u_t;
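+
+/*
+ * Sketch (not in the original header): step 4 of the recovery
+ * sequence above, returning CRB entry `crb_num` to the free queue.
+ */
+static inline u64 ii_icdr_compose(u64 crb_num)
+{
+	ii_icdr_u_t icdr;
+
+	icdr.ii_icdr_regval = 0;
+	icdr.ii_icdr_fld_s.i_crb_num = crb_num;
+	return icdr.ii_icdr_regval;
+}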
+
+/************************************************************************
+ * *
+ * This register provides debug access to two FIFOs inside of II. *
+ * Both IOQ_MAX* fields of this register contain the instantaneous *
+ * depth (in units of the number of available entries) of the *
+ * associated IOQ FIFO. A read of this register will return the *
+ * number of free entries on each FIFO at the time of the read. So *
+ * when a FIFO is idle, the associated field contains the maximum *
+ * depth of the FIFO. This register is writable for debug reasons *
+ * and is intended to be written with the maximum desired FIFO depth *
+ * while the FIFO is idle. Software must assure that II is idle when *
+ * this register is written. If there are any active entries in any *
+ * of these FIFOs when this register is written, the results are *
+ * undefined. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ifdr_u {
+ u64 ii_ifdr_regval;
+ struct {
+ u64 i_ioq_max_rq:7;
+ u64 i_set_ioq_rq:1;
+ u64 i_ioq_max_rp:7;
+ u64 i_set_ioq_rp:1;
+ u64 i_rsvd:48;
+ } ii_ifdr_fld_s;
+} ii_ifdr_u_t;
+
+/************************************************************************
+ * *
+ * This register allows the II to become sluggish in removing *
+ * messages from its inbound queue (IIQ). This will cause messages to *
+ * back up in either virtual channel. Disabling the "molasses" mode *
+ * subsequently allows the II to be tested under stress. In the *
+ * sluggish ("Molasses") mode, the localized effects of congestion *
+ * can be observed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iiap_u {
+ u64 ii_iiap_regval;
+ struct {
+ u64 i_rq_mls:6;
+ u64 i_rsvd_1:2;
+ u64 i_rp_mls:6;
+ u64 i_rsvd:50;
+ } ii_iiap_fld_s;
+} ii_iiap_u_t;
+
+/************************************************************************
+ * *
+ * This register allows several parameters of CRB operation to be *
+ * set. Note that writing to this register can have catastrophic side *
+ * effects, if the CRB is not quiescent, i.e. if the CRB is *
+ * processing protocol messages when the write occurs. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icmr_u {
+ u64 ii_icmr_regval;
+ struct {
+ u64 i_sp_msg:1;
+ u64 i_rd_hdr:1;
+ u64 i_rsvd_4:2;
+ u64 i_c_cnt:4;
+ u64 i_rsvd_3:4;
+ u64 i_clr_rqpd:1;
+ u64 i_clr_rppd:1;
+ u64 i_rsvd_2:2;
+ u64 i_fc_cnt:4;
+ u64 i_crb_vld:15;
+ u64 i_crb_mark:15;
+ u64 i_rsvd_1:2;
+ u64 i_precise:1;
+ u64 i_rsvd:11;
+ } ii_icmr_fld_s;
+} ii_icmr_u_t;
+
+/************************************************************************
+ * *
+ * This register allows control of the table portion of the CRB *
+ * logic via software. Control operations from this register have *
+ * priority over all incoming Crosstalk or BTE requests. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iccr_u {
+ u64 ii_iccr_regval;
+ struct {
+ u64 i_crb_num:4;
+ u64 i_rsvd_1:4;
+ u64 i_cmd:8;
+ u64 i_pending:1;
+ u64 i_rsvd:47;
+ } ii_iccr_fld_s;
+} ii_iccr_u_t;
+
+/************************************************************************
+ * *
+ * This register allows the maximum timeout value to be programmed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icto_u {
+ u64 ii_icto_regval;
+ struct {
+ u64 i_timeout:8;
+ u64 i_rsvd:56;
+ } ii_icto_fld_s;
+} ii_icto_u_t;
+
+/************************************************************************
+ * *
+ * This register allows the timeout prescalar to be programmed. An *
+ * internal counter is associated with this register. When the *
+ * internal counter reaches the value of the PRESCALE field, the *
+ * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] *
+ * field). The internal counter resets to zero, and then continues *
+ * counting. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ictp_u {
+ u64 ii_ictp_regval;
+ struct {
+ u64 i_prescale:24;
+ u64 i_rsvd:40;
+ } ii_ictp_fld_s;
+} ii_ictp_u_t;
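+
+/*
+ * Sketch (not in the original header): combining the two registers
+ * above, a valid CRB times out after roughly PRESCALE * TIMEOUT II
+ * clocks (the ICTP prescalar bumps every CRB timer once per PRESCALE
+ * clocks, and ICTO caps the timer value).
+ */
+static inline u64 ii_crb_timeout_clocks(ii_ictp_u_t ictp, ii_icto_u_t icto)
+{
+	return (u64)ictp.ii_ictp_fld_s.i_prescale *
+	       icto.ii_icto_fld_s.i_timeout;
+}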
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * The CRB Entry registers can be conceptualized as rows and columns *
+ * (illustrated in the table above). Each row contains the five       *
+ * registers required for a single CRB Entry. The first doubleword    *
+ * (column) for each entry is labeled A, and the second doubleword *
+ * (higher address) is labeled B, the third doubleword is labeled C, *
+ * the fourth doubleword is labeled D and the fifth doubleword is *
+ * labeled E. All CRB entries have their addresses on a quarter *
+ * cacheline aligned boundary. *
+ * Upon reset, only the following fields are initialized: valid *
+ * (VLD), priority count, timeout, timeout valid, and context valid. *
+ * All other bits should be cleared by software before use (after *
+ * recovering any potential error state from before the reset). *
+ * The following five tables summarize the format for the five        *
+ * registers that are used for each ICRB# Entry.                      *
+ * *
+ ************************************************************************/
+
+typedef union ii_icrb0_a_u {
+ u64 ii_icrb0_a_regval;
+ struct {
+ u64 ia_iow:1;
+ u64 ia_vld:1;
+ u64 ia_addr:47;
+ u64 ia_tnum:5;
+ u64 ia_sidn:4;
+ u64 ia_rsvd:6;
+ } ii_icrb0_a_fld_s;
+} ii_icrb0_a_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icrb0_b_u {
+ u64 ii_icrb0_b_regval;
+ struct {
+ u64 ib_xt_err:1;
+ u64 ib_mark:1;
+ u64 ib_ln_uce:1;
+ u64 ib_errcode:3;
+ u64 ib_error:1;
+ u64 ib_stall__bte_1:1;
+ u64 ib_stall__bte_0:1;
+ u64 ib_stall__intr:1;
+ u64 ib_stall_ib:1;
+ u64 ib_intvn:1;
+ u64 ib_wb:1;
+ u64 ib_hold:1;
+ u64 ib_ack:1;
+ u64 ib_resp:1;
+ u64 ib_ack_cnt:11;
+ u64 ib_rsvd:7;
+ u64 ib_exc:5;
+ u64 ib_init:3;
+ u64 ib_imsg:8;
+ u64 ib_imsgtype:2;
+ u64 ib_use_old:1;
+ u64 ib_rsvd_1:11;
+ } ii_icrb0_b_fld_s;
+} ii_icrb0_b_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icrb0_c_u {
+ u64 ii_icrb0_c_regval;
+ struct {
+ u64 ic_source:15;
+ u64 ic_size:2;
+ u64 ic_ct:1;
+ u64 ic_bte_num:1;
+ u64 ic_gbr:1;
+ u64 ic_resprqd:1;
+ u64 ic_bo:1;
+ u64 ic_suppl:15;
+ u64 ic_rsvd:27;
+ } ii_icrb0_c_fld_s;
+} ii_icrb0_c_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icrb0_d_u {
+ u64 ii_icrb0_d_regval;
+ struct {
+ u64 id_pa_be:43;
+ u64 id_bte_op:1;
+ u64 id_pr_psc:4;
+ u64 id_pr_cnt:4;
+ u64 id_sleep:1;
+ u64 id_rsvd:11;
+ } ii_icrb0_d_fld_s;
+} ii_icrb0_d_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icrb0_e_u {
+ u64 ii_icrb0_e_regval;
+ struct {
+ u64 ie_timeout:8;
+ u64 ie_context:15;
+ u64 ie_rsvd:1;
+ u64 ie_tvld:1;
+ u64 ie_cvld:1;
+ u64 ie_rsvd_0:38;
+ } ii_icrb0_e_fld_s;
+} ii_icrb0_e_u_t;
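+
+/*
+ * Sketch (not in the original header): a CRB entry's state is spread
+ * across the five registers above, so summarizing one entry for error
+ * handling means combining fields from several of them, e.g.:
+ */
+static inline int ii_icrb0_errcode(ii_icrb0_a_u_t a, ii_icrb0_b_u_t b)
+{
+	if (!a.ii_icrb0_a_fld_s.ia_vld || !b.ii_icrb0_b_fld_s.ib_error)
+		return -1;		/* no error captured */
+	return (int)b.ii_icrb0_b_fld_s.ib_errcode;
+}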
+
+/************************************************************************
+ * *
+ * This register contains the lower 64 bits of the header of the *
+ * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
+ * register is set. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icsml_u {
+ u64 ii_icsml_regval;
+ struct {
+ u64 i_tt_addr:47;
+ u64 i_newsuppl_ex:14;
+ u64 i_reserved:2;
+ u64 i_overflow:1;
+ } ii_icsml_fld_s;
+} ii_icsml_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the middle 64 bits of the header of the *
+ * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
+ * register is set. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icsmm_u {
+ u64 ii_icsmm_regval;
+ struct {
+ u64 i_tt_ack_cnt:11;
+ u64 i_reserved:53;
+ } ii_icsmm_fld_s;
+} ii_icsmm_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the microscopic state, all the inputs to *
+ * the protocol table, captured with the spurious message. Valid when *
+ * the SP_MSG bit in the ICMR register is set. *
+ * *
+ ************************************************************************/
+
+typedef union ii_icsmh_u {
+ u64 ii_icsmh_regval;
+ struct {
+ u64 i_tt_vld:1;
+ u64 i_xerr:1;
+ u64 i_ft_cwact_o:1;
+ u64 i_ft_wact_o:1;
+ u64 i_ft_active_o:1;
+ u64 i_sync:1;
+ u64 i_mnusg:1;
+ u64 i_mnusz:1;
+ u64 i_plusz:1;
+ u64 i_plusg:1;
+ u64 i_tt_exc:5;
+ u64 i_tt_wb:1;
+ u64 i_tt_hold:1;
+ u64 i_tt_ack:1;
+ u64 i_tt_resp:1;
+ u64 i_tt_intvn:1;
+ u64 i_g_stall_bte1:1;
+ u64 i_g_stall_bte0:1;
+ u64 i_g_stall_il:1;
+ u64 i_g_stall_ib:1;
+ u64 i_tt_imsg:8;
+ u64 i_tt_imsgtype:2;
+ u64 i_tt_use_old:1;
+ u64 i_tt_respreqd:1;
+ u64 i_tt_bte_num:1;
+ u64 i_cbn:1;
+ u64 i_match:1;
+ u64 i_rpcnt_lt_34:1;
+ u64 i_rpcnt_ge_34:1;
+ u64 i_rpcnt_lt_18:1;
+ u64 i_rpcnt_ge_18:1;
+ u64 i_rpcnt_lt_2:1;
+ u64 i_rpcnt_ge_2:1;
+ u64 i_rqcnt_lt_18:1;
+ u64 i_rqcnt_ge_18:1;
+ u64 i_rqcnt_lt_2:1;
+ u64 i_rqcnt_ge_2:1;
+ u64 i_tt_device:7;
+ u64 i_tt_init:3;
+ u64 i_reserved:5;
+ } ii_icsmh_fld_s;
+} ii_icsmh_u_t;
+
+/************************************************************************
+ * *
+ * The Shub DEBUG unit provides a 3-bit selection signal to the *
+ * II core and a 3-bit selection signal to the fsbclk domain in the II *
+ * wrapper. *
+ * *
+ ************************************************************************/
+
+typedef union ii_idbss_u {
+ u64 ii_idbss_regval;
+ struct {
+ u64 i_iioclk_core_submenu:3;
+ u64 i_rsvd:5;
+ u64 i_fsbclk_wrapper_submenu:3;
+ u64 i_rsvd_1:5;
+ u64 i_iioclk_menu:5;
+ u64 i_rsvd_2:43;
+ } ii_idbss_fld_s;
+} ii_idbss_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register is used to set up the length for a *
+ * transfer and then to monitor the progress of that transfer. This *
+ * register needs to be initialized before a transfer is started. A *
+ * legitimate write to this register will set the Busy bit, clear the *
+ * Error bit, and initialize the length to the value desired. *
+ * While the transfer is in progress, hardware will decrement the *
+ * length field with each successful block that is copied. Once the *
+ * transfer completes, hardware will clear the Busy bit. The length *
+ * field will also contain the number of cache lines left to be *
+ * transferred. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibls0_u {
+ u64 ii_ibls0_regval;
+ struct {
+ u64 i_length:16;
+ u64 i_error:1;
+ u64 i_rsvd_1:3;
+ u64 i_busy:1;
+ u64 i_rsvd:43;
+ } ii_ibls0_fld_s;
+} ii_ibls0_u_t;
+
+/************************************************************************
+ * *
+ * This register should be loaded before a transfer is started. The *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
+ * address as described in Section 1.3, Figure 2 and Figure 3. Since *
+ * the bottom 7 bits of the address are always taken to be zero, BTE *
+ * transfers are always cacheline-aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibsa0_u {
+ u64 ii_ibsa0_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:42;
+ u64 i_rsvd:15;
+ } ii_ibsa0_fld_s;
+} ii_ibsa0_u_t;
+
+/************************************************************************
+ * *
+ * This register should be loaded before a transfer is started. The *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
+ * address as described in Section 1.3, Figure 2 and Figure 3. Since *
+ * the bottom 7 bits of the address are always taken to be zero, BTE *
+ * transfers are always cacheline-aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibda0_u {
+ u64 ii_ibda0_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:42;
+ u64 i_rsvd:15;
+ } ii_ibda0_fld_s;
+} ii_ibda0_u_t;
+
+/************************************************************************
+ * *
+ * Writing to this register sets up the attributes of the transfer *
+ * and initiates the transfer operation. Reading this register has *
+ * the side effect of terminating any transfer in progress. Note: *
+ * stopping a transfer midstream could have an adverse impact on the *
+ * other BTE. If a BTE stream has to be stopped (due to error *
+ * handling for example), both BTE streams should be stopped and *
+ * their transfers discarded. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibct0_u {
+ u64 ii_ibct0_regval;
+ struct {
+ u64 i_zerofill:1;
+ u64 i_rsvd_2:3;
+ u64 i_notify:1;
+ u64 i_rsvd_1:3;
+ u64 i_poison:1;
+ u64 i_rsvd:55;
+ } ii_ibct0_fld_s;
+} ii_ibct0_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the address to which the WINV is sent. *
+ * This address has to be cache line aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibna0_u {
+ u64 ii_ibna0_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:42;
+ u64 i_rsvd:15;
+ } ii_ibna0_fld_s;
+} ii_ibna0_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the programmable level as well as the node *
+ * ID and PI unit of the processor to which the interrupt will be *
+ * sent. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibia0_u {
+ u64 ii_ibia0_regval;
+ struct {
+ u64 i_rsvd_2:1;
+ u64 i_node_id:11;
+ u64 i_rsvd_1:4;
+ u64 i_level:7;
+ u64 i_rsvd:41;
+ } ii_ibia0_fld_s;
+} ii_ibia0_u_t;
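+
+/*
+ * Illustrative programming sequence for BTE 0, pieced together from the
+ * register descriptions above (an editor's sketch, not part of this
+ * interface; REMOTE_HUB_S()/REMOTE_HUB_L() are assumed to be the usual
+ * SHub MMR accessors and "nasid" the hub that owns the BTE):
+ *
+ *	REMOTE_HUB_S(nasid, IIO_IBSA0, src_pa);      source, cacheline aligned
+ *	REMOTE_HUB_S(nasid, IIO_IBDA0, dst_pa);      destination
+ *	REMOTE_HUB_S(nasid, IIO_IBNA0, notify_pa);   WINV target
+ *	REMOTE_HUB_S(nasid, IIO_IBLS0, nlines);      sets Busy, clears Error
+ *	REMOTE_HUB_S(nasid, IIO_IBCT0, IBCT_NOTIFY); attributes; starts xfer
+ *
+ * Completion can then be polled through the Busy bit:
+ *
+ *	ii_ibls0_u_t ibls;
+ *	do {
+ *		ibls.ii_ibls0_regval = REMOTE_HUB_L(nasid, IIO_IBLS0);
+ *	} while (ibls.ii_ibls0_fld_s.i_busy);
+ */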
+
+/************************************************************************
+ * *
+ * Description: This register is used to set up the length for a *
+ * transfer and then to monitor the progress of that transfer. This *
+ * register needs to be initialized before a transfer is started. A *
+ * legitimate write to this register will set the Busy bit, clear the *
+ * Error bit, and initialize the length to the value desired. *
+ * While the transfer is in progress, hardware will decrement the *
+ * length field with each successful block that is copied. Once the *
+ * transfer completes, hardware will clear the Busy bit. The length *
+ * field will also contain the number of cache lines left to be *
+ * transferred. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibls1_u {
+ u64 ii_ibls1_regval;
+ struct {
+ u64 i_length:16;
+ u64 i_error:1;
+ u64 i_rsvd_1:3;
+ u64 i_busy:1;
+ u64 i_rsvd:43;
+ } ii_ibls1_fld_s;
+} ii_ibls1_u_t;
+
+/************************************************************************
+ * *
+ * This register should be loaded before a transfer is started. The *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
+ * address as described in Section 1.3, Figure 2 and Figure 3. Since *
+ * the bottom 7 bits of the address are always taken to be zero, BTE *
+ * transfers are always cacheline-aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibsa1_u {
+ u64 ii_ibsa1_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:33;
+ u64 i_rsvd:24;
+ } ii_ibsa1_fld_s;
+} ii_ibsa1_u_t;
+
+/************************************************************************
+ * *
+ * This register should be loaded before a transfer is started. The *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
+ * address as described in Section 1.3, Figure 2 and Figure 3. Since *
+ * the bottom 7 bits of the address are always taken to be zero, BTE *
+ * transfers are always cacheline-aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibda1_u {
+ u64 ii_ibda1_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:33;
+ u64 i_rsvd:24;
+ } ii_ibda1_fld_s;
+} ii_ibda1_u_t;
+
+/************************************************************************
+ * *
+ * Writing to this register sets up the attributes of the transfer *
+ * and initiates the transfer operation. Reading this register has *
+ * the side effect of terminating any transfer in progress. Note: *
+ * stopping a transfer midstream could have an adverse impact on the *
+ * other BTE. If a BTE stream has to be stopped (due to error *
+ * handling for example), both BTE streams should be stopped and *
+ * their transfers discarded. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibct1_u {
+ u64 ii_ibct1_regval;
+ struct {
+ u64 i_zerofill:1;
+ u64 i_rsvd_2:3;
+ u64 i_notify:1;
+ u64 i_rsvd_1:3;
+ u64 i_poison:1;
+ u64 i_rsvd:55;
+ } ii_ibct1_fld_s;
+} ii_ibct1_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the address to which the WINV is sent. *
+ * This address has to be cache line aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibna1_u {
+ u64 ii_ibna1_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:33;
+ u64 i_rsvd:24;
+ } ii_ibna1_fld_s;
+} ii_ibna1_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the programmable level as well as the node *
+ * ID and PI unit of the processor to which the interrupt will be *
+ * sent. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibia1_u {
+ u64 ii_ibia1_regval;
+ struct {
+ u64 i_pi_id:1;
+ u64 i_node_id:8;
+ u64 i_rsvd_1:7;
+ u64 i_level:7;
+ u64 i_rsvd:41;
+ } ii_ibia1_fld_s;
+} ii_ibia1_u_t;
+
+/************************************************************************
+ * *
+ * This register defines the resources that feed information into *
+ * the two performance counters located in the IO Performance *
+ * Profiling Register. There are 17 different quantities that can be *
+ * measured. Given these 17 different options, the two performance *
+ * counters have 15 of them in common; menu selections 0 through 0xE *
+ * are identical for each performance counter. As for the other two *
+ * options, one is available from one performance counter and the *
+ * other is available from the other performance counter. Hence, the *
+ * II supports all 17*16=272 possible combinations of quantities to *
+ * measure. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ipcr_u {
+ u64 ii_ipcr_regval;
+ struct {
+ u64 i_ippr0_c:4;
+ u64 i_ippr1_c:4;
+ u64 i_icct:8;
+ u64 i_rsvd:48;
+ } ii_ipcr_fld_s;
+} ii_ipcr_u_t;
+
+/************************************************************************
+ * *
+ * This is the IO Performance Profiling Register. It holds the two *
+ * performance counters selected via the IPCR register above. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ippr_u {
+ u64 ii_ippr_regval;
+ struct {
+ u64 i_ippr0:32;
+ u64 i_ippr1:32;
+ } ii_ippr_fld_s;
+} ii_ippr_u_t;
+
+/************************************************************************
+ * *
+ * The following defines, which were not formed into structures, are *
+ * probably identical to another register; the name of that register *
+ * is provided against each of them. This information needs to be *
+ * checked carefully. *
+ * *
+ * IIO_ICRB1_A IIO_ICRB0_A *
+ * IIO_ICRB1_B IIO_ICRB0_B *
+ * IIO_ICRB1_C IIO_ICRB0_C *
+ * IIO_ICRB1_D IIO_ICRB0_D *
+ * IIO_ICRB1_E IIO_ICRB0_E *
+ * IIO_ICRB2_A IIO_ICRB0_A *
+ * IIO_ICRB2_B IIO_ICRB0_B *
+ * IIO_ICRB2_C IIO_ICRB0_C *
+ * IIO_ICRB2_D IIO_ICRB0_D *
+ * IIO_ICRB2_E IIO_ICRB0_E *
+ * IIO_ICRB3_A IIO_ICRB0_A *
+ * IIO_ICRB3_B IIO_ICRB0_B *
+ * IIO_ICRB3_C IIO_ICRB0_C *
+ * IIO_ICRB3_D IIO_ICRB0_D *
+ * IIO_ICRB3_E IIO_ICRB0_E *
+ * IIO_ICRB4_A IIO_ICRB0_A *
+ * IIO_ICRB4_B IIO_ICRB0_B *
+ * IIO_ICRB4_C IIO_ICRB0_C *
+ * IIO_ICRB4_D IIO_ICRB0_D *
+ * IIO_ICRB4_E IIO_ICRB0_E *
+ * IIO_ICRB5_A IIO_ICRB0_A *
+ * IIO_ICRB5_B IIO_ICRB0_B *
+ * IIO_ICRB5_C IIO_ICRB0_C *
+ * IIO_ICRB5_D IIO_ICRB0_D *
+ * IIO_ICRB5_E IIO_ICRB0_E *
+ * IIO_ICRB6_A IIO_ICRB0_A *
+ * IIO_ICRB6_B IIO_ICRB0_B *
+ * IIO_ICRB6_C IIO_ICRB0_C *
+ * IIO_ICRB6_D IIO_ICRB0_D *
+ * IIO_ICRB6_E IIO_ICRB0_E *
+ * IIO_ICRB7_A IIO_ICRB0_A *
+ * IIO_ICRB7_B IIO_ICRB0_B *
+ * IIO_ICRB7_C IIO_ICRB0_C *
+ * IIO_ICRB7_D IIO_ICRB0_D *
+ * IIO_ICRB7_E IIO_ICRB0_E *
+ * IIO_ICRB8_A IIO_ICRB0_A *
+ * IIO_ICRB8_B IIO_ICRB0_B *
+ * IIO_ICRB8_C IIO_ICRB0_C *
+ * IIO_ICRB8_D IIO_ICRB0_D *
+ * IIO_ICRB8_E IIO_ICRB0_E *
+ * IIO_ICRB9_A IIO_ICRB0_A *
+ * IIO_ICRB9_B IIO_ICRB0_B *
+ * IIO_ICRB9_C IIO_ICRB0_C *
+ * IIO_ICRB9_D IIO_ICRB0_D *
+ * IIO_ICRB9_E IIO_ICRB0_E *
+ * IIO_ICRBA_A IIO_ICRB0_A *
+ * IIO_ICRBA_B IIO_ICRB0_B *
+ * IIO_ICRBA_C IIO_ICRB0_C *
+ * IIO_ICRBA_D IIO_ICRB0_D *
+ * IIO_ICRBA_E IIO_ICRB0_E *
+ * IIO_ICRBB_A IIO_ICRB0_A *
+ * IIO_ICRBB_B IIO_ICRB0_B *
+ * IIO_ICRBB_C IIO_ICRB0_C *
+ * IIO_ICRBB_D IIO_ICRB0_D *
+ * IIO_ICRBB_E IIO_ICRB0_E *
+ * IIO_ICRBC_A IIO_ICRB0_A *
+ * IIO_ICRBC_B IIO_ICRB0_B *
+ * IIO_ICRBC_C IIO_ICRB0_C *
+ * IIO_ICRBC_D IIO_ICRB0_D *
+ * IIO_ICRBC_E IIO_ICRB0_E *
+ * IIO_ICRBD_A IIO_ICRB0_A *
+ * IIO_ICRBD_B IIO_ICRB0_B *
+ * IIO_ICRBD_C IIO_ICRB0_C *
+ * IIO_ICRBD_D IIO_ICRB0_D *
+ * IIO_ICRBD_E IIO_ICRB0_E *
+ * IIO_ICRBE_A IIO_ICRB0_A *
+ * IIO_ICRBE_B IIO_ICRB0_B *
+ * IIO_ICRBE_C IIO_ICRB0_C *
+ * IIO_ICRBE_D IIO_ICRB0_D *
+ * IIO_ICRBE_E IIO_ICRB0_E *
+ * *
+ ************************************************************************/
+
+/*
+ * Slightly friendlier names for some common registers.
+ */
+#define IIO_WIDGET IIO_WID /* Widget identification */
+#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */
+#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */
+#define IIO_PROTECT IIO_ILAPR /* IO interface protection */
+#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */
+#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */
+#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */
+#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */
+#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */
+#define IIO_LLP_LOG IIO_ILLR /* LLP log */
+#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout */
+#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */
+#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */
+#define IIO_IGFX_0 IIO_IGFX0
+#define IIO_IGFX_1 IIO_IGFX1
+#define IIO_IBCT_0 IIO_IBCT0
+#define IIO_IBCT_1 IIO_IBCT1
+#define IIO_IBLS_0 IIO_IBLS0
+#define IIO_IBLS_1 IIO_IBLS1
+#define IIO_IBSA_0 IIO_IBSA0
+#define IIO_IBSA_1 IIO_IBSA1
+#define IIO_IBDA_0 IIO_IBDA0
+#define IIO_IBDA_1 IIO_IBDA1
+#define IIO_IBNA_0 IIO_IBNA0
+#define IIO_IBNA_1 IIO_IBNA1
+#define IIO_IBIA_0 IIO_IBIA0
+#define IIO_IBIA_1 IIO_IBIA1
+#define IIO_IOPRB_0 IIO_IPRB0
+
+#define IIO_PRTE_A(_x) (IIO_IPRTE0_A + (8 * (_x)))
+#define IIO_PRTE_B(_x) (IIO_IPRTE0_B + (8 * (_x)))
+#define IIO_NUM_PRTES 8 /* Total number of PIO Request Table entries */
+#define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */
+#define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */
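+
+/*
+ * For example (illustrative): widget 0xa uses PRTE number 2, so
+ * IIO_WIDPRTE_A(0xa) == IIO_PRTE_A(2) == IIO_IPRTE0_A + 0x10.
+ */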
+
+#define IIO_NUM_IPRBS 9
+
+#define IIO_LLP_CSR_IS_UP 0x00002000
+#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
+#define IIO_LLP_CSR_LLP_STAT_SHFT 12
+
+#define IIO_LLP_CB_MAX 0xffff /* in ILLR CB_CNT, Max Check Bit errors */
+#define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */
+
+/* key to IIO_PROTECT_OVRRD */
+#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */
+
+/* BTE register names */
+#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */
+#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
+#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */
+#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */
+#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
+#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */
+#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */
+#define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */
+
+/* BTE register offsets from base */
+#define BTEOFF_STAT 0
+#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0)
+#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0)
+#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0)
+#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
+#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0)
+
+/* names used in shub diags */
+#define IIO_BASE_BTE0 IIO_IBLS_0
+#define IIO_BASE_BTE1 IIO_IBLS_1
+
+/*
+ * Macro which takes the widget number and returns the
+ * IO PRB address of that widget.
+ * The value _x is expected to be a widget number: either 0
+ * or in the range 8 - 0xF.
+ */
+#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
+ (_x) : \
+ (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
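+
+/*
+ * For example (illustrative): IIO_IOPRB(0) is IIO_IOPRB_0 itself, while
+ * IIO_IOPRB(0x8) == IIO_IOPRB_0 + 0x8 and IIO_IOPRB(0xf) == IIO_IOPRB_0
+ * + 0x40, giving one PRB for widget 0 plus one for each of widgets
+ * 8 - 0xF (IIO_NUM_IPRBS == 9 in all).
+ */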
+
+/* GFX Flow Control Node/Widget Register */
+#define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */
+#define IIO_IGFX_W_NUM_MASK ((1<<IIO_IGFX_W_NUM_BITS)-1)
+#define IIO_IGFX_W_NUM_SHIFT 0
+#define IIO_IGFX_PI_NUM_BITS 1 /* size of PI num field */
+#define IIO_IGFX_PI_NUM_MASK ((1<<IIO_IGFX_PI_NUM_BITS)-1)
+#define IIO_IGFX_PI_NUM_SHIFT 4
+#define IIO_IGFX_N_NUM_BITS 8 /* size of node num field */
+#define IIO_IGFX_N_NUM_MASK ((1<<IIO_IGFX_N_NUM_BITS)-1)
+#define IIO_IGFX_N_NUM_SHIFT 5
+#define IIO_IGFX_P_NUM_BITS 1 /* size of processor num field */
+#define IIO_IGFX_P_NUM_MASK ((1<<IIO_IGFX_P_NUM_BITS)-1)
+#define IIO_IGFX_P_NUM_SHIFT 16
+#define IIO_IGFX_INIT(widget, pi, node, cpu) (\
+ (((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) | \
+ (((pi) & IIO_IGFX_PI_NUM_MASK)<< IIO_IGFX_PI_NUM_SHIFT)| \
+ (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
+ (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT))
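+
+/*
+ * For example (illustrative): targeting widget 0xa behind PI 0 of
+ * nasid 1, cpu 0:
+ *
+ *	u64 igfx = IIO_IGFX_INIT(0xa, 0, 1, 0);    == (1 << 5) | 0xa == 0x2a
+ */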
+
+/* Scratch registers (all bits available) */
+#define IIO_SCRATCH_REG0 IIO_ISCR0
+#define IIO_SCRATCH_REG1 IIO_ISCR1
+#define IIO_SCRATCH_MASK 0xffffffffffffffffUL
+
+#define IIO_SCRATCH_BIT0_0 0x0000000000000001UL
+#define IIO_SCRATCH_BIT0_1 0x0000000000000002UL
+#define IIO_SCRATCH_BIT0_2 0x0000000000000004UL
+#define IIO_SCRATCH_BIT0_3 0x0000000000000008UL
+#define IIO_SCRATCH_BIT0_4 0x0000000000000010UL
+#define IIO_SCRATCH_BIT0_5 0x0000000000000020UL
+#define IIO_SCRATCH_BIT0_6 0x0000000000000040UL
+#define IIO_SCRATCH_BIT0_7 0x0000000000000080UL
+#define IIO_SCRATCH_BIT0_8 0x0000000000000100UL
+#define IIO_SCRATCH_BIT0_9 0x0000000000000200UL
+#define IIO_SCRATCH_BIT0_A 0x0000000000000400UL
+
+#define IIO_SCRATCH_BIT1_0 0x0000000000000001UL
+#define IIO_SCRATCH_BIT1_1 0x0000000000000002UL
+/* IO Translation Table Entries */
+#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */
+ /* Hw manuals number them 1..7! */
+/*
+ * IIO_IMEM Register fields.
+ */
+#define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */
+#define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */
+#define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 shut down due to error */
+
+/*
+ * As a permanent workaround for a bug in the PI side of the shub, we've
+ * redefined big window 7 as small window 0.
+ XXX does this still apply for SN1??
+ */
+#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
+
+/*
+ * Use the top big window as a surrogate for the first small window
+ */
+#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
+
+#define ILCSR_WARM_RESET 0x100
+
+/*
+ * CRB manipulation macros
+ * The CRB macros are slightly complicated, since there are up to
+ * five registers (A through E) associated with each CRB entry.
+ */
+#define IIO_NUM_CRBS 15 /* Number of CRBs */
+#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
+#define IIO_ICRB_OFFSET 8
+#define IIO_ICRB_0 IIO_ICRB0_A
+#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
+/* XXX - This is now tunable:
+ #define IIO_FIRST_PC_ENTRY 12
+ */
+
+#define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x))))
+#define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET))
+#define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET))
+#define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET))
+#define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET))
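+
+/*
+ * For example (illustrative): the five registers of CRB entry 1 start
+ * at IIO_ICRB_A(1) == IIO_ICRB0_A + 0x30, with _B through _E following
+ * at 8-byte (IIO_ICRB_OFFSET) intervals.
+ */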
+
+#define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7)
+
+/*
+ * values for "ecode" field
+ */
+#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
+#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
+#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
+ * e.g. WINV to a read-only line. */
+#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
+#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
+#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
+#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
+#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
+
+/*
+ * Values for field imsgtype
+ */
+#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming message from Xtalk */
+#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
+#define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */
+#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
+
+/*
+ * values for field initiator.
+ */
+#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
+#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
+#define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */
+#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
+#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */
+
+/*
+ * Number of credits Hub widget has while sending req/response to
+ * xbow.
+ * Value of 3 is required by Xbow 1.1
+ * We may be able to increase this to 4 with Xbow 1.2.
+ */
+#define HUBII_XBOW_CREDIT 3
+#define HUBII_XBOW_REV2_CREDIT 4
+
+/*
+ * Number of credits that xtalk devices should use when communicating
+ * with a SHub (depth of SHub's queue).
+ */
+#define HUB_CREDIT 4
+
+/*
+ * Some IIO_PRB fields
+ */
+#define IIO_PRB_MULTI_ERR (1LL << 63)
+#define IIO_PRB_SPUR_RD (1LL << 51)
+#define IIO_PRB_SPUR_WR (1LL << 50)
+#define IIO_PRB_RD_TO (1LL << 49)
+#define IIO_PRB_ERROR (1LL << 48)
+
+/*************************************************************************
+
+ Some of the IIO field masks and shifts are defined here.
+ This is in order to maintain compatibility with SN0 and SN1 code.
+
+**************************************************************************/
+
+/*
+ * ICMR register fields
+ * (Note: the IIO_ICMR_P_CNT and IIO_ICMR_PC_VLD from Hub are not
+ * present in SHub)
+ */
+
+#define IIO_ICMR_CRB_VLD_SHFT 20
+#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)
+
+#define IIO_ICMR_FC_CNT_SHFT 16
+#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT)
+
+#define IIO_ICMR_C_CNT_SHFT 4
+#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT)
+
+#define IIO_ICMR_PRECISE (1UL << 52)
+#define IIO_ICMR_CLR_RPPD (1UL << 13)
+#define IIO_ICMR_CLR_RQPD (1UL << 12)
+
+/*
+ * IIO PIO Deallocation register field masks: (IIO_IPDR)
+ XXX present but not needed in bedrock? See the manual.
+ */
+#define IIO_IPDR_PND (1 << 4)
+
+/*
+ * IIO CRB deallocation register field masks: (IIO_ICDR)
+ */
+#define IIO_ICDR_PND (1 << 4)
+
+/*
+ * IO BTE Length/Status (IIO_IBLS) register bit field definitions
+ */
+#define IBLS_BUSY (0x1UL << 20)
+#define IBLS_ERROR_SHFT 16
+#define IBLS_ERROR (0x1UL << IBLS_ERROR_SHFT)
+#define IBLS_LENGTH_MASK 0xffff
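+
+/*
+ * Illustrative decoding of a raw IBLS value with the masks above:
+ *
+ *	u64 ibls = ...;				read of IIO_IBLS_0/1
+ *	if (!(ibls & IBLS_BUSY)) {
+ *		int failed = (ibls & IBLS_ERROR) != 0;
+ *		u64 lines_left = ibls & IBLS_LENGTH_MASK;
+ *		...
+ *	}
+ */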
+
+/*
+ * IO BTE Control/Terminate register (IIO_IBCT) bit field definitions
+ */
+#define IBCT_POISON (0x1UL << 8)
+#define IBCT_NOTIFY (0x1UL << 4)
+#define IBCT_ZFIL_MODE (0x1UL << 0)
+
+/*
+ * IIO Incoming Error Packet Header (IIO_IIEPH1/IIO_IIEPH2)
+ */
+#define IIEPH1_VALID (1UL << 44)
+#define IIEPH1_OVERRUN (1UL << 40)
+#define IIEPH1_ERR_TYPE_SHFT 32
+#define IIEPH1_ERR_TYPE_MASK 0xf
+#define IIEPH1_SOURCE_SHFT 20
+#define IIEPH1_SOURCE_MASK 11
+#define IIEPH1_SUPPL_SHFT 8
+#define IIEPH1_SUPPL_MASK 11
+#define IIEPH1_CMD_SHFT 0
+#define IIEPH1_CMD_MASK 7
+
+#define IIEPH2_TAIL (1UL << 40)
+#define IIEPH2_ADDRESS_SHFT 0
+#define IIEPH2_ADDRESS_MASK 38
+
+#define IIEPH1_ERR_SHORT_REQ 2
+#define IIEPH1_ERR_SHORT_REPLY 3
+#define IIEPH1_ERR_LONG_REQ 4
+#define IIEPH1_ERR_LONG_REPLY 5
+
+/*
+ * IO Error Clear register bit field definitions
+ */
+#define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */
+#define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */
+#define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */
+#define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */
+#define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */
+#define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */
+#define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */
+#define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */
+#define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */
+#define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */
+#define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */
+#define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */
+#define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */
+#define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */
+#define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */
+
+/*
+ * IIO CRB control register Fields: IIO_ICCR
+ */
+#define IIO_ICCR_PENDING 0x10000
+#define IIO_ICCR_CMD_MASK 0xFF
+#define IIO_ICCR_CMD_SHFT 7
+#define IIO_ICCR_CMD_NOP 0x0 /* No Op */
+#define IIO_ICCR_CMD_WAKE 0x100 /* Reactivate CRB entry and process */
+#define IIO_ICCR_CMD_TIMEOUT 0x200 /* Make CRB timeout & mark invalid */
+#define IIO_ICCR_CMD_EJECT 0x400 /* Contents of entry written to memory
+ * via a WB
+ */
+#define IIO_ICCR_CMD_FLUSH 0x800
+
+/*
+ *
+ * CRB Register description.
+ *
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ *
+ * Many of the fields in CRB are status bits used by hardware
+ * for implementation of the protocol. It's very dangerous to
+ * mess around with the CRB registers.
+ *
+ * It's OK to read the CRB registers and try to make sense out of the
+ * fields in CRB.
+ *
+ * Updating CRB requires all activities in Hub IIO to be quiesced;
+ * otherwise, a write to CRB could corrupt other CRB entries.
+ * CRBs are here only as a back-door peek at shub IIO's status.
+ * Quiescing implies no DMAs and no PIOs, either directly from the
+ * cpu or from sn0net. This is not something that can be done
+ * easily, so AVOID updating CRBs.
+ */
+
+/*
+ * Easy access macros for CRBs, all 5 registers (A-E)
+ */
+typedef ii_icrb0_a_u_t icrba_t;
+#define a_sidn ii_icrb0_a_fld_s.ia_sidn
+#define a_tnum ii_icrb0_a_fld_s.ia_tnum
+#define a_addr ii_icrb0_a_fld_s.ia_addr
+#define a_valid ii_icrb0_a_fld_s.ia_vld
+#define a_iow ii_icrb0_a_fld_s.ia_iow
+#define a_regvalue ii_icrb0_a_regval
+
+typedef ii_icrb0_b_u_t icrbb_t;
+#define b_use_old ii_icrb0_b_fld_s.ib_use_old
+#define b_imsgtype ii_icrb0_b_fld_s.ib_imsgtype
+#define b_imsg ii_icrb0_b_fld_s.ib_imsg
+#define b_initiator ii_icrb0_b_fld_s.ib_init
+#define b_exc ii_icrb0_b_fld_s.ib_exc
+#define b_ackcnt ii_icrb0_b_fld_s.ib_ack_cnt
+#define b_resp ii_icrb0_b_fld_s.ib_resp
+#define b_ack ii_icrb0_b_fld_s.ib_ack
+#define b_hold ii_icrb0_b_fld_s.ib_hold
+#define b_wb ii_icrb0_b_fld_s.ib_wb
+#define b_intvn ii_icrb0_b_fld_s.ib_intvn
+#define b_stall_ib ii_icrb0_b_fld_s.ib_stall_ib
+#define b_stall_int ii_icrb0_b_fld_s.ib_stall__intr
+#define b_stall_bte_0 ii_icrb0_b_fld_s.ib_stall__bte_0
+#define b_stall_bte_1 ii_icrb0_b_fld_s.ib_stall__bte_1
+#define b_error ii_icrb0_b_fld_s.ib_error
+#define b_ecode ii_icrb0_b_fld_s.ib_errcode
+#define b_lnetuce ii_icrb0_b_fld_s.ib_ln_uce
+#define b_mark ii_icrb0_b_fld_s.ib_mark
+#define b_xerr ii_icrb0_b_fld_s.ib_xt_err
+#define b_regvalue ii_icrb0_b_regval
+
+typedef ii_icrb0_c_u_t icrbc_t;
+#define c_suppl ii_icrb0_c_fld_s.ic_suppl
+#define c_barrop ii_icrb0_c_fld_s.ic_bo
+#define c_doresp ii_icrb0_c_fld_s.ic_resprqd
+#define c_gbr ii_icrb0_c_fld_s.ic_gbr
+#define c_btenum ii_icrb0_c_fld_s.ic_bte_num
+#define c_cohtrans ii_icrb0_c_fld_s.ic_ct
+#define c_xtsize ii_icrb0_c_fld_s.ic_size
+#define c_source ii_icrb0_c_fld_s.ic_source
+#define c_regvalue ii_icrb0_c_regval
+
+typedef ii_icrb0_d_u_t icrbd_t;
+#define d_sleep ii_icrb0_d_fld_s.id_sleep
+#define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt
+#define d_pripsc ii_icrb0_d_fld_s.id_pr_psc
+#define d_bteop ii_icrb0_d_fld_s.id_bte_op
+#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* id_pa_be fld has 2 names */
+#define d_benable ii_icrb0_d_fld_s.id_pa_be /* id_pa_be fld has 2 names */
+#define d_regvalue ii_icrb0_d_regval
+
+typedef ii_icrb0_e_u_t icrbe_t;
+#define icrbe_ctxtvld ii_icrb0_e_fld_s.ie_cvld
+#define icrbe_toutvld ii_icrb0_e_fld_s.ie_tvld
+#define icrbe_context ii_icrb0_e_fld_s.ie_context
+#define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout
+#define e_regvalue ii_icrb0_e_regval
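+
+/*
+ * Illustrative read of CRB entry "n" (a peek only -- see the warning
+ * above about writing CRBs; REMOTE_HUB_L() is an assumed MMR accessor):
+ *
+ *	icrbb_t b;
+ *	b.b_regvalue = REMOTE_HUB_L(nasid, IIO_ICRB_B(n));
+ *	if (b.b_error)
+ *		ecode = b.b_ecode;
+ */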
+
+/* Number of widgets supported by shub */
+#define HUB_NUM_WIDGET 9
+#define HUB_WIDGET_ID_MIN 0x8
+#define HUB_WIDGET_ID_MAX 0xf
+
+#define HUB_WIDGET_PART_NUM 0xc120
+#define MAX_HUBS_PER_XBOW 2
+
+/* A few more #defines for backwards compatibility */
+#define iprb_t ii_iprb0_u_t
+#define iprb_regval ii_iprb0_regval
+#define iprb_mult_err ii_iprb0_fld_s.i_mult_err
+#define iprb_spur_rd ii_iprb0_fld_s.i_spur_rd
+#define iprb_spur_wr ii_iprb0_fld_s.i_spur_wr
+#define iprb_rd_to ii_iprb0_fld_s.i_rd_to
+#define iprb_ovflow ii_iprb0_fld_s.i_of_cnt
+#define iprb_error ii_iprb0_fld_s.i_error
+#define iprb_ff ii_iprb0_fld_s.i_f
+#define iprb_mode ii_iprb0_fld_s.i_m
+#define iprb_bnakctr ii_iprb0_fld_s.i_nb
+#define iprb_anakctr ii_iprb0_fld_s.i_na
+#define iprb_xtalkctr ii_iprb0_fld_s.i_c
+
+#define LNK_STAT_WORKING 0x2 /* LLP is working */
+
+#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
+#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
+#define IIO_WSTAT_TXRETRY_MASK 0x7F /* should be 0xFF?? */
+#define IIO_WSTAT_TXRETRY_SHFT 16
+#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
+ IIO_WSTAT_TXRETRY_MASK)
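+
+/*
+ * For example (illustrative), extracting the Tx retry count from a
+ * raw widget status value read from IIO_WSTAT:
+ *
+ *	int retries = IIO_WSTAT_TXRETRY_CNT(wstat);
+ */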
+
+/* Number of II perf. counters we can multiplex at once */
+
+#define IO_PERF_SETS 32
+
+/* Bit for the widget in inbound access register */
+#define IIO_IIWA_WIDGET(_w) ((u64)(1ULL << _w))
+/* Bit for the widget in outbound access register */
+#define IIO_IOWA_WIDGET(_w) ((u64)(1ULL << _w))
+
+/* NOTE: The following define assumes that we are going to get
+ * widget numbers from 8 thru F and the device numbers within
+ * widget from 0 thru 7.
+ */
+#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((u64)(1ULL << (8 * ((w) - 8) + (d))))
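+
+/*
+ * For example (illustrative): device 3 of widget 0x9 corresponds to
+ * bit 11, i.e. IIO_IIDEM_WIDGETDEV_MASK(0x9, 3) == 1ULL << 11.
+ */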
+
+/* IO Interrupt Destination Register */
+#define IIO_IIDSR_SENT_SHIFT 28
+#define IIO_IIDSR_SENT_MASK 0x30000000
+#define IIO_IIDSR_ENB_SHIFT 24
+#define IIO_IIDSR_ENB_MASK 0x01000000
+#define IIO_IIDSR_NODE_SHIFT 9
+#define IIO_IIDSR_NODE_MASK 0x000ff700
+#define IIO_IIDSR_PI_ID_SHIFT 8
+#define IIO_IIDSR_PI_ID_MASK 0x00000100
+#define IIO_IIDSR_LVL_SHIFT 0
+#define IIO_IIDSR_LVL_MASK 0x000000ff
+
+/* Xtalk timeout threshold register (IIO_IXTT) */
+#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */
+#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT)
+#define IXTT_RRSP_PS_SHFT 32 /* read response TO prescaler */
+#define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT)
+#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */
+#define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT)
+
+/*
+ * The IO LLP control status register and widget control register
+ */
+
+typedef union hubii_wcr_u {
+ u64 wcr_reg_value;
+ struct {
+ u64 wcr_widget_id:4, /* Widget identification */
+ wcr_tag_mode:1, /* Tag mode */
+ wcr_rsvd1:8, /* Reserved */
+ wcr_xbar_crd:3, /* LLP crossbar credit */
+ wcr_f_bad_pkt:1, /* Force bad llp pkt enable */
+ wcr_dir_con:1, /* widget direct connect */
+ wcr_e_thresh:5, /* elasticity threshold */
+ wcr_rsvd:41; /* unused */
+ } wcr_fields_s;
+} hubii_wcr_t;
+
+#define iwcr_dir_con wcr_fields_s.wcr_dir_con
+
+/* The structures below are defined to extract and modify the II
+ performance registers */
+
+/* io_perf_sel allows the caller to specify what tests will be
+ performed */
+
+typedef union io_perf_sel {
+ u64 perf_sel_reg;
+ struct {
+ u64 perf_ippr0:4, perf_ippr1:4, perf_icct:8, perf_rsvd:48;
+ } perf_sel_bits;
+} io_perf_sel_t;
+
+/* io_perf_cnt is used to extract the count from the SHub registers. Due to
+ hardware problems there is only one counter, not two. */
+
+typedef union io_perf_cnt {
+ u64 perf_cnt;
+ struct {
+ u64 perf_cnt:20, perf_rsvd2:12, perf_rsvd1:32;
+ } perf_cnt_bits;
+
+} io_perf_cnt_t;
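+
+/*
+ * Illustrative sketch (editor's example): select what the two counters
+ * measure, then read back the single working count. REMOTE_HUB_S() and
+ * REMOTE_HUB_L() are assumed MMR accessors; the menu values are made up.
+ *
+ *	io_perf_sel_t sel;
+ *	sel.perf_sel_reg = 0;
+ *	sel.perf_sel_bits.perf_ippr0 = 0x1;	menu selection, counter 0
+ *	sel.perf_sel_bits.perf_ippr1 = 0x2;	menu selection, counter 1
+ *	REMOTE_HUB_S(nasid, IIO_IPCR, sel.perf_sel_reg);
+ *
+ *	io_perf_cnt_t cnt;
+ *	cnt.perf_cnt = REMOTE_HUB_L(nasid, IIO_IPPR);
+ *	count = cnt.perf_cnt_bits.perf_cnt;
+ */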
+
+typedef union iprte_a {
+ u64 entry;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_addr:38;
+ u64 i_init:3;
+ u64 i_source:8;
+ u64 i_rsvd:2;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } iprte_fields;
+} iprte_a_t;
+
+#endif /* _ASM_IA64_SN_SHUBIO_H */
diff --git a/arch/ia64/include/asm/sn/simulator.h b/arch/ia64/include/asm/sn/simulator.h
new file mode 100644
index 000000000000..c2611f6cfe33
--- /dev/null
+++ b/arch/ia64/include/asm/sn/simulator.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SIMULATOR_H
+#define _ASM_IA64_SN_SIMULATOR_H
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_SGI_UV)
+#define SNMAGIC 0xaeeeeeee8badbeefL
+#define IS_MEDUSA() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
+
+#define SIMULATOR_SLEEP() asm("nop.i 0x8beef")
+#define IS_RUNNING_ON_SIMULATOR() (sn_prom_type)
+#define IS_RUNNING_ON_FAKE_PROM() (sn_prom_type == 2)
+extern int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
+#else
+#define IS_MEDUSA() 0
+#define SIMULATOR_SLEEP()
+#define IS_RUNNING_ON_SIMULATOR() 0
+#define IS_RUNNING_ON_FAKE_PROM() 0
+#endif
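+
+/*
+ * Typical use (illustrative): guard code that must behave differently
+ * under the Medusa simulator, e.g.
+ *
+ *	if (IS_RUNNING_ON_SIMULATOR())
+ *		SIMULATOR_SLEEP();	give the simulator a hint to idle
+ */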
+
+#endif /* _ASM_IA64_SN_SIMULATOR_H */
diff --git a/arch/ia64/include/asm/sn/sn2/sn_hwperf.h b/arch/ia64/include/asm/sn/sn2/sn_hwperf.h
new file mode 100644
index 000000000000..e61ebac38cdd
--- /dev/null
+++ b/arch/ia64/include/asm/sn/sn2/sn_hwperf.h
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
+ *
+ * Data types used by the SN_SAL_HWPERF_OP SAL call for monitoring
+ * SGI Altix node and router hardware
+ *
+ * Mark Goodwin <markgw@sgi.com> Mon Aug 30 12:23:46 EST 2004
+ */
+
+#ifndef SN_HWPERF_H
+#define SN_HWPERF_H
+
+/*
+ * object structure. SN_HWPERF_ENUM_OBJECTS and SN_HWPERF_GET_CPU_INFO
+ * return an array of these. Do not change this without also
+ * changing the corresponding SAL code.
+ */
+#define SN_HWPERF_MAXSTRING 128
+struct sn_hwperf_object_info {
+ u32 id;
+ union {
+ struct {
+ u64 this_part:1;
+ u64 is_shared:1;
+ } fields;
+ struct {
+ u64 flags;
+ u64 reserved;
+ } b;
+ } f;
+ char name[SN_HWPERF_MAXSTRING];
+ char location[SN_HWPERF_MAXSTRING];
+ u32 ports;
+};
+
+#define sn_hwp_this_part f.fields.this_part
+#define sn_hwp_is_shared f.fields.is_shared
+#define sn_hwp_flags f.b.flags
+
+/* macros for object classification */
+#define SN_HWPERF_IS_NODE(x) ((x) && strstr((x)->name, "SHub"))
+#define SN_HWPERF_IS_NODE_SHUB2(x) ((x) && strstr((x)->name, "SHub 2."))
+#define SN_HWPERF_IS_IONODE(x) ((x) && strstr((x)->name, "TIO"))
+#define SN_HWPERF_IS_NL3ROUTER(x) ((x) && strstr((x)->name, "NL3Router"))
+#define SN_HWPERF_IS_NL4ROUTER(x) ((x) && strstr((x)->name, "NL4Router"))
+#define SN_HWPERF_IS_OLDROUTER(x) ((x) && strstr((x)->name, "Router"))
+#define SN_HWPERF_IS_ROUTER(x) (SN_HWPERF_IS_NL3ROUTER(x) || \
+ SN_HWPERF_IS_NL4ROUTER(x) || \
+ SN_HWPERF_IS_OLDROUTER(x))
+#define SN_HWPERF_FOREIGN(x) ((x) && !(x)->sn_hwp_this_part && !(x)->sn_hwp_is_shared)
+#define SN_HWPERF_SAME_OBJTYPE(x,y) ((SN_HWPERF_IS_NODE(x) && SN_HWPERF_IS_NODE(y)) ||\
+ (SN_HWPERF_IS_IONODE(x) && SN_HWPERF_IS_IONODE(y)) ||\
+ (SN_HWPERF_IS_ROUTER(x) && SN_HWPERF_IS_ROUTER(y)))
+
+/* numa port structure, SN_HWPERF_ENUM_PORTS returns an array of these */
+struct sn_hwperf_port_info {
+ u32 port;
+ u32 conn_id;
+ u32 conn_port;
+};
+
+/* for SN_HWPERF_{GET,SET}_MMRS */
+struct sn_hwperf_data {
+ u64 addr;
+ u64 data;
+};
+
+/* user ioctl() argument, see below */
+struct sn_hwperf_ioctl_args {
+ u64 arg; /* argument, usually an object id */
+ u64 sz; /* size of transfer */
+ void *ptr; /* pointer to source/target */
+ u32 v0; /* second return value */
+};
+
+/*
+ * For SN_HWPERF_{GET,SET}_MMRS and SN_HWPERF_OBJECT_DISTANCE,
+ * sn_hwperf_ioctl_args.arg can be used to specify a CPU on which
+ * to call SAL, and whether to use an interprocessor interrupt
+ * or task migration in order to do so. If the CPU specified is
+ * SN_HWPERF_ARG_ANY_CPU, then the current CPU will be used.
+ */
+#define SN_HWPERF_ARG_ANY_CPU 0x7fffffffUL
+#define SN_HWPERF_ARG_CPU_MASK 0x7fffffff00000000ULL
+#define SN_HWPERF_ARG_USE_IPI_MASK 0x8000000000000000ULL
+#define SN_HWPERF_ARG_OBJID_MASK 0x00000000ffffffffULL
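+
+/*
+ * Illustrative composition of sn_hwperf_ioctl_args.arg for the calls
+ * that accept a target cpu (variable names here are made up):
+ *
+ *	u64 arg = (objid & SN_HWPERF_ARG_OBJID_MASK) |
+ *		  (((u64)cpu << 32) & SN_HWPERF_ARG_CPU_MASK) |
+ *		  (use_ipi ? SN_HWPERF_ARG_USE_IPI_MASK : 0);
+ */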
+
+/*
+ * ioctl requests on the "sn_hwperf" misc device that call SAL.
+ */
+#define SN_HWPERF_OP_MEM_COPYIN 0x1000
+#define SN_HWPERF_OP_MEM_COPYOUT 0x2000
+#define SN_HWPERF_OP_MASK 0x0fff
+
+/*
+ * Determine mem requirement.
+ * arg don't care
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_GET_HEAPSIZE 1
+
+/*
+ * Install mem for SAL drvr
+ * arg don't care
+ * sz sizeof buffer pointed to by p
+ * p pointer to buffer for scratch area
+ */
+#define SN_HWPERF_INSTALL_HEAP 2
+
+/*
+ * Determine number of objects
+ * arg don't care
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_OBJECT_COUNT (10|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Determine object "distance", relative to a cpu. This operation can
+ * execute on a designated logical cpu number, using either an IPI or
+ * task migration. If the cpu number is SN_HWPERF_ARG_ANY_CPU, then
+ * the current CPU is used. See the SN_HWPERF_ARG_* macros above.
+ *
+ * arg bitmap of IPI flag, cpu number and object id
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_OBJECT_DISTANCE (11|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Enumerate objects. Special case if sz == 8, returns the required
+ * buffer size.
+ * arg don't care
+ * sz sizeof buffer pointed to by p
+ * p pointer to array of struct sn_hwperf_object_info
+ */
+#define SN_HWPERF_ENUM_OBJECTS (12|SN_HWPERF_OP_MEM_COPYOUT)
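+
+/*
+ * Illustrative user-space sequence (editor's sketch; "fd" is assumed to
+ * be an open descriptor on the "sn_hwperf" misc device):
+ *
+ *	u64 sz;
+ *	struct sn_hwperf_ioctl_args a = { 0 };
+ *	a.sz = 8;				size query special case
+ *	a.ptr = &sz;
+ *	ioctl(fd, SN_HWPERF_ENUM_OBJECTS, &a);
+ *	a.sz = sz;
+ *	a.ptr = malloc(sz);			room for the object array
+ *	ioctl(fd, SN_HWPERF_ENUM_OBJECTS, &a);
+ */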
+
+/*
+ * Enumerate NumaLink ports for an object. Special case if sz == 8,
+ * returns the required buffer size.
+ * arg object id
+ * sz sizeof buffer pointed to by p
+ * p pointer to array of struct sn_hwperf_port_info
+ */
+#define SN_HWPERF_ENUM_PORTS (13|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * SET/GET memory mapped registers. These operations can execute
+ * on a designated logical cpu number, using either an IPI or
+ * task migration. If the cpu number is SN_HWPERF_ARG_ANY_CPU, then
+ * the current CPU is used. See the SN_HWPERF_ARG_* macros above.
+ *
+ * arg bitmap of ipi flag, cpu number and object id
+ * sz sizeof buffer pointed to by p
+ * p pointer to array of struct sn_hwperf_data
+ */
+#define SN_HWPERF_SET_MMRS (14|SN_HWPERF_OP_MEM_COPYIN)
+#define SN_HWPERF_GET_MMRS (15|SN_HWPERF_OP_MEM_COPYOUT| \
+ SN_HWPERF_OP_MEM_COPYIN)
+/*
+ * Lock a shared object
+ * arg object id
+ * sz don't care
+ * p don't care
+ */
+#define SN_HWPERF_ACQUIRE 16
+
+/*
+ * Unlock a shared object
+ * arg object id
+ * sz don't care
+ * p don't care
+ */
+#define SN_HWPERF_RELEASE 17
+
+/*
+ * Break a lock on a shared object
+ * arg object id
+ * sz don't care
+ * p don't care
+ */
+#define SN_HWPERF_FORCE_RELEASE 18
+
+/*
+ * ioctl requests on "sn_hwperf" that do not call SAL
+ */
+
+/*
+ * get cpu info as an array of struct sn_hwperf_object_info.
+ * id is logical CPU number, name is description, location
+ * is geoid (e.g. 001c04#1c). Special case if sz == 8,
+ * returns the required buffer size.
+ *
+ * arg don't care
+ * sz sizeof buffer pointed to by p
+ * p pointer to array of struct sn_hwperf_object_info
+ */
+#define SN_HWPERF_GET_CPU_INFO (100|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Given an object id, return its node number (aka cnode).
+ * arg object id
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_GET_OBJ_NODE (101|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Given a node number (cnode), return its nasid.
+ * arg ordinal node number (aka cnodeid)
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_GET_NODE_NASID (102|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Given a node id, determine the id of the nearest node with CPUs
+ * and the id of the nearest node that has memory. The argument
+ * node would normally be a "headless" node, e.g. an "IO node".
+ * Return 0 on success.
+ */
+extern int sn_hwperf_get_nearest_node(cnodeid_t node,
+ cnodeid_t *near_mem, cnodeid_t *near_cpu);
+
+/* return codes */
+#define SN_HWPERF_OP_OK 0
+#define SN_HWPERF_OP_NOMEM 1
+#define SN_HWPERF_OP_NO_PERM 2
+#define SN_HWPERF_OP_IO_ERROR 3
+#define SN_HWPERF_OP_BUSY 4
+#define SN_HWPERF_OP_RECONFIGURE 253
+#define SN_HWPERF_OP_INVAL 254
+
+int sn_topology_open(struct inode *inode, struct file *file);
+int sn_topology_release(struct inode *inode, struct file *file);
+#endif /* SN_HWPERF_H */
diff --git a/arch/ia64/include/asm/sn/sn_cpuid.h b/arch/ia64/include/asm/sn/sn_cpuid.h
new file mode 100644
index 000000000000..a676dd9ace3e
--- /dev/null
+++ b/arch/ia64/include/asm/sn/sn_cpuid.h
@@ -0,0 +1,132 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+#ifndef _ASM_IA64_SN_SN_CPUID_H
+#define _ASM_IA64_SN_SN_CPUID_H
+
+#include <linux/smp.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/intrinsics.h>
+
+
+/*
+ * Functions for converting between cpuids, nodeids and NASIDs.
+ *
+ * These are for SGI platforms only.
+ *
+ */
+
+
+
+
+/*
+ * Definitions of terms (these definitions are for IA64 ONLY. Other architectures
+ * use cpuid/cpunum quite differently):
+ *
+ * CPUID - a number in range of 0..NR_CPUS-1 that uniquely identifies
+ * the cpu. The value cpuid has no significance on IA64 other than
+ * the boot cpu is 0.
+ * smp_processor_id() returns the cpuid of the current cpu.
+ *
+ * CPU_PHYSICAL_ID (also known as HARD_PROCESSOR_ID)
+ * This is the same as 31:24 of the processor LID register
+ * hard_smp_processor_id()- cpu_physical_id of current processor
+ * cpu_physical_id(cpuid) - convert a <cpuid> to a <physical_cpuid>
+ * cpu_logical_id(phy_id) - convert a <physical_cpuid> to a <cpuid>
+ * * not really efficient - don't use in perf critical code
+ *
+ * SLICE - a number in the range of 0 - 3 (typically) that represents the
+ * cpu number on a brick.
+ *
+ * SUBNODE - (almost obsolete) the number of the FSB that a cpu is
+ * connected to. This is also the same as the PI number. Usually 0 or 1.
+ *
+ * NOTE!!!: the value of the bits in the cpu physical id (SAPICid or LID) of a cpu has no
+ * significance. The SAPIC id (LID) is a 16-bit cookie that has meaning only to the PROM.
+ *
+ *
+ * The macros convert between cpu physical ids & slice/nasid/cnodeid.
+ * These terms are described below:
+ *
+ *
+ * Brick
+ * ----- ----- ----- ----- CPU
+ * | 0 | | 1 | | 0 | | 1 | SLICE
+ * ----- ----- ----- -----
+ * | | | |
+ * | | | |
+ * 0 | | 2 0 | | 2 FSB SLOT
+ * ------- -------
+ * | |
+ * | |
+ * | |
+ * ------------ -------------
+ * | | | |
+ * | SHUB | | SHUB | NASID (0..MAX_NASIDS)
+ * | |----- | | CNODEID (0..num_compact_nodes-1)
+ * | | | |
+ * | | | |
+ * ------------ -------------
+ * | |
+ *
+ *
+ */
+
+#define get_node_number(addr) NASID_GET(addr)
+
+/*
+ * NOTE: on non-MP systems, only cpuid 0 exists
+ */
+
+extern short physical_node_map[]; /* indexed by nasid to get cnode */
+
+/*
+ * Macros for retrieving info about current cpu
+ */
+#define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid)
+#define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode)
+#define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice)
+#define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode)
+#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
+
+/*
+ * Macros for retrieving info about an arbitrary cpu
+ * cpuid - logical cpu id
+ */
+#define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid)
+#define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode)
+#define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice)
+
+
+/*
+ * Don't use the following in performance critical code. They require scans
+ * of potentially large tables.
+ */
+extern int nasid_slice_to_cpuid(int, int);
+
+/*
+ * cnodeid_to_nasid - convert a cnodeid to a NASID
+ */
+#define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid])
+
+/*
+ * nasid_to_cnodeid - convert a NASID to a cnodeid
+ */
+#define nasid_to_cnodeid(nasid) (physical_node_map[nasid])
+
+/*
+ * partition_coherence_id - get the coherence ID of the current partition
+ */
+extern u8 sn_coherency_id;
+#define partition_coherence_id() (sn_coherency_id)
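+
+/*
+ * Illustrative conversion chain for an arbitrary cpu:
+ *
+ *	int nasid = cpuid_to_nasid(cpu);
+ *	int cnode = nasid_to_cnodeid(nasid);	compact node id
+ *	... and back again: cnodeid_to_nasid(cnode) == nasid
+ */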
+
+#endif /* _ASM_IA64_SN_SN_CPUID_H */
+
diff --git a/arch/ia64/include/asm/sn/sn_feature_sets.h b/arch/ia64/include/asm/sn/sn_feature_sets.h
new file mode 100644
index 000000000000..8e83ac117ace
--- /dev/null
+++ b/arch/ia64/include/asm/sn/sn_feature_sets.h
@@ -0,0 +1,58 @@
+#ifndef _ASM_IA64_SN_FEATURE_SETS_H
+#define _ASM_IA64_SN_FEATURE_SETS_H
+
+/*
+ * SN PROM Features
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+/* --------------------- PROM Features -----------------------------*/
+extern int sn_prom_feature_available(int id);
+
+#define MAX_PROM_FEATURE_SETS 2
+
+/*
+ * The following defines features that may or may not be supported by the
+ * current PROM. The OS uses sn_prom_feature_available(feature) to test for
+ * the presence of a PROM feature. Down rev (old) PROMs will always test
+ * "false" for new features.
+ *
+ * Use:
+ * if (sn_prom_feature_available(PRF_XXX))
+ * ...
+ */
+
+#define PRF_PAL_CACHE_FLUSH_SAFE 0
+#define PRF_DEVICE_FLUSH_LIST 1
+#define PRF_HOTPLUG_SUPPORT 2
+#define PRF_CPU_DISABLE_SUPPORT 3
+
+/* --------------------- OS Features -------------------------------*/
+
+/*
+ * The following defines OS features that are optionally present in
+ * the operating system.
+ * During boot, PROM is notified of these features via a series of calls:
+ *
+ * ia64_sn_set_os_feature(feature1);
+ *
+ * Once enabled, a feature cannot be disabled.
+ *
+ * By default, features are disabled unless explicitly enabled.
+ *
+ * These defines must be kept in sync with the corresponding
+ * PROM definitions in feature_sets.h.
+ */
+#define OSF_MCA_SLV_TO_OS_INIT_SLV 0
+#define OSF_FEAT_LOG_SBES 1
+#define OSF_ACPI_ENABLE 2
+#define OSF_PCISEGMENT_ENABLE 3
+
+
+#endif /* _ASM_IA64_SN_FEATURE_SETS_H */
diff --git a/arch/ia64/include/asm/sn/sn_sal.h b/arch/ia64/include/asm/sn/sn_sal.h
new file mode 100644
index 000000000000..57e649d388b8
--- /dev/null
+++ b/arch/ia64/include/asm/sn/sn_sal.h
@@ -0,0 +1,1188 @@
+#ifndef _ASM_IA64_SN_SN_SAL_H
+#define _ASM_IA64_SN_SN_SAL_H
+
+/*
+ * System Abstraction Layer definitions for IA64
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+#include <asm/sal.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/shub_mmr.h>
+
+// SGI Specific Calls
+#define SN_SAL_POD_MODE 0x02000001
+#define SN_SAL_SYSTEM_RESET 0x02000002
+#define SN_SAL_PROBE 0x02000003
+#define SN_SAL_GET_MASTER_NASID 0x02000004
+#define SN_SAL_GET_KLCONFIG_ADDR 0x02000005
+#define SN_SAL_LOG_CE 0x02000006
+#define SN_SAL_REGISTER_CE 0x02000007
+#define SN_SAL_GET_PARTITION_ADDR 0x02000009
+#define SN_SAL_XP_ADDR_REGION 0x0200000f
+#define SN_SAL_NO_FAULT_ZONE_VIRTUAL 0x02000010
+#define SN_SAL_NO_FAULT_ZONE_PHYSICAL 0x02000011
+#define SN_SAL_PRINT_ERROR 0x02000012
+#define SN_SAL_REGISTER_PMI_HANDLER 0x02000014
+#define SN_SAL_SET_ERROR_HANDLING_FEATURES 0x0200001a // reentrant
+#define SN_SAL_GET_FIT_COMPT 0x0200001b // reentrant
+#define SN_SAL_GET_SAPIC_INFO 0x0200001d
+#define SN_SAL_GET_SN_INFO 0x0200001e
+#define SN_SAL_CONSOLE_PUTC 0x02000021
+#define SN_SAL_CONSOLE_GETC 0x02000022
+#define SN_SAL_CONSOLE_PUTS 0x02000023
+#define SN_SAL_CONSOLE_GETS 0x02000024
+#define SN_SAL_CONSOLE_GETS_TIMEOUT 0x02000025
+#define SN_SAL_CONSOLE_POLL 0x02000026
+#define SN_SAL_CONSOLE_INTR 0x02000027
+#define SN_SAL_CONSOLE_PUTB 0x02000028
+#define SN_SAL_CONSOLE_XMIT_CHARS 0x0200002a
+#define SN_SAL_CONSOLE_READC 0x0200002b
+#define SN_SAL_SYSCTL_OP 0x02000030
+#define SN_SAL_SYSCTL_MODID_GET 0x02000031
+#define SN_SAL_SYSCTL_GET 0x02000032
+#define SN_SAL_SYSCTL_IOBRICK_MODULE_GET 0x02000033
+#define SN_SAL_SYSCTL_IO_PORTSPEED_GET 0x02000035
+#define SN_SAL_SYSCTL_SLAB_GET 0x02000036
+#define SN_SAL_BUS_CONFIG 0x02000037
+#define SN_SAL_SYS_SERIAL_GET 0x02000038
+#define SN_SAL_PARTITION_SERIAL_GET 0x02000039
+#define SN_SAL_SYSCTL_PARTITION_GET 0x0200003a
+#define SN_SAL_SYSTEM_POWER_DOWN 0x0200003b
+#define SN_SAL_GET_MASTER_BASEIO_NASID 0x0200003c
+#define SN_SAL_COHERENCE 0x0200003d
+#define SN_SAL_MEMPROTECT 0x0200003e
+#define SN_SAL_SYSCTL_FRU_CAPTURE 0x0200003f
+
+#define SN_SAL_SYSCTL_IOBRICK_PCI_OP 0x02000042 // reentrant
+#define SN_SAL_IROUTER_OP 0x02000043
+#define SN_SAL_SYSCTL_EVENT 0x02000044
+#define SN_SAL_IOIF_INTERRUPT 0x0200004a
+#define SN_SAL_HWPERF_OP 0x02000050 // lock
+#define SN_SAL_IOIF_ERROR_INTERRUPT 0x02000051
+#define SN_SAL_IOIF_PCI_SAFE 0x02000052
+#define SN_SAL_IOIF_SLOT_ENABLE 0x02000053
+#define SN_SAL_IOIF_SLOT_DISABLE 0x02000054
+#define SN_SAL_IOIF_GET_HUBDEV_INFO 0x02000055
+#define SN_SAL_IOIF_GET_PCIBUS_INFO 0x02000056
+#define SN_SAL_IOIF_GET_PCIDEV_INFO 0x02000057
+#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058 // deprecated
+#define SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST 0x0200005a
+
+#define SN_SAL_IOIF_INIT 0x0200005f
+#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060
+#define SN_SAL_BTE_RECOVER 0x02000061
+#define SN_SAL_RESERVED_DO_NOT_USE 0x02000062
+#define SN_SAL_IOIF_GET_PCI_TOPOLOGY 0x02000064
+
+#define SN_SAL_GET_PROM_FEATURE_SET 0x02000065
+#define SN_SAL_SET_OS_FEATURE_SET 0x02000066
+#define SN_SAL_INJECT_ERROR 0x02000067
+#define SN_SAL_SET_CPU_NUMBER 0x02000068
+
+#define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069
+
+/*
+ * Service-specific constants
+ */
+
+/* Console interrupt manipulation */
+ /* action codes */
+#define SAL_CONSOLE_INTR_OFF 0 /* turn the interrupt off */
+#define SAL_CONSOLE_INTR_ON 1 /* turn the interrupt on */
+#define SAL_CONSOLE_INTR_STATUS 2 /* retrieve the interrupt status */
+ /* interrupt specification & status return codes */
+#define SAL_CONSOLE_INTR_XMIT 1 /* output interrupt */
+#define SAL_CONSOLE_INTR_RECV 2 /* input interrupt */
+
+/* interrupt handling */
+#define SAL_INTR_ALLOC 1
+#define SAL_INTR_FREE 2
+#define SAL_INTR_REDIRECT 3
+
+/*
+ * operations available on the generic SN_SAL_SYSCTL_OP
+ * runtime service
+ */
+#define SAL_SYSCTL_OP_IOBOARD 0x0001 /* retrieve board type */
+#define SAL_SYSCTL_OP_TIO_JLCK_RST 0x0002 /* issue TIO clock reset */
+
+/*
+ * IRouter (i.e. generalized system controller) operations
+ */
+#define SAL_IROUTER_OPEN 0 /* open a subchannel */
+#define SAL_IROUTER_CLOSE 1 /* close a subchannel */
+#define SAL_IROUTER_SEND 2 /* send part of an IRouter packet */
+#define SAL_IROUTER_RECV 3 /* receive part of an IRouter packet */
+#define SAL_IROUTER_INTR_STATUS 4 /* check the interrupt status for
+ * an open subchannel
+ */
+#define SAL_IROUTER_INTR_ON 5 /* enable an interrupt */
+#define SAL_IROUTER_INTR_OFF 6 /* disable an interrupt */
+#define SAL_IROUTER_INIT 7 /* initialize IRouter driver */
+
+/* IRouter interrupt mask bits */
+#define SAL_IROUTER_INTR_XMIT SAL_CONSOLE_INTR_XMIT
+#define SAL_IROUTER_INTR_RECV SAL_CONSOLE_INTR_RECV
+
+/*
+ * Error Handling Features
+ */
+#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 // obsolete
+#define SAL_ERR_FEAT_LOG_SBES 0x2 // obsolete
+#define SAL_ERR_FEAT_MFR_OVERRIDE 0x4
+#define SAL_ERR_FEAT_SBE_THRESHOLD 0xffff0000
+
+/*
+ * SAL Error Codes
+ */
+#define SALRET_MORE_PASSES 1
+#define SALRET_OK 0
+#define SALRET_NOT_IMPLEMENTED (-1)
+#define SALRET_INVALID_ARG (-2)
+#define SALRET_ERROR (-3)
+
+#define SN_SAL_FAKE_PROM 0x02009999
+
+/**
+ * sn_sal_rev - get the SGI SAL revision number
+ *
+ * The SGI PROM stores its version in the sal_[ab]_rev_(major|minor).
+ * This routine simply extracts the major and minor values and
+ * presents them in a u32 format.
+ *
+ * For example, version 4.05 would be represented as 0x0405.
+ */
+static inline u32
+sn_sal_rev(void)
+{
+ struct ia64_sal_systab *systab = __va(efi.sal_systab);
+
+ return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor);
+}
+
+/*
+ * Returns the master console nasid; if the call fails, an illegal
+ * value is returned.
+ */
+static inline u64
+ia64_sn_get_console_nasid(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
+
+ if (ret_stuff.status < 0)
+ return ret_stuff.status;
+
+ /* Master console nasid is in 'v0' */
+ return ret_stuff.v0;
+}
+
+/*
+ * Returns the master baseio nasid; if the call fails, an illegal
+ * value is returned.
+ */
+static inline u64
+ia64_sn_get_master_baseio_nasid(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
+
+ if (ret_stuff.status < 0)
+ return ret_stuff.status;
+
+ /* Master baseio nasid is in 'v0' */
+ return ret_stuff.v0;
+}
+
+static inline void *
+ia64_sn_get_klconfig_addr(nasid_t nasid)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
+ return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
+}
+
+/*
+ * Returns the next console character.
+ */
+static inline u64
+ia64_sn_console_getc(int *ch)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
+
+ /* character is in 'v0' */
+ *ch = (int)ret_stuff.v0;
+
+ return ret_stuff.status;
+}
+
+/*
+ * Read a character from the SAL console device, after a previous interrupt
+ * or poll operation has indicated that a character is available
+ * to be read.
+ */
+static inline u64
+ia64_sn_console_readc(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
+
+ /* character is in 'v0' */
+ return ret_stuff.v0;
+}
+
+/*
+ * Sends the given character to the console.
+ */
+static inline u64
+ia64_sn_console_putc(char ch)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (u64)ch, 0, 0, 0, 0, 0, 0);
+
+ return ret_stuff.status;
+}
+
+/*
+ * Sends the given buffer to the console.
+ */
+static inline u64
+ia64_sn_console_putb(const char *buf, int len)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (u64)buf, (u64)len, 0, 0, 0, 0, 0);
+
+	if (ret_stuff.status == 0)
+		return ret_stuff.v0;
+	return (u64)0;
+}
+
+/*
+ * Print a platform error record
+ */
+static inline u64
+ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (u64)hook, (u64)rec, 0, 0, 0, 0, 0);
+
+ return ret_stuff.status;
+}
+
+/*
+ * Check for Platform errors
+ */
+static inline u64
+ia64_sn_plat_cpei_handler(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
+
+ return ret_stuff.status;
+}
+
+/*
+ * Set Error Handling Features (Obsolete)
+ */
+static inline u64
+ia64_sn_plat_set_error_handling_features(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_REENTRANT(ret_stuff, SN_SAL_SET_ERROR_HANDLING_FEATURES,
+ SAL_ERR_FEAT_LOG_SBES,
+ 0, 0, 0, 0, 0, 0);
+
+ return ret_stuff.status;
+}
+
+/*
+ * Checks for console input.
+ */
+static inline u64
+ia64_sn_console_check(int *result)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
+
+ /* result is in 'v0' */
+ *result = (int)ret_stuff.v0;
+
+ return ret_stuff.status;
+}
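+
+/*
+ * A minimal polling sketch combining ia64_sn_console_check() and
+ * ia64_sn_console_readc() above, assuming a nonzero poll result means
+ * input is pending; the helper name is illustrative only.  Returns the
+ * pending character, or -1 if the poll fails or nothing is waiting.
+ */
+static inline int
+sn_console_poll_read_example(void)
+{
+	int avail;
+
+	if (ia64_sn_console_check(&avail) != 0 || !avail)
+		return -1;
+	/* a character is known to be available, so readc won't block */
+	return (int) ia64_sn_console_readc();
+}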
+
+/*
+ * Checks console interrupt status
+ */
+static inline u64
+ia64_sn_console_intr_status(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
+ 0, SAL_CONSOLE_INTR_STATUS,
+ 0, 0, 0, 0, 0);
+
+ if (ret_stuff.status == 0) {
+ return ret_stuff.v0;
+ }
+
+ return 0;
+}
+
+/*
+ * Enable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_enable(u64 intr)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
+ intr, SAL_CONSOLE_INTR_ON,
+ 0, 0, 0, 0, 0);
+}
+
+/*
+ * Disable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_disable(u64 intr)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
+ intr, SAL_CONSOLE_INTR_OFF,
+ 0, 0, 0, 0, 0);
+}
+
+/*
+ * Sends a character buffer to the console asynchronously.
+ */
+static inline u64
+ia64_sn_console_xmit_chars(char *buf, int len)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
+ (u64)buf, (u64)len,
+ 0, 0, 0, 0, 0);
+
+ if (ret_stuff.status == 0) {
+ return ret_stuff.v0;
+ }
+
+ return 0;
+}
+
+/*
+ * Returns the iobrick module Id
+ */
+static inline u64
+ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
+
+ /* result is in 'v0' */
+ *result = (int)ret_stuff.v0;
+
+ return ret_stuff.status;
+}
+
+/**
+ * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function
+ *
+ * SN_SAL_POD_MODE actually takes an argument, but it's always
+ * 0 when we call it from the kernel, so we don't have to expose
+ * it to the caller.
+ */
+static inline u64
+ia64_sn_pod_mode(void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_REENTRANT(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
+ if (isrv.status)
+ return 0;
+ return isrv.v0;
+}
+
+/**
+ * ia64_sn_probe_mem - read from memory safely
+ * @addr: address to probe
+ * @size: number of bytes to read (1,2,4,8)
+ * @data_ptr: address to store value read by probe (-1 returned if probe fails)
+ *
+ * Call into the SAL to do a memory read. If the read generates a machine
+ * check, this routine will recover gracefully and return -1 to the caller.
+ * @addr is usually a kernel virtual address in uncached space (i.e. the
+ * address starts with 0xc), but if called in physical mode, @addr should
+ * be a physical address.
+ *
+ * Return values:
+ * 0 - probe successful
+ * 1 - probe failed (generated MCA)
+ * 2 - Bad arg
+ * <0 - PAL error
+ */
+static inline u64
+ia64_sn_probe_mem(long addr, long size, void *data_ptr)
+{
+ struct ia64_sal_retval isrv;
+
+ SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);
+
+ if (data_ptr) {
+ switch (size) {
+ case 1:
+ *((u8*)data_ptr) = (u8)isrv.v0;
+ break;
+ case 2:
+ *((u16*)data_ptr) = (u16)isrv.v0;
+ break;
+ case 4:
+ *((u32*)data_ptr) = (u32)isrv.v0;
+ break;
+ case 8:
+ *((u64*)data_ptr) = (u64)isrv.v0;
+ break;
+ default:
+ isrv.status = 2;
+ }
+ }
+ return isrv.status;
+}
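+
+/*
+ * A minimal usage sketch for ia64_sn_probe_mem() above; 'mmr' and the
+ * helper name are illustrative assumptions.  Reads a u32 safely,
+ * substituting a caller-supplied default if the probe fails.
+ */
+static inline u32
+sn_probe_u32_example(long mmr, u32 dflt)
+{
+	u32 val;
+
+	if (ia64_sn_probe_mem(mmr, 4, &val) != 0)
+		return dflt;	/* MCA, bad argument or PAL error */
+	return val;
+}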
+
+/*
+ * Retrieve the system serial number as an ASCII string.
+ */
+static inline u64
+ia64_sn_sys_serial_get(char *buf)
+{
+ struct ia64_sal_retval ret_stuff;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+extern char sn_system_serial_number_string[];
+extern u64 sn_partition_serial_number;
+
+static inline char *
+sn_system_serial_number(void)
+{
+	if (!sn_system_serial_number_string[0])
+		ia64_sn_sys_serial_get(sn_system_serial_number_string);
+	return sn_system_serial_number_string;
+}
+
+/*
+ * Returns a unique id number for this system and partition (suitable for
+ * use with license managers), based in part on the system serial number.
+ */
+static inline u64
+ia64_sn_partition_serial_get(void)
+{
+ struct ia64_sal_retval ret_stuff;
+ ia64_sal_oemcall_reentrant(&ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0,
+ 0, 0, 0, 0, 0, 0);
+ if (ret_stuff.status != 0)
+ return 0;
+ return ret_stuff.v0;
+}
+
+static inline u64
+sn_partition_serial_number_val(void) {
+ if (unlikely(sn_partition_serial_number == 0)) {
+ sn_partition_serial_number = ia64_sn_partition_serial_get();
+ }
+ return sn_partition_serial_number;
+}
+
+/*
+ * Returns the partition id of the nasid passed in as an argument,
+ * or INVALID_PARTID if the partition id cannot be retrieved.
+ */
+static inline partid_t
+ia64_sn_sysctl_partition_get(nasid_t nasid)
+{
+ struct ia64_sal_retval ret_stuff;
+ SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
+ 0, 0, 0, 0, 0, 0);
+ if (ret_stuff.status != 0)
+ return -1;
+ return ((partid_t)ret_stuff.v0);
+}
+
+/*
+ * Returns the physical address of the partition's reserved page through
+ * an iterative number of calls.
+ *
+ * On first call, 'cookie' and 'len' should be set to 0, and 'addr'
+ * set to the nasid of the partition whose reserved page's address is
+ * being sought.
+ * On subsequent calls, pass the values that were passed back on the
+ * previous call.
+ *
+ * While the return status equals SALRET_MORE_PASSES, keep calling
+ * this function after first copying 'len' bytes starting at 'addr'
+ * into 'buf'. Once the return status equals SALRET_OK, 'addr' will
+ * be the physical address of the partition's reserved page. If the
+ * return status equals neither of these, an error has occurred.
+ */
+static inline s64
+sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
+{
+ struct ia64_sal_retval rv;
+ ia64_sal_oemcall_reentrant(&rv, SN_SAL_GET_PARTITION_ADDR, *cookie,
+ *addr, buf, *len, 0, 0, 0);
+ *cookie = rv.v0;
+ *addr = rv.v1;
+ *len = rv.v2;
+ return rv.status;
+}
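+
+/*
+ * A minimal calling sketch for sn_partition_reserved_page_pa() above,
+ * following the SALRET_MORE_PASSES protocol from the preceding comment;
+ * the helper name is illustrative only and the per-pass copy is elided.
+ */
+static inline s64
+sn_reserved_page_pa_example(u64 buf, nasid_t nasid, u64 *rp_pa)
+{
+	u64 cookie = 0, len = 0, addr = (u64) nasid;
+	s64 status;
+
+	while ((status = sn_partition_reserved_page_pa(buf, &cookie,
+					&addr, &len)) == SALRET_MORE_PASSES) {
+		/* copy 'len' bytes starting at 'addr' into 'buf' here */
+	}
+	if (status == SALRET_OK)
+		*rp_pa = addr;	/* reserved page physical address */
+	return status;
+}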
+
+/*
+ * Register or unregister a physical address range being referenced across
+ * a partition boundary for which certain SAL errors should be scanned for,
+ * cleaned up and ignored. This is of value for kernel partitioning code only.
+ * Values for the operation argument:
+ * 1 = register this address range with SAL
+ * 0 = unregister this address range with SAL
+ *
+ * SAL maintains a reference count on an address range in case it is registered
+ * multiple times.
+ *
+ * On success, returns the reference count of the address range after the SAL
+ * call has performed the current registration/unregistration. Returns a
+ * negative value if an error occurred.
+ */
+static inline int
+sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
+{
+ struct ia64_sal_retval ret_stuff;
+ ia64_sal_oemcall(&ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len,
+ (u64)operation, 0, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+/*
+ * Register or unregister an instruction range for which SAL errors should
+ * be ignored. If an error occurs while in the registered range, SAL jumps
+ * to return_addr after ignoring the error. Values for the operation argument:
+ * 1 = register this instruction range with SAL
+ * 0 = unregister this instruction range with SAL
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
+ int virtual, int operation)
+{
+ struct ia64_sal_retval ret_stuff;
+ u64 call;
+ if (virtual) {
+ call = SN_SAL_NO_FAULT_ZONE_VIRTUAL;
+ } else {
+ call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
+ }
+	ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr,
+			 (u64)operation, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+/*
+ * Register or unregister a function to handle a PMI received by a CPU.
+ * Before calling the registered handler, SAL sets r1 to the value that
+ * was passed in as the global_pointer.
+ *
+ * If the handler pointer is NULL, then the currently registered handler
+ * will be unregistered.
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_register_pmi_handler(u64 handler, u64 global_pointer)
+{
+ struct ia64_sal_retval ret_stuff;
+ ia64_sal_oemcall(&ret_stuff, SN_SAL_REGISTER_PMI_HANDLER, handler,
+ global_pointer, 0, 0, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+/*
+ * Change or query the coherence domain for this partition. Each cpu-based
+ * nasid is represented by a bit in an array of 64-bit words:
+ * 0 = not in this partition's coherency domain
+ * 1 = in this partition's coherency domain
+ *
+ * It is not possible for the local system's nasids to be removed from
+ * the coherency domain. Purpose of the domain arguments:
+ * new_domain = set the coherence domain to the given nasids
+ * old_domain = return the current coherence domain
+ *
+ * Returns 0 on success, or a negative value if an error occurred.
+ */
+static inline int
+sn_change_coherence(u64 *new_domain, u64 *old_domain)
+{
+ struct ia64_sal_retval ret_stuff;
+ ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_COHERENCE, (u64)new_domain,
+ (u64)old_domain, 0, 0, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+/*
+ * Change memory access protections for a physical address range.
+ * nasid_array is not used on Altix, but may be in future architectures.
+ * Available memory protection access classes are defined after the function.
+ */
+static inline int
+sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
+ (u64)nasid_array, perms, 0, 0, 0);
+ return ret_stuff.status;
+}
+#define SN_MEMPROT_ACCESS_CLASS_0 0x14a080
+#define SN_MEMPROT_ACCESS_CLASS_1 0x2520c2
+#define SN_MEMPROT_ACCESS_CLASS_2 0x14a1ca
+#define SN_MEMPROT_ACCESS_CLASS_3 0x14a290
+#define SN_MEMPROT_ACCESS_CLASS_6 0x084080
+#define SN_MEMPROT_ACCESS_CLASS_7 0x021080
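+
+/*
+ * A minimal usage sketch for sn_change_memprotect() above, assuming
+ * access class 0 is the desired protection; the helper name is
+ * illustrative only (nasid_array is unused on Altix, so NULL is fine).
+ */
+static inline int
+sn_memprotect_class0_example(u64 paddr, u64 len)
+{
+	return sn_change_memprotect(paddr, len, SN_MEMPROT_ACCESS_CLASS_0,
+				    NULL);
+}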
+
+/*
+ * Turns off system power.
+ */
+static inline void
+ia64_sn_power_down(void)
+{
+ struct ia64_sal_retval ret_stuff;
+ SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
+	while (1)
+ cpu_relax();
+ /* never returns */
+}
+
+/**
+ * ia64_sn_fru_capture - tell the system controller to capture hw state
+ *
+ * This routine will call the SAL which will tell the system controller(s)
+ * to capture hw mmr information from each SHub in the system.
+ */
+static inline u64
+ia64_sn_fru_capture(void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
+ if (isrv.status)
+ return 0;
+ return isrv.v0;
+}
+
+/*
+ * Performs an operation on a PCI bus or slot -- power up, power down
+ * or reset.
+ */
+static inline u64
+ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type,
+ u64 bus, char slot,
+ u64 action)
+{
+ struct ia64_sal_retval rv = {0, 0, 0, 0};
+
+ SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
+ bus, (u64) slot, 0, 0);
+ if (rv.status)
+ return rv.v0;
+ return 0;
+}
+
+
+/*
+ * Open a subchannel for sending arbitrary data to the system
+ * controller network via the system controller device associated with
+ * 'nasid'. Return the subchannel number or a negative error code.
+ */
+static inline int
+ia64_sn_irtr_open(nasid_t nasid)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
+ 0, 0, 0, 0, 0);
+ return (int) rv.v0;
+}
+
+/*
+ * Close system controller subchannel 'subch' previously opened on 'nasid'.
+ */
+static inline int
+ia64_sn_irtr_close(nasid_t nasid, int subch)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
+ (u64) nasid, (u64) subch, 0, 0, 0, 0);
+ return (int) rv.status;
+}
+
+/*
+ * Read data from system controller associated with 'nasid' on
+ * subchannel 'subch'. The buffer to be filled is pointed to by
+ * 'buf', and its capacity is in the integer pointed to by 'len'. The
+ * referent of 'len' is set to the number of bytes read by the SAL
+ * call. The return value is either SALRET_OK (for bytes read) or
+ * SALRET_ERROR (for error or "no data available").
+ */
+static inline int
+ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
+ (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+ 0, 0);
+ return (int) rv.status;
+}
+
+/*
+ * Write data to the system controller network via the system
+ * controller associated with 'nasid' on subchannel 'subch'. The
+ * buffer to be written out is pointed to by 'buf', and 'len' is the
+ * number of bytes to be written. The return value is either the
+ * number of bytes written (which could be zero) or a negative error
+ * code.
+ */
+static inline int
+ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
+ (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+ 0, 0);
+ return (int) rv.v0;
+}
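+
+/*
+ * A minimal session sketch for the IRouter subchannel calls above:
+ * open, send one buffer, close.  The helper name is illustrative only;
+ * a real caller would also service SAL_IROUTER_INTR_{XMIT,RECV} events.
+ */
+static inline int
+sn_irtr_send_once_example(nasid_t nasid, char *buf, int len)
+{
+	int subch, sent;
+
+	subch = ia64_sn_irtr_open(nasid);
+	if (subch < 0)
+		return subch;	/* negative error code */
+	sent = ia64_sn_irtr_send(nasid, subch, buf, len);
+	ia64_sn_irtr_close(nasid, subch);
+	return sent;	/* bytes written, or a negative error code */
+}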
+
+/*
+ * Check whether any interrupts are pending for the system controller
+ * associated with 'nasid' and its subchannel 'subch'. The return
+ * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
+ * SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr(nasid_t nasid, int subch)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
+ (u64) nasid, (u64) subch, 0, 0, 0, 0);
+ return (int) rv.v0;
+}
+
+/*
+ * Enable the interrupt indicated by the intr parameter (either
+ * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON,
+ (u64) nasid, (u64) subch, intr, 0, 0, 0);
+ return (int) rv.v0;
+}
+
+/*
+ * Disable the interrupt indicated by the intr parameter (either
+ * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF,
+ (u64) nasid, (u64) subch, intr, 0, 0, 0);
+ return (int) rv.v0;
+}
+
+/*
+ * Set up a node as the point of contact for system controller
+ * environmental event delivery.
+ */
+static inline int
+ia64_sn_sysctl_event_init(nasid_t nasid)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_EVENT, (u64) nasid,
+ 0, 0, 0, 0, 0, 0);
+ return (int) rv.v0;
+}
+
+/*
+ * Ask the system controller on the specified nasid to reset
+ * the CX corelet clock. Only valid on TIO nodes.
+ */
+static inline int
+ia64_sn_sysctl_tio_clock_reset(nasid_t nasid)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_TIO_JLCK_RST,
+ nasid, 0, 0, 0, 0, 0);
+ if (rv.status != 0)
+ return (int)rv.status;
+ if (rv.v0 != 0)
+ return (int)rv.v0;
+
+ return 0;
+}
+
+/*
+ * Get the associated ioboard type for a given nasid.
+ */
+static inline s64
+ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_REENTRANT(isrv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD,
+ nasid, 0, 0, 0, 0, 0);
+ if (isrv.v0 != 0) {
+ *ioboard = isrv.v0;
+ return isrv.status;
+ }
+ if (isrv.v1 != 0) {
+ *ioboard = isrv.v1;
+ return isrv.status;
+ }
+
+ return isrv.status;
+}
+
+/**
+ * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
+ * @nasid: NASID of node to read
+ * @index: FIT entry index to be retrieved (0..n)
+ * @fitentry: 16 byte buffer where FIT entry will be stored.
+ * @banbuf: optional buffer for retrieving banner
+ * @banlen: length of banner buffer
+ *
+ * Access to the physical PROM chips needs to be serialized since reads and
+ * writes can't occur at the same time, so we need to call into the SAL when
+ * we want to look at the FIT entries on the chips.
+ *
+ * Returns:
+ * %SALRET_OK if ok
+ * %SALRET_INVALID_ARG if index too big
+ * %SALRET_NOT_IMPLEMENTED if running on older PROM
+ * ??? if nasid invalid OR banner buffer not large enough
+ */
+static inline int
+ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf,
+ u64 banlen)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry,
+ banbuf, banlen, 0, 0);
+ return (int) rv.status;
+}
+
+/*
+ * Initialize the SAL components of the system controller
+ * communication driver; specifically pass in a sizable buffer that
+ * can be used for allocation of subchannel queues as new subchannels
+ * are opened. "buf" points to the buffer, and "len" specifies its
+ * length.
+ */
+static inline int
+ia64_sn_irtr_init(nasid_t nasid, void *buf, int len)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT,
+ (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
+ return (int) rv.status;
+}
+
+/*
+ * Returns the nasid, subnode & slice corresponding to a SAPIC ID
+ *
+ * In:
+ * arg0 - SN_SAL_GET_SAPIC_INFO
+ * arg1 - sapicid (lid >> 16)
+ * Out:
+ * v0 - nasid
+ * v1 - subnode
+ * v2 - slice
+ */
+static inline u64
+ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0);
+
+/***** BEGIN HACK - temp til old proms no longer supported ********/
+ if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
+ if (nasid) *nasid = sapicid & 0xfff;
+ if (subnode) *subnode = (sapicid >> 13) & 1;
+ if (slice) *slice = (sapicid >> 12) & 3;
+ return 0;
+ }
+/***** END HACK *******/
+
+ if (ret_stuff.status < 0)
+ return ret_stuff.status;
+
+ if (nasid) *nasid = (int) ret_stuff.v0;
+ if (subnode) *subnode = (int) ret_stuff.v1;
+ if (slice) *slice = (int) ret_stuff.v2;
+ return 0;
+}
+
+/*
+ * Returns information about the HUB/SHUB.
+ * In:
+ * arg0 - SN_SAL_GET_SN_INFO
+ * arg1 - 0 (other values reserved for future use)
+ * Out:
+ * v0
+ * [7:0] - shub type (0=shub1, 1=shub2)
+ * [15:8] - Log2 max number of nodes in entire system (includes
+ * C-bricks, I-bricks, etc)
+ * [23:16] - Log2 of nodes per sharing domain
+ * [31:24] - partition ID
+ * [39:32] - coherency_id
+ * [47:40] - regionsize
+ * v1
+ * [15:0] - nasid mask (ex., 0x7ff for 11 bit nasid)
+ *	   [23:16] - bit position of low nasid bit
+ */
+static inline u64
+ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift,
+ u8 *systemsize, u8 *sharing_domain_size, u8 *partid, u8 *coher, u8 *reg)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0);
+
+/***** BEGIN HACK - temp til old proms no longer supported ********/
+ if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
+ int nasid = get_sapicid() & 0xfff;
+#define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL
+#define SH_SHUB_ID_NODES_PER_BIT_SHFT 48
+ if (shubtype) *shubtype = 0;
+ if (nasid_bitmask) *nasid_bitmask = 0x7ff;
+ if (nasid_shift) *nasid_shift = 38;
+ if (systemsize) *systemsize = 10;
+ if (sharing_domain_size) *sharing_domain_size = 8;
+ if (partid) *partid = ia64_sn_sysctl_partition_get(nasid);
+ if (coher) *coher = nasid >> 9;
+ if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >>
+ SH_SHUB_ID_NODES_PER_BIT_SHFT;
+ return 0;
+ }
+/***** END HACK *******/
+
+ if (ret_stuff.status < 0)
+ return ret_stuff.status;
+
+ if (shubtype) *shubtype = ret_stuff.v0 & 0xff;
+ if (systemsize) *systemsize = (ret_stuff.v0 >> 8) & 0xff;
+ if (sharing_domain_size) *sharing_domain_size = (ret_stuff.v0 >> 16) & 0xff;
+ if (partid) *partid = (ret_stuff.v0 >> 24) & 0xff;
+ if (coher) *coher = (ret_stuff.v0 >> 32) & 0xff;
+ if (reg) *reg = (ret_stuff.v0 >> 40) & 0xff;
+ if (nasid_bitmask) *nasid_bitmask = (ret_stuff.v1 & 0xffff);
+ if (nasid_shift) *nasid_shift = (ret_stuff.v1 >> 16) & 0xff;
+ return 0;
+}
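+
+/*
+ * A minimal usage sketch for ia64_sn_get_sn_info() above, assuming the
+ * caller only needs the shub type and nasid geometry (unused outputs
+ * may be NULL); the helper name is illustrative only.
+ */
+static inline int
+sn_shub_geometry_example(u8 *shubtype, u16 *nasid_mask, u8 *nasid_shift)
+{
+	return (int) ia64_sn_get_sn_info(0, shubtype, nasid_mask,
+					 nasid_shift, NULL, NULL, NULL,
+					 NULL, NULL);
+}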
+
+/*
+ * This is the access point to the Altix PROM hardware performance
+ * and status monitoring interface. For info on using this, see
+ * arch/ia64/include/asm/sn/sn2/sn_hwperf.h
+ */
+static inline int
+ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
+ u64 a3, u64 a4, int *v0)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid,
+ opcode, a0, a1, a2, a3, a4);
+ if (v0)
+ *v0 = (int) rv.v0;
+ return (int) rv.status;
+}
+
+static inline int
+ia64_sn_ioif_get_pci_topology(u64 buf, u64 len)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY, buf, len, 0, 0, 0, 0, 0);
+ return (int) rv.status;
+}
+
+/*
+ * BTE error recovery is implemented in SAL
+ */
+static inline int
+ia64_sn_bte_recovery(nasid_t nasid)
+{
+ struct ia64_sal_retval rv;
+
+ rv.status = 0;
+ SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, (u64)nasid, 0, 0, 0, 0, 0, 0);
+ if (rv.status == SALRET_NOT_IMPLEMENTED)
+ return 0;
+ return (int) rv.status;
+}
+
+static inline int
+ia64_sn_is_fake_prom(void)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_NOLOCK(rv, SN_SAL_FAKE_PROM, 0, 0, 0, 0, 0, 0, 0);
+ return (rv.status == 0);
+}
+
+static inline int
+ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set)
+{
+ struct ia64_sal_retval rv;
+
+ SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0);
+ if (rv.status != 0)
+ return rv.status;
+ *feature_set = rv.v0;
+ return 0;
+}
+
+static inline int
+ia64_sn_set_os_feature(int feature)
+{
+ struct ia64_sal_retval rv;
+
+ SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0);
+ return rv.status;
+}
+
+static inline int
+sn_inject_error(u64 paddr, u64 *data, u64 *ecc)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data,
+ (u64)ecc, 0, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+static inline int
+ia64_sn_set_cpu_number(int cpu)
+{
+ struct ia64_sal_retval rv;
+
+ SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
+	return rv.status;
+}
+
+static inline int
+ia64_sn_kernel_launch_event(void)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
+ return rv.status;
+}
+#endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/arch/ia64/include/asm/sn/tioca.h b/arch/ia64/include/asm/sn/tioca.h
new file mode 100644
index 000000000000..666222d7f0f6
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tioca.h
@@ -0,0 +1,596 @@
+#ifndef _ASM_IA64_SN_TIO_TIOCA_H
+#define _ASM_IA64_SN_TIO_TIOCA_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+#define TIOCA_PART_NUM 0xE020
+#define TIOCA_MFGR_NUM 0x24
+#define TIOCA_REV_A 0x1
+
+/*
+ * Register layout for TIO:CA. See below for bitmasks for each register.
+ */
+
+struct tioca {
+ u64 ca_id; /* 0x000000 */
+ u64 ca_control1; /* 0x000008 */
+ u64 ca_control2; /* 0x000010 */
+ u64 ca_status1; /* 0x000018 */
+ u64 ca_status2; /* 0x000020 */
+ u64 ca_gart_aperature; /* 0x000028 */
+ u64 ca_gfx_detach; /* 0x000030 */
+ u64 ca_inta_dest_addr; /* 0x000038 */
+ u64 ca_intb_dest_addr; /* 0x000040 */
+ u64 ca_err_int_dest_addr; /* 0x000048 */
+ u64 ca_int_status; /* 0x000050 */
+ u64 ca_int_status_alias; /* 0x000058 */
+ u64 ca_mult_error; /* 0x000060 */
+ u64 ca_mult_error_alias; /* 0x000068 */
+ u64 ca_first_error; /* 0x000070 */
+ u64 ca_int_mask; /* 0x000078 */
+ u64 ca_crm_pkterr_type; /* 0x000080 */
+ u64 ca_crm_pkterr_type_alias; /* 0x000088 */
+ u64 ca_crm_ct_error_detail_1; /* 0x000090 */
+ u64 ca_crm_ct_error_detail_2; /* 0x000098 */
+ u64 ca_crm_tnumto; /* 0x0000A0 */
+ u64 ca_gart_err; /* 0x0000A8 */
+ u64 ca_pcierr_type; /* 0x0000B0 */
+ u64 ca_pcierr_addr; /* 0x0000B8 */
+
+ u64 ca_pad_0000C0[3]; /* 0x0000{C0..D0} */
+
+ u64 ca_pci_rd_buf_flush; /* 0x0000D8 */
+ u64 ca_pci_dma_addr_extn; /* 0x0000E0 */
+ u64 ca_agp_dma_addr_extn; /* 0x0000E8 */
+ u64 ca_force_inta; /* 0x0000F0 */
+ u64 ca_force_intb; /* 0x0000F8 */
+ u64 ca_debug_vector_sel; /* 0x000100 */
+ u64 ca_debug_mux_core_sel; /* 0x000108 */
+ u64 ca_debug_mux_pci_sel; /* 0x000110 */
+ u64 ca_debug_domain_sel; /* 0x000118 */
+
+ u64 ca_pad_000120[28]; /* 0x0001{20..F8} */
+
+ u64 ca_gart_ptr_table; /* 0x200 */
+ u64 ca_gart_tlb_addr[8]; /* 0x2{08..40} */
+};
+
+/*
+ * Mask/shift definitions for TIO:CA registers. The convention here is
+ * to mainly use the names as they appear in the "TIO AEGIS Programmers'
+ * Reference" with a CA_ prefix added. Some exceptions were made to fix
+ * duplicate field names or to generalize fields that are common to
+ * different registers (ca_debug_mux_core_sel and ca_debug_mux_pci_sel for
+ * example).
+ *
+ * Fields consisting of a single bit have a single macro declaration
+ * to mask the bit.  Fields consisting of multiple bits
+ * have two declarations: one to mask the proper bits in a register, and
+ * a second with the suffix "_SHFT" to identify how far the mask needs to
+ * be shifted right to get its base value.
+ */
+
+/* ==== ca_control1 */
+#define CA_SYS_BIG_END (1ull << 0)
+#define CA_DMA_AGP_SWAP (1ull << 1)
+#define CA_DMA_PCI_SWAP (1ull << 2)
+#define CA_PIO_IO_SWAP (1ull << 3)
+#define CA_PIO_MEM_SWAP (1ull << 4)
+#define CA_GFX_WR_SWAP (1ull << 5)
+#define CA_AGP_FW_ENABLE (1ull << 6)
+#define CA_AGP_CAL_CYCLE (0x7ull << 7)
+#define CA_AGP_CAL_CYCLE_SHFT 7
+#define CA_AGP_CAL_PRSCL_BYP (1ull << 10)
+#define CA_AGP_INIT_CAL_ENB (1ull << 11)
+#define CA_INJ_ADDR_PERR (1ull << 12)
+#define CA_INJ_DATA_PERR (1ull << 13)
+ /* bits 15:14 unused */
+#define CA_PCIM_IO_NBE_AD (0x7ull << 16)
+#define CA_PCIM_IO_NBE_AD_SHFT 16
+#define CA_PCIM_FAST_BTB_ENB (1ull << 19)
+ /* bits 23:20 unused */
+#define CA_PIO_ADDR_OFFSET (0xffull << 24)
+#define CA_PIO_ADDR_OFFSET_SHFT 24
+ /* bits 35:32 unused */
+#define CA_AGPDMA_OP_COMBDELAY (0x1full << 36)
+#define CA_AGPDMA_OP_COMBDELAY_SHFT 36
+ /* bit 41 unused */
+#define CA_AGPDMA_OP_ENB_COMBDELAY (1ull << 42)
+#define CA_PCI_INT_LPCNT (0xffull << 44)
+#define CA_PCI_INT_LPCNT_SHFT 44
+ /* bits 63:52 unused */
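+
+/*
+ * A minimal sketch of the mask/_SHFT convention described at the top of
+ * this section, using the CA_AGP_CAL_CYCLE field of ca_control1; the
+ * helper name is illustrative only.
+ */
+static inline u64
+ca_get_agp_cal_cycle_example(u64 ca_control1)
+{
+	return (ca_control1 & CA_AGP_CAL_CYCLE) >> CA_AGP_CAL_CYCLE_SHFT;
+}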
+
+/* ==== ca_control2 */
+#define CA_AGP_LATENCY_TO (0xffull << 0)
+#define CA_AGP_LATENCY_TO_SHFT 0
+#define CA_PCI_LATENCY_TO (0xffull << 8)
+#define CA_PCI_LATENCY_TO_SHFT 8
+#define CA_PCI_MAX_RETRY (0x3ffull << 16)
+#define CA_PCI_MAX_RETRY_SHFT 16
+ /* bits 27:26 unused */
+#define CA_RT_INT_EN (0x3ull << 28)
+#define CA_RT_INT_EN_SHFT 28
+#define CA_MSI_INT_ENB (1ull << 30)
+#define CA_PCI_ARB_ERR_ENB (1ull << 31)
+#define CA_GART_MEM_PARAM (0x3ull << 32)
+#define CA_GART_MEM_PARAM_SHFT 32
+#define CA_GART_RD_PREFETCH_ENB (1ull << 34)
+#define CA_GART_WR_PREFETCH_ENB (1ull << 35)
+#define CA_GART_FLUSH_TLB (1ull << 36)
+ /* bits 39:37 unused */
+#define CA_CRM_TNUMTO_PERIOD (0x1fffull << 40)
+#define CA_CRM_TNUMTO_PERIOD_SHFT 40
+ /* bits 55:53 unused */
+#define CA_CRM_TNUMTO_ENB (1ull << 56)
+#define CA_CRM_PRESCALER_BYP (1ull << 57)
+ /* bits 59:58 unused */
+#define CA_CRM_MAX_CREDIT (0x7ull << 60)
+#define CA_CRM_MAX_CREDIT_SHFT 60
+ /* bit 63 unused */
+
+/* ==== ca_status1 */
+#define CA_CORELET_ID (0x3ull << 0)
+#define CA_CORELET_ID_SHFT 0
+#define CA_INTA_N (1ull << 2)
+#define CA_INTB_N (1ull << 3)
+#define CA_CRM_CREDIT_AVAIL (0x7ull << 4)
+#define CA_CRM_CREDIT_AVAIL_SHFT 4
+ /* bit 7 unused */
+#define CA_CRM_SPACE_AVAIL (0x7full << 8)
+#define CA_CRM_SPACE_AVAIL_SHFT 8
+ /* bit 15 unused */
+#define CA_GART_TLB_VAL (0xffull << 16)
+#define CA_GART_TLB_VAL_SHFT 16
+ /* bits 63:24 unused */
+
+/* ==== ca_status2 */
+#define CA_GFX_CREDIT_AVAIL (0xffull << 0)
+#define CA_GFX_CREDIT_AVAIL_SHFT 0
+#define CA_GFX_OPQ_AVAIL (0xffull << 8)
+#define CA_GFX_OPQ_AVAIL_SHFT 8
+#define CA_GFX_WRBUFF_AVAIL (0xffull << 16)
+#define CA_GFX_WRBUFF_AVAIL_SHFT 16
+#define CA_ADMA_OPQ_AVAIL (0xffull << 24)
+#define CA_ADMA_OPQ_AVAIL_SHFT 24
+#define CA_ADMA_WRBUFF_AVAIL (0xffull << 32)
+#define CA_ADMA_WRBUFF_AVAIL_SHFT 32
+#define CA_ADMA_RDBUFF_AVAIL (0x7full << 40)
+#define CA_ADMA_RDBUFF_AVAIL_SHFT 40
+#define CA_PCI_PIO_OP_STAT (1ull << 47)
+#define CA_PDMA_OPQ_AVAIL (0xfull << 48)
+#define CA_PDMA_OPQ_AVAIL_SHFT 48
+#define CA_PDMA_WRBUFF_AVAIL (0xfull << 52)
+#define CA_PDMA_WRBUFF_AVAIL_SHFT 52
+#define CA_PDMA_RDBUFF_AVAIL (0x3ull << 56)
+#define CA_PDMA_RDBUFF_AVAIL_SHFT 56
+ /* bits 63:58 unused */
+
+/* ==== ca_gart_aperature */
+#define CA_GART_AP_ENB_AGP (1ull << 0)
+#define CA_GART_PAGE_SIZE (1ull << 1)
+#define CA_GART_AP_ENB_PCI (1ull << 2)
+ /* bits 11:3 unused */
+#define CA_GART_AP_SIZE (0x3ffull << 12)
+#define CA_GART_AP_SIZE_SHFT 12
+#define CA_GART_AP_BASE (0x3ffffffffffull << 22)
+#define CA_GART_AP_BASE_SHFT 22
+
+/* ==== ca_inta_dest_addr
+ ==== ca_intb_dest_addr
+ ==== ca_err_int_dest_addr */
+ /* bits 2:0 unused */
+#define CA_INT_DEST_ADDR (0x7ffffffffffffull << 3)
+#define CA_INT_DEST_ADDR_SHFT 3
+ /* bits 55:54 unused */
+#define CA_INT_DEST_VECT (0xffull << 56)
+#define CA_INT_DEST_VECT_SHFT 56
+
+/* ==== ca_int_status */
+/* ==== ca_int_status_alias */
+/* ==== ca_mult_error */
+/* ==== ca_mult_error_alias */
+/* ==== ca_first_error */
+/* ==== ca_int_mask */
+#define CA_PCI_ERR (1ull << 0)
+ /* bits 3:1 unused */
+#define CA_GART_FETCH_ERR (1ull << 4)
+#define CA_GFX_WR_OVFLW (1ull << 5)
+#define CA_PIO_REQ_OVFLW (1ull << 6)
+#define CA_CRM_PKTERR (1ull << 7)
+#define CA_CRM_DVERR (1ull << 8)
+#define CA_TNUMTO (1ull << 9)
+#define CA_CXM_RSP_CRED_OVFLW (1ull << 10)
+#define CA_CXM_REQ_CRED_OVFLW (1ull << 11)
+#define CA_PIO_INVALID_ADDR (1ull << 12)
+#define CA_PCI_ARB_TO (1ull << 13)
+#define CA_AGP_REQ_OFLOW (1ull << 14)
+#define CA_SBA_TYPE1_ERR (1ull << 15)
+ /* bit 16 unused */
+#define CA_INTA (1ull << 17)
+#define CA_INTB (1ull << 18)
+#define CA_MULT_INTA (1ull << 19)
+#define CA_MULT_INTB (1ull << 20)
+#define CA_GFX_CREDIT_OVFLW (1ull << 21)
+ /* bits 63:22 unused */
+
+/* ==== ca_crm_pkterr_type */
+/* ==== ca_crm_pkterr_type_alias */
+#define CA_CRM_PKTERR_SBERR_HDR (1ull << 0)
+#define CA_CRM_PKTERR_DIDN (1ull << 1)
+#define CA_CRM_PKTERR_PACTYPE (1ull << 2)
+#define CA_CRM_PKTERR_INV_TNUM (1ull << 3)
+#define CA_CRM_PKTERR_ADDR_RNG (1ull << 4)
+#define CA_CRM_PKTERR_ADDR_ALGN (1ull << 5)
+#define CA_CRM_PKTERR_HDR_PARAM (1ull << 6)
+#define CA_CRM_PKTERR_CW_ERR (1ull << 7)
+#define CA_CRM_PKTERR_SBERR_NH (1ull << 8)
+#define CA_CRM_PKTERR_EARLY_TERM (1ull << 9)
+#define CA_CRM_PKTERR_EARLY_TAIL (1ull << 10)
+#define CA_CRM_PKTERR_MSSNG_TAIL (1ull << 11)
+#define CA_CRM_PKTERR_MSSNG_HDR (1ull << 12)
+ /* bits 15:13 unused */
+#define CA_FIRST_CRM_PKTERR_SBERR_HDR (1ull << 16)
+#define CA_FIRST_CRM_PKTERR_DIDN (1ull << 17)
+#define CA_FIRST_CRM_PKTERR_PACTYPE (1ull << 18)
+#define CA_FIRST_CRM_PKTERR_INV_TNUM (1ull << 19)
+#define CA_FIRST_CRM_PKTERR_ADDR_RNG (1ull << 20)
+#define CA_FIRST_CRM_PKTERR_ADDR_ALGN (1ull << 21)
+#define CA_FIRST_CRM_PKTERR_HDR_PARAM (1ull << 22)
+#define CA_FIRST_CRM_PKTERR_CW_ERR (1ull << 23)
+#define CA_FIRST_CRM_PKTERR_SBERR_NH (1ull << 24)
+#define CA_FIRST_CRM_PKTERR_EARLY_TERM (1ull << 25)
+#define CA_FIRST_CRM_PKTERR_EARLY_TAIL (1ull << 26)
+#define CA_FIRST_CRM_PKTERR_MSSNG_TAIL (1ull << 27)
+#define CA_FIRST_CRM_PKTERR_MSSNG_HDR (1ull << 28)
+ /* bits 63:29 unused */
+
+/* ==== ca_crm_ct_error_detail_1 */
+#define CA_PKT_TYPE (0xfull << 0)
+#define CA_PKT_TYPE_SHFT 0
+#define CA_SRC_ID (0x3ull << 4)
+#define CA_SRC_ID_SHFT 4
+#define CA_DATA_SZ (0x3ull << 6)
+#define CA_DATA_SZ_SHFT 6
+#define CA_TNUM (0xffull << 8)
+#define CA_TNUM_SHFT 8
+#define CA_DW_DATA_EN (0xffull << 16)
+#define CA_DW_DATA_EN_SHFT 16
+#define CA_GFX_CRED (0xffull << 24)
+#define CA_GFX_CRED_SHFT 24
+#define CA_MEM_RD_PARAM (0x3ull << 32)
+#define CA_MEM_RD_PARAM_SHFT 32
+#define CA_PIO_OP (1ull << 34)
+#define CA_CW_ERR (1ull << 35)
+ /* bits 62:36 unused */
+#define CA_VALID (1ull << 63)
+
+/* ==== ca_crm_ct_error_detail_2 */
+ /* bits 2:0 unused */
+#define CA_PKT_ADDR (0x1fffffffffffffull << 3)
+#define CA_PKT_ADDR_SHFT 3
+ /* bits 63:56 unused */
+
+/* ==== ca_crm_tnumto */
+#define CA_CRM_TNUMTO_VAL (0xffull << 0)
+#define CA_CRM_TNUMTO_VAL_SHFT 0
+#define CA_CRM_TNUMTO_WR (1ull << 8)
+ /* bits 63:9 unused */
+
+/* ==== ca_gart_err */
+#define CA_GART_ERR_SOURCE (0x3ull << 0)
+#define CA_GART_ERR_SOURCE_SHFT 0
+ /* bits 3:2 unused */
+#define CA_GART_ERR_ADDR (0xfffffffffull << 4)
+#define CA_GART_ERR_ADDR_SHFT 4
+ /* bits 63:40 unused */
+
+/* ==== ca_pcierr_type */
+#define CA_PCIERR_DATA (0xffffffffull << 0)
+#define CA_PCIERR_DATA_SHFT 0
+#define CA_PCIERR_ENB (0xfull << 32)
+#define CA_PCIERR_ENB_SHFT 32
+#define CA_PCIERR_CMD (0xfull << 36)
+#define CA_PCIERR_CMD_SHFT 36
+#define CA_PCIERR_A64 (1ull << 40)
+#define CA_PCIERR_SLV_SERR (1ull << 41)
+#define CA_PCIERR_SLV_WR_PERR (1ull << 42)
+#define CA_PCIERR_SLV_RD_PERR (1ull << 43)
+#define CA_PCIERR_MST_SERR (1ull << 44)
+#define CA_PCIERR_MST_WR_PERR (1ull << 45)
+#define CA_PCIERR_MST_RD_PERR (1ull << 46)
+#define CA_PCIERR_MST_MABT (1ull << 47)
+#define CA_PCIERR_MST_TABT (1ull << 48)
+#define CA_PCIERR_MST_RETRY_TOUT (1ull << 49)
+
+#define CA_PCIERR_TYPES \
+ (CA_PCIERR_A64|CA_PCIERR_SLV_SERR| \
+ CA_PCIERR_SLV_WR_PERR|CA_PCIERR_SLV_RD_PERR| \
+ CA_PCIERR_MST_SERR|CA_PCIERR_MST_WR_PERR|CA_PCIERR_MST_RD_PERR| \
+ CA_PCIERR_MST_MABT|CA_PCIERR_MST_TABT|CA_PCIERR_MST_RETRY_TOUT)
+
+ /* bits 63:50 unused */
+
+/* ==== ca_pci_dma_addr_extn */
+#define CA_UPPER_NODE_OFFSET (0x3full << 0)
+#define CA_UPPER_NODE_OFFSET_SHFT 0
+ /* bits 7:6 unused */
+#define CA_CHIPLET_ID (0x3ull << 8)
+#define CA_CHIPLET_ID_SHFT 8
+ /* bits 11:10 unused */
+#define CA_PCI_DMA_NODE_ID (0xffffull << 12)
+#define CA_PCI_DMA_NODE_ID_SHFT 12
+ /* bits 27:26 unused */
+#define CA_PCI_DMA_PIO_MEM_TYPE (1ull << 28)
+ /* bits 63:29 unused */
+
+
+/* ==== ca_agp_dma_addr_extn */
+ /* bits 19:0 unused */
+#define CA_AGP_DMA_NODE_ID (0xffffull << 20)
+#define CA_AGP_DMA_NODE_ID_SHFT 20
+ /* bits 27:26 unused */
+#define CA_AGP_DMA_PIO_MEM_TYPE (1ull << 28)
+ /* bits 63:29 unused */
+
+/* ==== ca_debug_vector_sel */
+#define CA_DEBUG_MN_VSEL (0xfull << 0)
+#define CA_DEBUG_MN_VSEL_SHFT 0
+#define CA_DEBUG_PP_VSEL (0xfull << 4)
+#define CA_DEBUG_PP_VSEL_SHFT 4
+#define CA_DEBUG_GW_VSEL (0xfull << 8)
+#define CA_DEBUG_GW_VSEL_SHFT 8
+#define CA_DEBUG_GT_VSEL (0xfull << 12)
+#define CA_DEBUG_GT_VSEL_SHFT 12
+#define CA_DEBUG_PD_VSEL (0xfull << 16)
+#define CA_DEBUG_PD_VSEL_SHFT 16
+#define CA_DEBUG_AD_VSEL (0xfull << 20)
+#define CA_DEBUG_AD_VSEL_SHFT 20
+#define CA_DEBUG_CX_VSEL (0xfull << 24)
+#define CA_DEBUG_CX_VSEL_SHFT 24
+#define CA_DEBUG_CR_VSEL (0xfull << 28)
+#define CA_DEBUG_CR_VSEL_SHFT 28
+#define CA_DEBUG_BA_VSEL (0xfull << 32)
+#define CA_DEBUG_BA_VSEL_SHFT 32
+#define CA_DEBUG_PE_VSEL (0xfull << 36)
+#define CA_DEBUG_PE_VSEL_SHFT 36
+#define CA_DEBUG_BO_VSEL (0xfull << 40)
+#define CA_DEBUG_BO_VSEL_SHFT 40
+#define CA_DEBUG_BI_VSEL (0xfull << 44)
+#define CA_DEBUG_BI_VSEL_SHFT 44
+#define CA_DEBUG_AS_VSEL (0xfull << 48)
+#define CA_DEBUG_AS_VSEL_SHFT 48
+#define CA_DEBUG_PS_VSEL (0xfull << 52)
+#define CA_DEBUG_PS_VSEL_SHFT 52
+#define CA_DEBUG_PM_VSEL (0xfull << 56)
+#define CA_DEBUG_PM_VSEL_SHFT 56
+ /* bits 63:60 unused */
+
+/* ==== ca_debug_mux_core_sel */
+/* ==== ca_debug_mux_pci_sel */
+#define CA_DEBUG_MSEL0 (0x7ull << 0)
+#define CA_DEBUG_MSEL0_SHFT 0
+ /* bit 3 unused */
+#define CA_DEBUG_NSEL0 (0x7ull << 4)
+#define CA_DEBUG_NSEL0_SHFT 4
+ /* bit 7 unused */
+#define CA_DEBUG_MSEL1 (0x7ull << 8)
+#define CA_DEBUG_MSEL1_SHFT 8
+ /* bit 11 unused */
+#define CA_DEBUG_NSEL1 (0x7ull << 12)
+#define CA_DEBUG_NSEL1_SHFT 12
+ /* bit 15 unused */
+#define CA_DEBUG_MSEL2 (0x7ull << 16)
+#define CA_DEBUG_MSEL2_SHFT 16
+ /* bit 19 unused */
+#define CA_DEBUG_NSEL2 (0x7ull << 20)
+#define CA_DEBUG_NSEL2_SHFT 20
+ /* bit 23 unused */
+#define CA_DEBUG_MSEL3 (0x7ull << 24)
+#define CA_DEBUG_MSEL3_SHFT 24
+ /* bit 27 unused */
+#define CA_DEBUG_NSEL3 (0x7ull << 28)
+#define CA_DEBUG_NSEL3_SHFT 28
+ /* bit 31 unused */
+#define CA_DEBUG_MSEL4 (0x7ull << 32)
+#define CA_DEBUG_MSEL4_SHFT 32
+ /* bit 35 unused */
+#define CA_DEBUG_NSEL4 (0x7ull << 36)
+#define CA_DEBUG_NSEL4_SHFT 36
+ /* bit 39 unused */
+#define CA_DEBUG_MSEL5 (0x7ull << 40)
+#define CA_DEBUG_MSEL5_SHFT 40
+ /* bit 43 unused */
+#define CA_DEBUG_NSEL5 (0x7ull << 44)
+#define CA_DEBUG_NSEL5_SHFT 44
+ /* bit 47 unused */
+#define CA_DEBUG_MSEL6 (0x7ull << 48)
+#define CA_DEBUG_MSEL6_SHFT 48
+ /* bit 51 unused */
+#define CA_DEBUG_NSEL6 (0x7ull << 52)
+#define CA_DEBUG_NSEL6_SHFT 52
+ /* bit 55 unused */
+#define CA_DEBUG_MSEL7 (0x7ull << 56)
+#define CA_DEBUG_MSEL7_SHFT 56
+ /* bit 59 unused */
+#define CA_DEBUG_NSEL7 (0x7ull << 60)
+#define CA_DEBUG_NSEL7_SHFT 60
+ /* bit 63 unused */
+
+
+/* ==== ca_debug_domain_sel */
+#define CA_DEBUG_DOMAIN_L (1ull << 0)
+#define CA_DEBUG_DOMAIN_H (1ull << 1)
+ /* bits 63:2 unused */
+
+/* ==== ca_gart_ptr_table */
+#define CA_GART_PTR_VAL (1ull << 0)
+ /* bits 11:1 unused */
+#define CA_GART_PTR_ADDR (0xfffffffffffull << 12)
+#define CA_GART_PTR_ADDR_SHFT 12
+ /* bits 63:56 unused */
+
+/* ==== ca_gart_tlb_addr[0-7] */
+#define CA_GART_TLB_ADDR (0xffffffffffffffull << 0)
+#define CA_GART_TLB_ADDR_SHFT 0
+ /* bits 62:56 unused */
+#define CA_GART_TLB_ENTRY_VAL (1ull << 63)
+
+/*
+ * PIO address space ranges for TIO:CA
+ */
+
+/* CA internal registers */
+#define CA_PIO_ADMIN 0x00000000
+#define CA_PIO_ADMIN_LEN 0x00010000
+
+/* GFX Write Buffer - Diagnostics */
+#define CA_PIO_GFX 0x00010000
+#define CA_PIO_GFX_LEN 0x00010000
+
+/* AGP DMA Write Buffer - Diagnostics */
+#define CA_PIO_AGP_DMAWRITE 0x00020000
+#define CA_PIO_AGP_DMAWRITE_LEN 0x00010000
+
+/* AGP DMA READ Buffer - Diagnostics */
+#define CA_PIO_AGP_DMAREAD 0x00030000
+#define CA_PIO_AGP_DMAREAD_LEN 0x00010000
+
+/* PCI Config Type 0 */
+#define CA_PIO_PCI_TYPE0_CONFIG 0x01000000
+#define CA_PIO_PCI_TYPE0_CONFIG_LEN 0x01000000
+
+/* PCI Config Type 1 */
+#define CA_PIO_PCI_TYPE1_CONFIG 0x02000000
+#define CA_PIO_PCI_TYPE1_CONFIG_LEN 0x01000000
+
+/* PCI I/O Cycles - mapped to PCI Address 0x00000000-0x04ffffff */
+#define CA_PIO_PCI_IO 0x03000000
+#define CA_PIO_PCI_IO_LEN 0x05000000
+
+/* PCI MEM Cycles - mapped to PCI with CA_PIO_ADDR_OFFSET of ca_control1 */
+/* use Fast Write if enabled and coretalk packet type is a GFX request */
+#define CA_PIO_PCI_MEM_OFFSET 0x08000000
+#define CA_PIO_PCI_MEM_OFFSET_LEN 0x08000000
+
+/* PCI MEM Cycles - mapped to PCI Address 0x00000000-0xbfffffff */
+/* use Fast Write if enabled and coretalk packet type is a GFX request */
+#define CA_PIO_PCI_MEM 0x40000000
+#define CA_PIO_PCI_MEM_LEN 0xc0000000
+
+/*
+ * DMA space
+ *
+ * The CA aperture (i.e. bus address range) mapped by the GART is segmented
+ * into two parts.  The lower portion of the aperture is used for mapping
+ * 32 bit PCI addresses which are managed by the dma interfaces in this
+ * file.  The upper portion of the aperture is used for mapping 48 bit
+ * AGP addresses.  The AGP portion of the aperture is managed by the
+ * agpgart_be.c driver in drivers/linux/agp.  There are ca-specific hooks
+ * in that driver to manipulate the gart, but management of the AGP portion
+ * of the aperture is the responsibility of that driver.
+ *
+ * CA allows three main types of DMA mapping:
+ *
+ * PCI 64-bit Managed by this driver
+ * PCI 32-bit Managed by this driver
+ * AGP 48-bit Managed by hooks in the /dev/agpgart driver
+ *
+ * All of the above can optionally be remapped through the GART. The following
+ * table lists the combinations of addressing types and GART remapping that
+ * are currently supported by the driver (h/w supports all, s/w limits this):
+ *
+ * PCI64 PCI32 AGP48
+ * GART no yes yes
+ * Direct yes yes no
+ *
+ * GART remapping of PCI64 is not done because there is no need to. The
+ * 64 bit PCI address holds all of the information necessary to target any
+ * memory in the system.
+ *
+ * AGP48 is always mapped through the GART. Management of the AGP48 portion
+ * of the aperture is the responsibility of code in the agpgart_be driver.
+ *
+ * The non-64 bit bus address space will currently be partitioned like this:
+ *
+ * 0xffff_ffff_ffff +--------
+ * | AGP48 direct
+ * | Space managed by this driver
+ * CA_AGP_DIRECT_BASE +--------
+ *			| AGP GART mapped (gfx aperture)
+ * | Space managed by /dev/agpgart driver
+ * | This range is exposed to the agpgart
+ *			| driver as the "graphics aperture"
+ * CA_AGP_MAPPED_BASE +-----
+ * | PCI GART mapped
+ * | Space managed by this driver
+ * CA_PCI32_MAPPED_BASE +----
+ * | PCI32 direct
+ * | Space managed by this driver
+ * 0xC000_0000 +--------
+ * (CA_PCI32_DIRECT_BASE)
+ *
+ * The bus address range CA_PCI32_MAPPED_BASE through CA_AGP_DIRECT_BASE
+ * is what we call the CA aperture. Addresses falling in this range will
+ * be remapped using the GART.
+ *
+ * The bus address range CA_AGP_MAPPED_BASE through CA_AGP_DIRECT_BASE
+ * is what we call the graphics aperture. This is a subset of the CA
+ * aperture and is under the control of the agpgart_be driver.
+ *
+ * CA_PCI32_MAPPED_BASE, CA_AGP_MAPPED_BASE, and CA_AGP_DIRECT_BASE are
+ * somewhat arbitrary values. The known constraints on choosing these are:
+ *
+ * 1) CA_AGP_DIRECT_BASE-CA_PCI32_MAPPED_BASE+1 (the CA aperture size)
+ *    must be one of the values supported by the ca_gart_aperature register.
+ *    Currently valid values are 4MB through 4096MB in power-of-2 increments.
+ *
+ * 2) CA_AGP_DIRECT_BASE-CA_AGP_MAPPED_BASE+1 (the gfx aperture size)
+ * must be in MB units since that's what the agpgart driver assumes.
+ */
+
+/*
+ * Define Bus DMA ranges. These are configurable (see constraints above)
+ * and will probably need tuning based on experience.
+ */
+
+
+/*
+ * 11/24/03
+ * CA has an addressing glitch w.r.t. PCI direct 32 bit DMA that makes it
+ * generally unusable. The problem is that for PCI direct 32
+ * DMA's, all 32 bits of the bus address are used to form the lower 32 bits
+ * of the coretalk address, and coretalk bits 38:32 come from a register.
+ * Since only PCI bus addresses 0xC0000000-0xFFFFFFFF (1GB) are available
+ * for DMA (the rest is allocated to PIO), host node addresses need to be
+ * such that their lower 32 bits fall in the 0xC0000000-0xffffffff range
+ * as well. So there can be no PCI32 direct DMA below 3GB!! For this
+ * reason we set the CA_PCI32_DIRECT_SIZE to 0 which essentially makes
+ * tioca_dma_direct32() a noop but preserves the code flow should this issue
+ * be fixed in a respin.
+ *
+ * For now, all PCI32 DMA's must be mapped through the GART.
+ */
+
+#define CA_PCI32_DIRECT_BASE 0xC0000000UL /* BASE not configurable */
+#define CA_PCI32_DIRECT_SIZE 0x00000000UL /* 0 MB */
+
+#define CA_PCI32_MAPPED_BASE 0xC0000000UL
+#define CA_PCI32_MAPPED_SIZE 0x40000000UL /* 2GB */
+
+#define CA_AGP_MAPPED_BASE 0x80000000UL
+#define CA_AGP_MAPPED_SIZE 0x40000000UL /* 2GB */
+
+#define CA_AGP_DIRECT_BASE 0x40000000UL /* 2GB */
+#define CA_AGP_DIRECT_SIZE 0x40000000UL
+
+#define CA_APERATURE_BASE (CA_AGP_MAPPED_BASE)
+#define CA_APERATURE_SIZE (CA_AGP_MAPPED_SIZE+CA_PCI32_MAPPED_SIZE)
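+
+/*
+ * A minimal compile-time sanity sketch, assuming the mapped and direct
+ * ranges are meant to tile the bus address space contiguously (which
+ * the values above, re-tuned per the 11/24/03 note, do):
+ */
+#if (CA_AGP_DIRECT_BASE + CA_AGP_DIRECT_SIZE) != CA_AGP_MAPPED_BASE
+#error "AGP direct range must end where the AGP mapped range begins"
+#endif
+#if (CA_AGP_MAPPED_BASE + CA_AGP_MAPPED_SIZE) != CA_PCI32_MAPPED_BASE
+#error "AGP mapped range must end where the PCI32 mapped range begins"
+#endif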
+
+#endif /* _ASM_IA64_SN_TIO_TIOCA_H */
diff --git a/arch/ia64/include/asm/sn/tioca_provider.h b/arch/ia64/include/asm/sn/tioca_provider.h
new file mode 100644
index 000000000000..9a820ac61be3
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tioca_provider.h
@@ -0,0 +1,207 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
+#define _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
+
+#include <asm/sn/tioca.h>
+
+/*
+ * WAR enables
+ * Defines for individual WARs. Each is a bitmask of applicable
+ * part revision numbers. (1 << 1) == rev A, (1 << 2) == rev B,
+ * (3 << 1) == (rev A or rev B), etc
+ */
+
+#define TIOCA_WAR_ENABLED(pv, tioca_common) \
+ ((1 << tioca_common->ca_rev) & pv)
+
+ /* TIO:ICE:FRZ:Freezer loses a PIO data ucred on PIO RD RSP with CW error */
+#define PV907908 (1 << 1)
+ /* ATI config space problems after BIOS execution starts */
+#define PV908234 (1 << 1)
+ /* CA:AGPDMA write request data mismatch with ABC1CL merge */
+#define PV895469 (1 << 1)
+	/* TIO:CA TLB invalidate of written GART entries possibly not occurring in CA */
+#define PV910244 (1 << 1)
+
+struct tioca_dmamap {
+ struct list_head cad_list; /* headed by ca_list */
+
+ dma_addr_t cad_dma_addr; /* Linux dma handle */
+ uint cad_gart_entry; /* start entry in ca_gart_pagemap */
+ uint cad_gart_size; /* #entries for this map */
+};
+
+/*
+ * Kernel only fields. Prom may look at this stuff for debugging only.
+ * Access this structure through the ca_kernel_private ptr.
+ */
+
+struct tioca_common;
+
+struct tioca_kernel {
+ struct tioca_common *ca_common; /* tioca this belongs to */
+ struct list_head ca_list; /* list of all ca's */
+ struct list_head ca_dmamaps;
+ spinlock_t ca_lock; /* Kernel lock */
+ cnodeid_t ca_closest_node;
+ struct list_head *ca_devices; /* bus->devices */
+
+ /*
+ * General GART stuff
+ */
+	u64	ca_ap_size;	/* size of aperture in bytes */
+	u32	ca_gart_entries;	/* # u64 entries in gart */
+	u32	ca_ap_pagesize;	/* aperture page size in bytes */
+	u64	ca_ap_bus_base;	/* bus address of CA aperture */
+ u64 ca_gart_size; /* gart size in bytes */
+ u64 *ca_gart; /* gart table vaddr */
+ u64 ca_gart_coretalk_addr; /* gart coretalk addr */
+ u8 ca_gart_iscoherent; /* used in tioca_tlbflush */
+
+ /* PCI GART convenience values */
+	u64	ca_pciap_base;	/* pci aperture bus base address */
+	u64	ca_pciap_size;	/* pci aperture size (bytes) */
+	u64	ca_pcigart_base;	/* pci GART bus base address */
+	u64	*ca_pcigart;	/* pci GART vm address */
+ u32 ca_pcigart_entries;
+ u32 ca_pcigart_start; /* PCI start index in ca_gart */
+ void *ca_pcigart_pagemap;
+
+ /* AGP GART convenience values */
+	u64	ca_gfxap_base;	/* gfx aperture bus base address */
+	u64	ca_gfxap_size;	/* gfx aperture size (bytes) */
+ u64 ca_gfxgart_base; /* gfx GART bus base address */
+ u64 *ca_gfxgart; /* gfx GART vm address */
+ u32 ca_gfxgart_entries;
+ u32 ca_gfxgart_start; /* agpgart start index in ca_gart */
+};
+
+/*
+ * Common tioca info shared between kernel and prom
+ *
+ * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES
+ * TO THE PROM VERSION.
+ */
+
+struct tioca_common {
+ struct pcibus_bussoft ca_common; /* common pciio header */
+
+ u32 ca_rev;
+ u32 ca_closest_nasid;
+
+ u64 ca_prom_private;
+ u64 ca_kernel_private;
+};
+
+/**
+ * tioca_paddr_to_gart - Convert an SGI coretalk address to a CA GART entry
+ * @paddr: page address to convert
+ *
+ * Convert a system [coretalk] address to a GART entry. GART entries are
+ * formed using the following:
+ *
+ * data = ( (1<<63) | ( (REMAP_NODE_ID << 40) | (MD_CHIPLET_ID << 38) |
+ * (REMAP_SYS_ADDR) ) >> 12 )
+ *
+ * DATA written to 1 GART TABLE Entry in system memory is remapped system
+ * addr for 1 page
+ *
+ * The data is for coretalk address format right shifted 12 bits with a
+ * valid bit.
+ *
+ * GART_TABLE_ENTRY [ 25:0 ] -- REMAP_SYS_ADDRESS[37:12].
+ * GART_TABLE_ENTRY [ 27:26 ] -- SHUB MD chiplet id.
+ * GART_TABLE_ENTRY [ 41:28 ] -- REMAP_NODE_ID.
+ * GART_TABLE_ENTRY [ 63 ] -- Valid Bit
+ */
+static inline u64
+tioca_paddr_to_gart(unsigned long paddr)
+{
+ /*
+ * We are assuming right now that paddr already has the correct
+ * format since the address from xtalk_dmaXXX should already have
+ * NODE_ID, CHIPLET_ID, and SYS_ADDR in the correct locations.
+ */
+
+ return ((paddr) >> 12) | (1UL << 63);
+}
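+
+/*
+ * A minimal worked example of the packing above, assuming a hypothetical
+ * coretalk address with REMAP_NODE_ID == 2, MD_CHIPLET_ID == 1 and a
+ * system page at 0x12345000; the helper name is illustrative only.
+ */
+static inline u64
+tioca_gart_entry_example(void)
+{
+	unsigned long paddr = (2UL << 40) | (1UL << 38) | 0x12345000UL;
+
+	/* yields (paddr >> 12) with bit 63 (the valid bit) set */
+	return tioca_paddr_to_gart(paddr);
+}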
+
+/**
+ * tioca_physpage_to_gart - Map a host physical page for SGI CA based DMA
+ * @page_addr: system page address to map
+ */
+
+static inline unsigned long
+tioca_physpage_to_gart(u64 page_addr)
+{
+ u64 coretalk_addr;
+
+ coretalk_addr = PHYS_TO_TIODMA(page_addr);
+ if (!coretalk_addr) {
+ return 0;
+ }
+
+ return tioca_paddr_to_gart(coretalk_addr);
+}
+
+/**
+ * tioca_tlbflush - invalidate cached SGI CA GART TLB entries
+ * @tioca_kernel: CA context
+ *
+ * Invalidate TLB entries for a given CA GART. The main complexity is
+ * accounting for the revA bug.
+ */
+static inline void
+tioca_tlbflush(struct tioca_kernel *tioca_kernel)
+{
+ volatile u64 tmp;
+ volatile struct tioca __iomem *ca_base;
+ struct tioca_common *tioca_common;
+
+ tioca_common = tioca_kernel->ca_common;
+ ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
+
+ /*
+ * Explicit flushes not needed if GART is in cached mode
+ */
+ if (tioca_kernel->ca_gart_iscoherent) {
+ if (TIOCA_WAR_ENABLED(PV910244, tioca_common)) {
+ /*
+ * PV910244: RevA CA needs explicit flushes.
+ * Need to put GART into uncached mode before
+ * flushing otherwise the explicit flush is ignored.
+ *
+ * Alternate WAR would be to leave GART cached and
+ * touch every CL aligned GART entry.
+ */
+
+ __sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
+ __sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
+ __sn_setq_relaxed(&ca_base->ca_control2,
+ (0x2ull << CA_GART_MEM_PARAM_SHFT));
+ tmp = __sn_readq_relaxed(&ca_base->ca_control2);
+ }
+
+ return;
+ }
+
+ /*
+ * Gart in uncached mode ... need an explicit flush.
+ */
+
+ __sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
+ tmp = __sn_readq_relaxed(&ca_base->ca_control2);
+}
+
+extern u32 tioca_gart_found;
+extern struct list_head tioca_list;
+extern int tioca_init_provider(void);
+extern void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern);
+#endif /* _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H */
diff --git a/arch/ia64/include/asm/sn/tioce.h b/arch/ia64/include/asm/sn/tioce.h
new file mode 100644
index 000000000000..893468e1b41b
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tioce.h
@@ -0,0 +1,760 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_IA64_SN_TIOCE_H__
+#define __ASM_IA64_SN_TIOCE_H__
+
+/* CE ASIC part & mfgr information */
+#define TIOCE_PART_NUM 0xCE00
+#define TIOCE_SRC_ID 0x01
+#define TIOCE_REV_A 0x1
+
+/* CE Virtual PPB Vendor/Device IDs */
+#define CE_VIRT_PPB_VENDOR_ID 0x10a9
+#define CE_VIRT_PPB_DEVICE_ID 0x4002
+
+/* CE Host Bridge Vendor/Device IDs */
+#define CE_HOST_BRIDGE_VENDOR_ID 0x10a9
+#define CE_HOST_BRIDGE_DEVICE_ID 0x4001
+
+
+#define TIOCE_NUM_M40_ATES 4096
+#define TIOCE_NUM_M3240_ATES 2048
+#define TIOCE_NUM_PORTS 2
+
+/*
+ * Register layout for TIOCE. MMR offsets are shown at the far right of the
+ * structure definition.
+ */
+typedef volatile struct tioce {
+ /*
+ * ADMIN : Administration Registers
+ */
+ u64 ce_adm_id; /* 0x000000 */
+ u64 ce_pad_000008; /* 0x000008 */
+ u64 ce_adm_dyn_credit_status; /* 0x000010 */
+ u64 ce_adm_last_credit_status; /* 0x000018 */
+ u64 ce_adm_credit_limit; /* 0x000020 */
+ u64 ce_adm_force_credit; /* 0x000028 */
+ u64 ce_adm_control; /* 0x000030 */
+ u64 ce_adm_mmr_chn_timeout; /* 0x000038 */
+ u64 ce_adm_ssp_ure_timeout; /* 0x000040 */
+ u64 ce_adm_ssp_dre_timeout; /* 0x000048 */
+ u64 ce_adm_ssp_debug_sel; /* 0x000050 */
+ u64 ce_adm_int_status; /* 0x000058 */
+ u64 ce_adm_int_status_alias; /* 0x000060 */
+ u64 ce_adm_int_mask; /* 0x000068 */
+ u64 ce_adm_int_pending; /* 0x000070 */
+ u64 ce_adm_force_int; /* 0x000078 */
+ u64 ce_adm_ure_ups_buf_barrier_flush; /* 0x000080 */
+ u64 ce_adm_int_dest[15]; /* 0x000088 -- 0x0000F8 */
+ u64 ce_adm_error_summary; /* 0x000100 */
+ u64 ce_adm_error_summary_alias; /* 0x000108 */
+ u64 ce_adm_error_mask; /* 0x000110 */
+ u64 ce_adm_first_error; /* 0x000118 */
+ u64 ce_adm_error_overflow; /* 0x000120 */
+ u64 ce_adm_error_overflow_alias; /* 0x000128 */
+ u64 ce_pad_000130[2]; /* 0x000130 -- 0x000138 */
+ u64 ce_adm_tnum_error; /* 0x000140 */
+ u64 ce_adm_mmr_err_detail; /* 0x000148 */
+ u64 ce_adm_msg_sram_perr_detail; /* 0x000150 */
+ u64 ce_adm_bap_sram_perr_detail; /* 0x000158 */
+ u64 ce_adm_ce_sram_perr_detail; /* 0x000160 */
+ u64 ce_adm_ce_credit_oflow_detail; /* 0x000168 */
+ u64 ce_adm_tx_link_idle_max_timer; /* 0x000170 */
+ u64 ce_adm_pcie_debug_sel; /* 0x000178 */
+ u64 ce_pad_000180[16]; /* 0x000180 -- 0x0001F8 */
+
+ u64 ce_adm_pcie_debug_sel_top; /* 0x000200 */
+ u64 ce_adm_pcie_debug_lat_sel_lo_top; /* 0x000208 */
+ u64 ce_adm_pcie_debug_lat_sel_hi_top; /* 0x000210 */
+ u64 ce_adm_pcie_debug_trig_sel_top; /* 0x000218 */
+ u64 ce_adm_pcie_debug_trig_lat_sel_lo_top; /* 0x000220 */
+ u64 ce_adm_pcie_debug_trig_lat_sel_hi_top; /* 0x000228 */
+ u64 ce_adm_pcie_trig_compare_top; /* 0x000230 */
+ u64 ce_adm_pcie_trig_compare_en_top; /* 0x000238 */
+ u64 ce_adm_ssp_debug_sel_top; /* 0x000240 */
+ u64 ce_adm_ssp_debug_lat_sel_lo_top; /* 0x000248 */
+ u64 ce_adm_ssp_debug_lat_sel_hi_top; /* 0x000250 */
+ u64 ce_adm_ssp_debug_trig_sel_top; /* 0x000258 */
+ u64 ce_adm_ssp_debug_trig_lat_sel_lo_top; /* 0x000260 */
+ u64 ce_adm_ssp_debug_trig_lat_sel_hi_top; /* 0x000268 */
+ u64 ce_adm_ssp_trig_compare_top; /* 0x000270 */
+ u64 ce_adm_ssp_trig_compare_en_top; /* 0x000278 */
+ u64 ce_pad_000280[48]; /* 0x000280 -- 0x0003F8 */
+
+ u64 ce_adm_bap_ctrl; /* 0x000400 */
+ u64 ce_pad_000408[127]; /* 0x000408 -- 0x0007F8 */
+
+ u64 ce_msg_buf_data63_0[35]; /* 0x000800 -- 0x000918 */
+ u64 ce_pad_000920[29]; /* 0x000920 -- 0x0009F8 */
+
+ u64 ce_msg_buf_data127_64[35]; /* 0x000A00 -- 0x000B18 */
+ u64 ce_pad_000B20[29]; /* 0x000B20 -- 0x000BF8 */
+
+ u64 ce_msg_buf_parity[35]; /* 0x000C00 -- 0x000D18 */
+ u64 ce_pad_000D20[29]; /* 0x000D20 -- 0x000DF8 */
+
+ u64 ce_pad_000E00[576]; /* 0x000E00 -- 0x001FF8 */
+
+ /*
+ * LSI : LSI's PCI Express Link Registers (Link#1 and Link#2)
+	 * Link#1 MMRs start at 0x002000, Link#2 MMRs at 0x003000
+	 * NOTE: in the comment offsets at far right, let 'z' = {2 or 3}
+ */
+ #define ce_lsi(link_num) ce_lsi[link_num-1]
+ struct ce_lsi_reg {
+ u64 ce_lsi_lpu_id; /* 0x00z000 */
+ u64 ce_lsi_rst; /* 0x00z008 */
+ u64 ce_lsi_dbg_stat; /* 0x00z010 */
+ u64 ce_lsi_dbg_cfg; /* 0x00z018 */
+ u64 ce_lsi_ltssm_ctrl; /* 0x00z020 */
+ u64 ce_lsi_lk_stat; /* 0x00z028 */
+ u64 ce_pad_00z030[2]; /* 0x00z030 -- 0x00z038 */
+ u64 ce_lsi_int_and_stat; /* 0x00z040 */
+ u64 ce_lsi_int_mask; /* 0x00z048 */
+ u64 ce_pad_00z050[22]; /* 0x00z050 -- 0x00z0F8 */
+ u64 ce_lsi_lk_perf_cnt_sel; /* 0x00z100 */
+ u64 ce_pad_00z108; /* 0x00z108 */
+ u64 ce_lsi_lk_perf_cnt_ctrl; /* 0x00z110 */
+ u64 ce_pad_00z118; /* 0x00z118 */
+ u64 ce_lsi_lk_perf_cnt1; /* 0x00z120 */
+ u64 ce_lsi_lk_perf_cnt1_test; /* 0x00z128 */
+ u64 ce_lsi_lk_perf_cnt2; /* 0x00z130 */
+ u64 ce_lsi_lk_perf_cnt2_test; /* 0x00z138 */
+ u64 ce_pad_00z140[24]; /* 0x00z140 -- 0x00z1F8 */
+ u64 ce_lsi_lk_lyr_cfg; /* 0x00z200 */
+ u64 ce_lsi_lk_lyr_status; /* 0x00z208 */
+ u64 ce_lsi_lk_lyr_int_stat; /* 0x00z210 */
+ u64 ce_lsi_lk_ly_int_stat_test; /* 0x00z218 */
+ u64 ce_lsi_lk_ly_int_stat_mask; /* 0x00z220 */
+ u64 ce_pad_00z228[3]; /* 0x00z228 -- 0x00z238 */
+ u64 ce_lsi_fc_upd_ctl; /* 0x00z240 */
+ u64 ce_pad_00z248[3]; /* 0x00z248 -- 0x00z258 */
+ u64 ce_lsi_flw_ctl_upd_to_timer; /* 0x00z260 */
+ u64 ce_lsi_flw_ctl_upd_timer0; /* 0x00z268 */
+ u64 ce_lsi_flw_ctl_upd_timer1; /* 0x00z270 */
+ u64 ce_pad_00z278[49]; /* 0x00z278 -- 0x00z3F8 */
+ u64 ce_lsi_freq_nak_lat_thrsh; /* 0x00z400 */
+ u64 ce_lsi_ack_nak_lat_tmr; /* 0x00z408 */
+ u64 ce_lsi_rply_tmr_thr; /* 0x00z410 */
+ u64 ce_lsi_rply_tmr; /* 0x00z418 */
+ u64 ce_lsi_rply_num_stat; /* 0x00z420 */
+ u64 ce_lsi_rty_buf_max_addr; /* 0x00z428 */
+ u64 ce_lsi_rty_fifo_ptr; /* 0x00z430 */
+ u64 ce_lsi_rty_fifo_rd_wr_ptr; /* 0x00z438 */
+ u64 ce_lsi_rty_fifo_cred; /* 0x00z440 */
+ u64 ce_lsi_seq_cnt; /* 0x00z448 */
+ u64 ce_lsi_ack_sent_seq_num; /* 0x00z450 */
+ u64 ce_lsi_seq_cnt_fifo_max_addr; /* 0x00z458 */
+ u64 ce_lsi_seq_cnt_fifo_ptr; /* 0x00z460 */
+ u64 ce_lsi_seq_cnt_rd_wr_ptr; /* 0x00z468 */
+ u64 ce_lsi_tx_lk_ts_ctl; /* 0x00z470 */
+ u64 ce_pad_00z478; /* 0x00z478 */
+ u64 ce_lsi_mem_addr_ctl; /* 0x00z480 */
+ u64 ce_lsi_mem_d_ld0; /* 0x00z488 */
+ u64 ce_lsi_mem_d_ld1; /* 0x00z490 */
+ u64 ce_lsi_mem_d_ld2; /* 0x00z498 */
+ u64 ce_lsi_mem_d_ld3; /* 0x00z4A0 */
+ u64 ce_lsi_mem_d_ld4; /* 0x00z4A8 */
+ u64 ce_pad_00z4B0[2]; /* 0x00z4B0 -- 0x00z4B8 */
+ u64 ce_lsi_rty_d_cnt; /* 0x00z4C0 */
+ u64 ce_lsi_seq_buf_cnt; /* 0x00z4C8 */
+ u64 ce_lsi_seq_buf_bt_d; /* 0x00z4D0 */
+ u64 ce_pad_00z4D8; /* 0x00z4D8 */
+ u64 ce_lsi_ack_lat_thr; /* 0x00z4E0 */
+ u64 ce_pad_00z4E8[3]; /* 0x00z4E8 -- 0x00z4F8 */
+ u64 ce_lsi_nxt_rcv_seq_1_cntr; /* 0x00z500 */
+ u64 ce_lsi_unsp_dllp_rcvd; /* 0x00z508 */
+ u64 ce_lsi_rcv_lk_ts_ctl; /* 0x00z510 */
+ u64 ce_pad_00z518[29]; /* 0x00z518 -- 0x00z5F8 */
+ u64 ce_lsi_phy_lyr_cfg; /* 0x00z600 */
+ u64 ce_pad_00z608; /* 0x00z608 */
+ u64 ce_lsi_phy_lyr_int_stat; /* 0x00z610 */
+ u64 ce_lsi_phy_lyr_int_stat_test; /* 0x00z618 */
+ u64 ce_lsi_phy_lyr_int_mask; /* 0x00z620 */
+ u64 ce_pad_00z628[11]; /* 0x00z628 -- 0x00z678 */
+ u64 ce_lsi_rcv_phy_cfg; /* 0x00z680 */
+ u64 ce_lsi_rcv_phy_stat1; /* 0x00z688 */
+ u64 ce_lsi_rcv_phy_stat2; /* 0x00z690 */
+ u64 ce_lsi_rcv_phy_stat3; /* 0x00z698 */
+ u64 ce_lsi_rcv_phy_int_stat; /* 0x00z6A0 */
+ u64 ce_lsi_rcv_phy_int_stat_test; /* 0x00z6A8 */
+ u64 ce_lsi_rcv_phy_int_mask; /* 0x00z6B0 */
+ u64 ce_pad_00z6B8[9]; /* 0x00z6B8 -- 0x00z6F8 */
+ u64 ce_lsi_tx_phy_cfg; /* 0x00z700 */
+ u64 ce_lsi_tx_phy_stat; /* 0x00z708 */
+ u64 ce_lsi_tx_phy_int_stat; /* 0x00z710 */
+ u64 ce_lsi_tx_phy_int_stat_test; /* 0x00z718 */
+ u64 ce_lsi_tx_phy_int_mask; /* 0x00z720 */
+ u64 ce_lsi_tx_phy_stat2; /* 0x00z728 */
+ u64 ce_pad_00z730[10]; /* 0x00z730 -- 0x00z778 */
+ u64 ce_lsi_ltssm_cfg1; /* 0x00z780 */
+ u64 ce_lsi_ltssm_cfg2; /* 0x00z788 */
+ u64 ce_lsi_ltssm_cfg3; /* 0x00z790 */
+ u64 ce_lsi_ltssm_cfg4; /* 0x00z798 */
+ u64 ce_lsi_ltssm_cfg5; /* 0x00z7A0 */
+ u64 ce_lsi_ltssm_stat1; /* 0x00z7A8 */
+ u64 ce_lsi_ltssm_stat2; /* 0x00z7B0 */
+ u64 ce_lsi_ltssm_int_stat; /* 0x00z7B8 */
+ u64 ce_lsi_ltssm_int_stat_test; /* 0x00z7C0 */
+ u64 ce_lsi_ltssm_int_mask; /* 0x00z7C8 */
+ u64 ce_lsi_ltssm_stat_wr_en; /* 0x00z7D0 */
+ u64 ce_pad_00z7D8[5]; /* 0x00z7D8 -- 0x00z7F8 */
+ u64 ce_lsi_gb_cfg1; /* 0x00z800 */
+ u64 ce_lsi_gb_cfg2; /* 0x00z808 */
+ u64 ce_lsi_gb_cfg3; /* 0x00z810 */
+ u64 ce_lsi_gb_cfg4; /* 0x00z818 */
+ u64 ce_lsi_gb_stat; /* 0x00z820 */
+ u64 ce_lsi_gb_int_stat; /* 0x00z828 */
+ u64 ce_lsi_gb_int_stat_test; /* 0x00z830 */
+ u64 ce_lsi_gb_int_mask; /* 0x00z838 */
+ u64 ce_lsi_gb_pwr_dn1; /* 0x00z840 */
+ u64 ce_lsi_gb_pwr_dn2; /* 0x00z848 */
+ u64 ce_pad_00z850[246]; /* 0x00z850 -- 0x00zFF8 */
+ } ce_lsi[2];
+
+ u64 ce_pad_004000[10]; /* 0x004000 -- 0x004048 */
+
+ /*
+ * CRM: Coretalk Receive Module Registers
+ */
+ u64 ce_crm_debug_mux; /* 0x004050 */
+ u64 ce_pad_004058; /* 0x004058 */
+ u64 ce_crm_ssp_err_cmd_wrd; /* 0x004060 */
+ u64 ce_crm_ssp_err_addr; /* 0x004068 */
+ u64 ce_crm_ssp_err_syn; /* 0x004070 */
+
+ u64 ce_pad_004078[499]; /* 0x004078 -- 0x005008 */
+
+ /*
+ * CXM: Coretalk Xmit Module Registers
+ */
+ u64 ce_cxm_dyn_credit_status; /* 0x005010 */
+ u64 ce_cxm_last_credit_status; /* 0x005018 */
+ u64 ce_cxm_credit_limit; /* 0x005020 */
+ u64 ce_cxm_force_credit; /* 0x005028 */
+ u64 ce_cxm_disable_bypass; /* 0x005030 */
+ u64 ce_pad_005038[3]; /* 0x005038 -- 0x005048 */
+ u64 ce_cxm_debug_mux; /* 0x005050 */
+
+ u64 ce_pad_005058[501]; /* 0x005058 -- 0x005FF8 */
+
+ /*
+ * DTL: Downstream Transaction Layer Regs (Link#1 and Link#2)
+ * DTL: Link#1 MMRs start at 0x006000, Link#2 MMRs at 0x008000
+ * DTL: in the offset comments at far right, let 'y' = {6 or 8}
+ *
+ * UTL: Upstream Transaction Layer Regs (Link#1 and Link#2)
+ * UTL: Link#1 MMRs start at 0x007000, Link#2 MMRs at 0x009000
+ * UTL: in the offset comments at far right, let 'z' = {7 or 9}
+ */
+ #define ce_dtl(link_num) ce_dtl_utl[link_num-1]
+ #define ce_utl(link_num) ce_dtl_utl[link_num-1]
+ struct ce_dtl_utl_reg {
+ /* DTL */
+ u64 ce_dtl_dtdr_credit_limit; /* 0x00y000 */
+ u64 ce_dtl_dtdr_credit_force; /* 0x00y008 */
+ u64 ce_dtl_dyn_credit_status; /* 0x00y010 */
+ u64 ce_dtl_dtl_last_credit_stat; /* 0x00y018 */
+ u64 ce_dtl_dtl_ctrl; /* 0x00y020 */
+ u64 ce_pad_00y028[5]; /* 0x00y028 -- 0x00y048 */
+ u64 ce_dtl_debug_sel; /* 0x00y050 */
+ u64 ce_pad_00y058[501]; /* 0x00y058 -- 0x00yFF8 */
+
+ /* UTL */
+ u64 ce_utl_utl_ctrl; /* 0x00z000 */
+ u64 ce_utl_debug_sel; /* 0x00z008 */
+ u64 ce_pad_00z010[510]; /* 0x00z010 -- 0x00zFF8 */
+ } ce_dtl_utl[2];
+
+ u64 ce_pad_00A000[514]; /* 0x00A000 -- 0x00B008 */
+
+ /*
+ * URE: Upstream Request Engine
+ */
+ u64 ce_ure_dyn_credit_status; /* 0x00B010 */
+ u64 ce_ure_last_credit_status; /* 0x00B018 */
+ u64 ce_ure_credit_limit; /* 0x00B020 */
+ u64 ce_pad_00B028; /* 0x00B028 */
+ u64 ce_ure_control; /* 0x00B030 */
+ u64 ce_ure_status; /* 0x00B038 */
+ u64 ce_pad_00B040[2]; /* 0x00B040 -- 0x00B048 */
+ u64 ce_ure_debug_sel; /* 0x00B050 */
+ u64 ce_ure_pcie_debug_sel; /* 0x00B058 */
+ u64 ce_ure_ssp_err_cmd_wrd; /* 0x00B060 */
+ u64 ce_ure_ssp_err_addr; /* 0x00B068 */
+ u64 ce_ure_page_map; /* 0x00B070 */
+ u64 ce_ure_dir_map[TIOCE_NUM_PORTS]; /* 0x00B078 */
+ u64 ce_ure_pipe_sel1; /* 0x00B088 */
+ u64 ce_ure_pipe_mask1; /* 0x00B090 */
+ u64 ce_ure_pipe_sel2; /* 0x00B098 */
+ u64 ce_ure_pipe_mask2; /* 0x00B0A0 */
+ u64 ce_ure_pcie1_credits_sent; /* 0x00B0A8 */
+ u64 ce_ure_pcie1_credits_used; /* 0x00B0B0 */
+ u64 ce_ure_pcie1_credit_limit; /* 0x00B0B8 */
+ u64 ce_ure_pcie2_credits_sent; /* 0x00B0C0 */
+ u64 ce_ure_pcie2_credits_used; /* 0x00B0C8 */
+ u64 ce_ure_pcie2_credit_limit; /* 0x00B0D0 */
+ u64 ce_ure_pcie_force_credit; /* 0x00B0D8 */
+ u64 ce_ure_rd_tnum_val; /* 0x00B0E0 */
+ u64 ce_ure_rd_tnum_rsp_rcvd; /* 0x00B0E8 */
+ u64 ce_ure_rd_tnum_esent_timer; /* 0x00B0F0 */
+ u64 ce_ure_rd_tnum_error; /* 0x00B0F8 */
+ u64 ce_ure_rd_tnum_first_cl; /* 0x00B100 */
+ u64 ce_ure_rd_tnum_link_buf; /* 0x00B108 */
+ u64 ce_ure_wr_tnum_val; /* 0x00B110 */
+ u64 ce_ure_sram_err_addr0; /* 0x00B118 */
+ u64 ce_ure_sram_err_addr1; /* 0x00B120 */
+ u64 ce_ure_sram_err_addr2; /* 0x00B128 */
+ u64 ce_ure_sram_rd_addr0; /* 0x00B130 */
+ u64 ce_ure_sram_rd_addr1; /* 0x00B138 */
+ u64 ce_ure_sram_rd_addr2; /* 0x00B140 */
+ u64 ce_ure_sram_wr_addr0; /* 0x00B148 */
+ u64 ce_ure_sram_wr_addr1; /* 0x00B150 */
+ u64 ce_ure_sram_wr_addr2; /* 0x00B158 */
+ u64 ce_ure_buf_flush10; /* 0x00B160 */
+ u64 ce_ure_buf_flush11; /* 0x00B168 */
+ u64 ce_ure_buf_flush12; /* 0x00B170 */
+ u64 ce_ure_buf_flush13; /* 0x00B178 */
+ u64 ce_ure_buf_flush20; /* 0x00B180 */
+ u64 ce_ure_buf_flush21; /* 0x00B188 */
+ u64 ce_ure_buf_flush22; /* 0x00B190 */
+ u64 ce_ure_buf_flush23; /* 0x00B198 */
+ u64 ce_ure_pcie_control1; /* 0x00B1A0 */
+ u64 ce_ure_pcie_control2; /* 0x00B1A8 */
+
+ u64 ce_pad_00B1B0[458]; /* 0x00B1B0 -- 0x00BFF8 */
+
+ /* Upstream Data Buffer, Port1 */
+ struct ce_ure_maint_ups_dat1_data {
+ u64 data63_0[512]; /* 0x00C000 -- 0x00CFF8 */
+ u64 data127_64[512]; /* 0x00D000 -- 0x00DFF8 */
+ u64 parity[512]; /* 0x00E000 -- 0x00EFF8 */
+ } ce_ure_maint_ups_dat1;
+
+ /* Upstream Header Buffer, Port1 */
+ struct ce_ure_maint_ups_hdr1_data {
+ u64 data63_0[512]; /* 0x00F000 -- 0x00FFF8 */
+ u64 data127_64[512]; /* 0x010000 -- 0x010FF8 */
+ u64 parity[512]; /* 0x011000 -- 0x011FF8 */
+ } ce_ure_maint_ups_hdr1;
+
+ /* Upstream Data Buffer, Port2 */
+ struct ce_ure_maint_ups_dat2_data {
+ u64 data63_0[512]; /* 0x012000 -- 0x012FF8 */
+ u64 data127_64[512]; /* 0x013000 -- 0x013FF8 */
+ u64 parity[512]; /* 0x014000 -- 0x014FF8 */
+ } ce_ure_maint_ups_dat2;
+
+ /* Upstream Header Buffer, Port2 */
+ struct ce_ure_maint_ups_hdr2_data {
+ u64 data63_0[512]; /* 0x015000 -- 0x015FF8 */
+ u64 data127_64[512]; /* 0x016000 -- 0x016FF8 */
+ u64 parity[512]; /* 0x017000 -- 0x017FF8 */
+ } ce_ure_maint_ups_hdr2;
+
+ /* Downstream Data Buffer */
+ struct ce_ure_maint_dns_dat_data {
+ u64 data63_0[512]; /* 0x018000 -- 0x018FF8 */
+ u64 data127_64[512]; /* 0x019000 -- 0x019FF8 */
+ u64 parity[512]; /* 0x01A000 -- 0x01AFF8 */
+ } ce_ure_maint_dns_dat;
+
+ /* Downstream Header Buffer */
+ struct ce_ure_maint_dns_hdr_data {
+ u64 data31_0[64]; /* 0x01B000 -- 0x01B1F8 */
+ u64 data95_32[64]; /* 0x01B200 -- 0x01B3F8 */
+ u64 parity[64]; /* 0x01B400 -- 0x01B5F8 */
+ } ce_ure_maint_dns_hdr;
+
+ /* RCI Buffer Data */
+ struct ce_ure_maint_rci_data {
+ u64 data41_0[64]; /* 0x01B600 -- 0x01B7F8 */
+ u64 data69_42[64]; /* 0x01B800 -- 0x01B9F8 */
+ } ce_ure_maint_rci;
+
+ /* Response Queue */
+ u64 ce_ure_maint_rspq[64]; /* 0x01BA00 -- 0x01BBF8 */
+
+ u64 ce_pad_01C000[4224]; /* 0x01BC00 -- 0x023FF8 */
+
+ /* Admin Build-a-Packet Buffer */
+ struct ce_adm_maint_bap_buf_data {
+ u64 data63_0[258]; /* 0x024000 -- 0x024808 */
+ u64 data127_64[258]; /* 0x024810 -- 0x025018 */
+ u64 parity[258]; /* 0x025020 -- 0x025828 */
+ } ce_adm_maint_bap_buf;
+
+ u64 ce_pad_025830[5370]; /* 0x025830 -- 0x02FFF8 */
+
+ /* URE: 40bit PMU ATE Buffer */ /* 0x030000 -- 0x037FF8 */
+ u64 ce_ure_ate40[TIOCE_NUM_M40_ATES];
+
+ /* URE: 32/40bit PMU ATE Buffer */ /* 0x038000 -- 0x03BFF8 */
+ u64 ce_ure_ate3240[TIOCE_NUM_M3240_ATES];
+
+ u64 ce_pad_03C000[2050]; /* 0x03C000 -- 0x040008 */
+
+ /*
+ * DRE: Down Stream Request Engine
+ */
+ u64 ce_dre_dyn_credit_status1; /* 0x040010 */
+ u64 ce_dre_dyn_credit_status2; /* 0x040018 */
+ u64 ce_dre_last_credit_status1; /* 0x040020 */
+ u64 ce_dre_last_credit_status2; /* 0x040028 */
+ u64 ce_dre_credit_limit1; /* 0x040030 */
+ u64 ce_dre_credit_limit2; /* 0x040038 */
+ u64 ce_dre_force_credit1; /* 0x040040 */
+ u64 ce_dre_force_credit2; /* 0x040048 */
+ u64 ce_dre_debug_mux1; /* 0x040050 */
+ u64 ce_dre_debug_mux2; /* 0x040058 */
+ u64 ce_dre_ssp_err_cmd_wrd; /* 0x040060 */
+ u64 ce_dre_ssp_err_addr; /* 0x040068 */
+ u64 ce_dre_comp_err_cmd_wrd; /* 0x040070 */
+ u64 ce_dre_comp_err_addr; /* 0x040078 */
+ u64 ce_dre_req_status; /* 0x040080 */
+ u64 ce_dre_config1; /* 0x040088 */
+ u64 ce_dre_config2; /* 0x040090 */
+ u64 ce_dre_config_req_status; /* 0x040098 */
+ u64 ce_pad_0400A0[12]; /* 0x0400A0 -- 0x0400F8 */
+ u64 ce_dre_dyn_fifo; /* 0x040100 */
+ u64 ce_pad_040108[3]; /* 0x040108 -- 0x040118 */
+ u64 ce_dre_last_fifo; /* 0x040120 */
+
+ u64 ce_pad_040128[27]; /* 0x040128 -- 0x0401F8 */
+
+ /* DRE Downstream Head Queue */
+ struct ce_dre_maint_ds_head_queue {
+ u64 data63_0[32]; /* 0x040200 -- 0x0402F8 */
+ u64 data127_64[32]; /* 0x040300 -- 0x0403F8 */
+ u64 parity[32]; /* 0x040400 -- 0x0404F8 */
+ } ce_dre_maint_ds_head_q;
+
+ u64 ce_pad_040500[352]; /* 0x040500 -- 0x040FF8 */
+
+ /* DRE Downstream Data Queue */
+ struct ce_dre_maint_ds_data_queue {
+ u64 data63_0[256]; /* 0x041000 -- 0x0417F8 */
+ u64 ce_pad_041800[256]; /* 0x041800 -- 0x041FF8 */
+ u64 data127_64[256]; /* 0x042000 -- 0x0427F8 */
+ u64 ce_pad_042800[256]; /* 0x042800 -- 0x042FF8 */
+ u64 parity[256]; /* 0x043000 -- 0x0437F8 */
+ u64 ce_pad_043800[256]; /* 0x043800 -- 0x043FF8 */
+ } ce_dre_maint_ds_data_q;
+
+ /* DRE URE Upstream Response Queue */
+ struct ce_dre_maint_ure_us_rsp_queue {
+ u64 data63_0[8]; /* 0x044000 -- 0x044038 */
+ u64 ce_pad_044040[24]; /* 0x044040 -- 0x0440F8 */
+ u64 data127_64[8]; /* 0x044100 -- 0x044138 */
+ u64 ce_pad_044140[24]; /* 0x044140 -- 0x0441F8 */
+ u64 parity[8]; /* 0x044200 -- 0x044238 */
+ u64 ce_pad_044240[24]; /* 0x044240 -- 0x0442F8 */
+ } ce_dre_maint_ure_us_rsp_q;
+
+ u64 ce_dre_maint_us_wrt_rsp[32];/* 0x044300 -- 0x0443F8 */
+
+ u64 ce_end_of_struct; /* 0x044400 */
+} tioce_t;
+
+/* ce_lsiX_gb_cfg1 register bit masks & shifts */
+#define CE_LSI_GB_CFG1_RXL0S_THS_SHFT 0
+#define CE_LSI_GB_CFG1_RXL0S_THS_MASK (0xffULL << 0)
+#define CE_LSI_GB_CFG1_RXL0S_SMP_SHFT 8
+#define CE_LSI_GB_CFG1_RXL0S_SMP_MASK (0xfULL << 8)
+#define CE_LSI_GB_CFG1_RXL0S_ADJ_SHFT 12
+#define CE_LSI_GB_CFG1_RXL0S_ADJ_MASK (0x7ULL << 12)
+#define CE_LSI_GB_CFG1_RXL0S_FLT_SHFT 15
+#define CE_LSI_GB_CFG1_RXL0S_FLT_MASK (0x1ULL << 15)
+#define CE_LSI_GB_CFG1_LPBK_SEL_SHFT 16
+#define CE_LSI_GB_CFG1_LPBK_SEL_MASK (0x3ULL << 16)
+#define CE_LSI_GB_CFG1_LPBK_EN_SHFT 18
+#define CE_LSI_GB_CFG1_LPBK_EN_MASK (0x1ULL << 18)
+#define CE_LSI_GB_CFG1_RVRS_LB_SHFT 19
+#define CE_LSI_GB_CFG1_RVRS_LB_MASK (0x1ULL << 19)
+#define CE_LSI_GB_CFG1_RVRS_CLK_SHFT 20
+#define CE_LSI_GB_CFG1_RVRS_CLK_MASK (0x3ULL << 20)
+#define CE_LSI_GB_CFG1_SLF_TS_SHFT 24
+#define CE_LSI_GB_CFG1_SLF_TS_MASK (0xfULL << 24)
+
+/* ce_adm_int_mask/ce_adm_int_status register bit defines */
+#define CE_ADM_INT_CE_ERROR_SHFT 0
+#define CE_ADM_INT_LSI1_IP_ERROR_SHFT 1
+#define CE_ADM_INT_LSI2_IP_ERROR_SHFT 2
+#define CE_ADM_INT_PCIE_ERROR_SHFT 3
+#define CE_ADM_INT_PORT1_HOTPLUG_EVENT_SHFT 4
+#define CE_ADM_INT_PORT2_HOTPLUG_EVENT_SHFT 5
+#define CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT 6
+#define CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT 7
+#define CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT 8
+#define CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT 9
+#define CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT 10
+#define CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT 11
+#define CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT 12
+#define CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT 13
+#define CE_ADM_INT_PCIE_MSG_SHFT 14 /* see int_dest_14 */
+#define CE_ADM_INT_PCIE_MSG_SLOT_0_SHFT 14
+#define CE_ADM_INT_PCIE_MSG_SLOT_1_SHFT 15
+#define CE_ADM_INT_PCIE_MSG_SLOT_2_SHFT 16
+#define CE_ADM_INT_PCIE_MSG_SLOT_3_SHFT 17
+#define CE_ADM_INT_PORT1_PM_PME_MSG_SHFT 22
+#define CE_ADM_INT_PORT2_PM_PME_MSG_SHFT 23
+
+/* ce_adm_force_int register bit defines */
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT 0
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT 1
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT 2
+#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT 3
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT 4
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT 5
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT 6
+#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT 7
+#define CE_ADM_FORCE_INT_ALWAYS_SHFT 8
+
+/* ce_adm_int_dest register bit masks & shifts */
+#define INTR_VECTOR_SHFT 56
+
+/* ce_adm_error_mask and ce_adm_error_summary register bit masks */
+#define CE_ADM_ERR_CRM_SSP_REQ_INVALID (0x1ULL << 0)
+#define CE_ADM_ERR_SSP_REQ_HEADER (0x1ULL << 1)
+#define CE_ADM_ERR_SSP_RSP_HEADER (0x1ULL << 2)
+#define CE_ADM_ERR_SSP_PROTOCOL_ERROR (0x1ULL << 3)
+#define CE_ADM_ERR_SSP_SBE (0x1ULL << 4)
+#define CE_ADM_ERR_SSP_MBE (0x1ULL << 5)
+#define CE_ADM_ERR_CXM_CREDIT_OFLOW (0x1ULL << 6)
+#define CE_ADM_ERR_DRE_SSP_REQ_INVAL (0x1ULL << 7)
+#define CE_ADM_ERR_SSP_REQ_LONG (0x1ULL << 8)
+#define CE_ADM_ERR_SSP_REQ_OFLOW (0x1ULL << 9)
+#define CE_ADM_ERR_SSP_REQ_SHORT (0x1ULL << 10)
+#define CE_ADM_ERR_SSP_REQ_SIDEBAND (0x1ULL << 11)
+#define CE_ADM_ERR_SSP_REQ_ADDR_ERR (0x1ULL << 12)
+#define CE_ADM_ERR_SSP_REQ_BAD_BE (0x1ULL << 13)
+#define CE_ADM_ERR_PCIE_COMPL_TIMEOUT (0x1ULL << 14)
+#define CE_ADM_ERR_PCIE_UNEXP_COMPL (0x1ULL << 15)
+#define CE_ADM_ERR_PCIE_ERR_COMPL (0x1ULL << 16)
+#define CE_ADM_ERR_DRE_CREDIT_OFLOW (0x1ULL << 17)
+#define CE_ADM_ERR_DRE_SRAM_PE (0x1ULL << 18)
+#define CE_ADM_ERR_SSP_RSP_INVALID (0x1ULL << 19)
+#define CE_ADM_ERR_SSP_RSP_LONG (0x1ULL << 20)
+#define CE_ADM_ERR_SSP_RSP_SHORT (0x1ULL << 21)
+#define CE_ADM_ERR_SSP_RSP_SIDEBAND (0x1ULL << 22)
+#define CE_ADM_ERR_URE_SSP_RSP_UNEXP (0x1ULL << 23)
+#define CE_ADM_ERR_URE_SSP_WR_REQ_TIMEOUT (0x1ULL << 24)
+#define CE_ADM_ERR_URE_SSP_RD_REQ_TIMEOUT (0x1ULL << 25)
+#define CE_ADM_ERR_URE_ATE3240_PAGE_FAULT (0x1ULL << 26)
+#define CE_ADM_ERR_URE_ATE40_PAGE_FAULT (0x1ULL << 27)
+#define CE_ADM_ERR_URE_CREDIT_OFLOW (0x1ULL << 28)
+#define CE_ADM_ERR_URE_SRAM_PE (0x1ULL << 29)
+#define CE_ADM_ERR_ADM_SSP_RSP_UNEXP (0x1ULL << 30)
+#define CE_ADM_ERR_ADM_SSP_REQ_TIMEOUT (0x1ULL << 31)
+#define CE_ADM_ERR_MMR_ACCESS_ERROR (0x1ULL << 32)
+#define CE_ADM_ERR_MMR_ADDR_ERROR (0x1ULL << 33)
+#define CE_ADM_ERR_ADM_CREDIT_OFLOW (0x1ULL << 34)
+#define CE_ADM_ERR_ADM_SRAM_PE (0x1ULL << 35)
+#define CE_ADM_ERR_DTL1_MIN_PDATA_CREDIT_ERR (0x1ULL << 36)
+#define CE_ADM_ERR_DTL1_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 37)
+#define CE_ADM_ERR_DTL1_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 38)
+#define CE_ADM_ERR_DTL1_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 39)
+#define CE_ADM_ERR_DTL1_COMP_HD_CRED_MAX_ERR (0x1ULL << 40)
+#define CE_ADM_ERR_DTL1_COMP_D_CRED_MAX_ERR (0x1ULL << 41)
+#define CE_ADM_ERR_DTL1_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 42)
+#define CE_ADM_ERR_DTL1_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 43)
+#define CE_ADM_ERR_DTL1_POSTED_HD_CRED_MAX_ERR (0x1ULL << 44)
+#define CE_ADM_ERR_DTL1_POSTED_D_CRED_MAX_ERR (0x1ULL << 45)
+#define CE_ADM_ERR_DTL2_MIN_PDATA_CREDIT_ERR (0x1ULL << 46)
+#define CE_ADM_ERR_DTL2_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 47)
+#define CE_ADM_ERR_DTL2_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 48)
+#define CE_ADM_ERR_DTL2_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 49)
+#define CE_ADM_ERR_DTL2_COMP_HD_CRED_MAX_ERR (0x1ULL << 50)
+#define CE_ADM_ERR_DTL2_COMP_D_CRED_MAX_ERR (0x1ULL << 51)
+#define CE_ADM_ERR_DTL2_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 52)
+#define CE_ADM_ERR_DTL2_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 53)
+#define CE_ADM_ERR_DTL2_POSTED_HD_CRED_MAX_ERR (0x1ULL << 54)
+#define CE_ADM_ERR_DTL2_POSTED_D_CRED_MAX_ERR (0x1ULL << 55)
+#define CE_ADM_ERR_PORT1_PCIE_COR_ERR (0x1ULL << 56)
+#define CE_ADM_ERR_PORT1_PCIE_NFAT_ERR (0x1ULL << 57)
+#define CE_ADM_ERR_PORT1_PCIE_FAT_ERR (0x1ULL << 58)
+#define CE_ADM_ERR_PORT2_PCIE_COR_ERR (0x1ULL << 59)
+#define CE_ADM_ERR_PORT2_PCIE_NFAT_ERR (0x1ULL << 60)
+#define CE_ADM_ERR_PORT2_PCIE_FAT_ERR (0x1ULL << 61)
+
+/* ce_adm_ure_ups_buf_barrier_flush register bit masks and shifts */
+#define FLUSH_SEL_PORT1_PIPE0_SHFT 0
+#define FLUSH_SEL_PORT1_PIPE1_SHFT 4
+#define FLUSH_SEL_PORT1_PIPE2_SHFT 8
+#define FLUSH_SEL_PORT1_PIPE3_SHFT 12
+#define FLUSH_SEL_PORT2_PIPE0_SHFT 16
+#define FLUSH_SEL_PORT2_PIPE1_SHFT 20
+#define FLUSH_SEL_PORT2_PIPE2_SHFT 24
+#define FLUSH_SEL_PORT2_PIPE3_SHFT 28
+
+/* ce_dre_config1 register bit masks and shifts */
+#define CE_DRE_RO_ENABLE (0x1ULL << 0)
+#define CE_DRE_DYN_RO_ENABLE (0x1ULL << 1)
+#define CE_DRE_SUP_CONFIG_COMP_ERROR (0x1ULL << 2)
+#define CE_DRE_SUP_IO_COMP_ERROR (0x1ULL << 3)
+#define CE_DRE_ADDR_MODE_SHFT 4
+
+/* ce_dre_config_req_status register bit masks */
+#define CE_DRE_LAST_CONFIG_COMPLETION (0x7ULL << 0)
+#define CE_DRE_DOWNSTREAM_CONFIG_ERROR (0x1ULL << 3)
+#define CE_DRE_CONFIG_COMPLETION_VALID (0x1ULL << 4)
+#define CE_DRE_CONFIG_REQUEST_ACTIVE (0x1ULL << 5)
+
+/* ce_ure_control register bit masks & shifts */
+#define CE_URE_RD_MRG_ENABLE (0x1ULL << 0)
+#define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4)
+#define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5)
+#define CE_URE_WRT_MRG_TIMER_SHFT 12
+#define CE_URE_WRT_MRG_TIMER_MASK (0x7FFULL << CE_URE_WRT_MRG_TIMER_SHFT)
+#define CE_URE_WRT_MRG_TIMER(x) (((u64)(x) << \
+ CE_URE_WRT_MRG_TIMER_SHFT) & \
+ CE_URE_WRT_MRG_TIMER_MASK)
+#define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24)
+#define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32)
+#define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33)
+#define CE_URE_UPS_DAT2_PAR_DISABLE (0x1ULL << 34)
+#define CE_URE_UPS_HDR2_PAR_DISABLE (0x1ULL << 35)
+#define CE_URE_ATE_PAR_DISABLE (0x1ULL << 36)
+#define CE_URE_RCI_PAR_DISABLE (0x1ULL << 37)
+#define CE_URE_RSPQ_PAR_DISABLE (0x1ULL << 38)
+#define CE_URE_DNS_DAT_PAR_DISABLE (0x1ULL << 39)
+#define CE_URE_DNS_HDR_PAR_DISABLE (0x1ULL << 40)
+#define CE_URE_MALFORM_DISABLE (0x1ULL << 44)
+#define CE_URE_UNSUP_DISABLE (0x1ULL << 45)
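+
+/*
+ * A minimal sketch (not part of the original header) of how the encode
+ * helpers above compose a ce_ure_control value; assumes u64 is in scope
+ * and the timer count 0x100 is an arbitrary example value.
+ */
+static inline u64 ce_ure_control_example(void)
+{
+	return CE_URE_RD_MRG_ENABLE | CE_URE_WRT_MRG_ENABLE1 |
+	       CE_URE_WRT_MRG_ENABLE2 | CE_URE_WRT_MRG_TIMER(0x100);
+}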
+
+/* ce_ure_page_map register bit masks & shifts */
+#define CE_URE_ATE3240_ENABLE (0x1ULL << 0)
+#define CE_URE_ATE40_ENABLE (0x1ULL << 1)
+#define CE_URE_PAGESIZE_SHFT 4
+#define CE_URE_PAGESIZE_MASK (0x7ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_4K_PAGESIZE (0x0ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_16K_PAGESIZE (0x1ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_64K_PAGESIZE (0x2ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_128K_PAGESIZE (0x3ULL << CE_URE_PAGESIZE_SHFT)
+#define CE_URE_256K_PAGESIZE (0x4ULL << CE_URE_PAGESIZE_SHFT)
+
+/* ce_ure_pipe_sel register bit masks & shifts */
+#define PKT_TRAFFIC_SHFT 16
+#define BUS_SRC_ID_SHFT 8
+#define DEV_SRC_ID_SHFT 3
+#define FNC_SRC_ID_SHFT 0
+#define CE_URE_TC_MASK (0x07ULL << PKT_TRAFFIC_SHFT)
+#define CE_URE_BUS_MASK (0xFFULL << BUS_SRC_ID_SHFT)
+#define CE_URE_DEV_MASK (0x1FULL << DEV_SRC_ID_SHFT)
+#define CE_URE_FNC_MASK (0x07ULL << FNC_SRC_ID_SHFT)
+#define CE_URE_PIPE_BUS(b) (((u64)(b) << BUS_SRC_ID_SHFT) & \
+ CE_URE_BUS_MASK)
+#define CE_URE_PIPE_DEV(d) (((u64)(d) << DEV_SRC_ID_SHFT) & \
+ CE_URE_DEV_MASK)
+#define CE_URE_PIPE_FNC(f) (((u64)(f) << FNC_SRC_ID_SHFT) & \
+ CE_URE_FNC_MASK)
+
+#define CE_URE_SEL1_SHFT 0
+#define CE_URE_SEL2_SHFT 20
+#define CE_URE_SEL3_SHFT 40
+#define CE_URE_SEL1_MASK (0x7FFFFULL << CE_URE_SEL1_SHFT)
+#define CE_URE_SEL2_MASK (0x7FFFFULL << CE_URE_SEL2_SHFT)
+#define CE_URE_SEL3_MASK (0x7FFFFULL << CE_URE_SEL3_SHFT)
+
+
+/* ce_ure_pipe_mask register bit masks & shifts */
+#define CE_URE_MASK1_SHFT 0
+#define CE_URE_MASK2_SHFT 20
+#define CE_URE_MASK3_SHFT 40
+#define CE_URE_MASK1_MASK (0x7FFFFULL << CE_URE_MASK1_SHFT)
+#define CE_URE_MASK2_MASK (0x7FFFFULL << CE_URE_MASK2_SHFT)
+#define CE_URE_MASK3_MASK (0x7FFFFULL << CE_URE_MASK3_SHFT)
+
+
+/* ce_ure_pcie_control1 register bit masks & shifts */
+#define CE_URE_SI (0x1ULL << 0)
+#define CE_URE_ELAL_SHFT 4
+#define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT)
+#define CE_URE_ELAL_SET(n) (((u64)(n) << CE_URE_ELAL_SHFT) & \
+ CE_URE_ELAL_MASK)
+#define CE_URE_ELAL1_SHFT 8
+#define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT)
+#define CE_URE_ELAL1_SET(n) (((u64)(n) << CE_URE_ELAL1_SHFT) & \
+ CE_URE_ELAL1_MASK)
+#define CE_URE_SCC (0x1ULL << 12)
+#define CE_URE_PN1_SHFT 16
+#define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT)
+#define CE_URE_PN2_SHFT 24
+#define CE_URE_PN2_MASK (0xFFULL << CE_URE_PN2_SHFT)
+#define CE_URE_PN1_SET(n) (((u64)(n) << CE_URE_PN1_SHFT) & \
+ CE_URE_PN1_MASK)
+#define CE_URE_PN2_SET(n) (((u64)(n) << CE_URE_PN2_SHFT) & \
+ CE_URE_PN2_MASK)
+
+/* ce_ure_pcie_control2 register bit masks & shifts */
+#define CE_URE_ABP (0x1ULL << 0)
+#define CE_URE_PCP (0x1ULL << 1)
+#define CE_URE_MSP (0x1ULL << 2)
+#define CE_URE_AIP (0x1ULL << 3)
+#define CE_URE_PIP (0x1ULL << 4)
+#define CE_URE_HPS (0x1ULL << 5)
+#define CE_URE_HPC (0x1ULL << 6)
+#define CE_URE_SPLV_SHFT 7
+#define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT)
+#define CE_URE_SPLV_SET(n) (((u64)(n) << CE_URE_SPLV_SHFT) & \
+ CE_URE_SPLV_MASK)
+#define CE_URE_SPLS_SHFT 15
+#define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT)
+#define CE_URE_SPLS_SET(n) (((u64)(n) << CE_URE_SPLS_SHFT) & \
+ CE_URE_SPLS_MASK)
+#define CE_URE_PSN1_SHFT 19
+#define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT)
+#define CE_URE_PSN2_SHFT 32
+#define CE_URE_PSN2_MASK (0x1FFFULL << CE_URE_PSN2_SHFT)
+#define CE_URE_PSN1_SET(n) (((u64)(n) << CE_URE_PSN1_SHFT) & \
+ CE_URE_PSN1_MASK)
+#define CE_URE_PSN2_SET(n) (((u64)(n) << CE_URE_PSN2_SHFT) & \
+ CE_URE_PSN2_MASK)
+
+/*
+ * PIO address space ranges for CE
+ */
+
+/* Local CE Registers Space */
+#define CE_PIO_MMR 0x00000000
+#define CE_PIO_MMR_LEN 0x04000000
+
+/* PCI Compatible Config Space */
+#define CE_PIO_CONFIG_SPACE 0x04000000
+#define CE_PIO_CONFIG_SPACE_LEN 0x04000000
+
+/* PCI I/O Space Alias */
+#define CE_PIO_IO_SPACE_ALIAS 0x08000000
+#define CE_PIO_IO_SPACE_ALIAS_LEN 0x08000000
+
+/* PCI Enhanced Config Space */
+#define CE_PIO_E_CONFIG_SPACE 0x10000000
+#define CE_PIO_E_CONFIG_SPACE_LEN 0x10000000
+
+/* PCI I/O Space */
+#define CE_PIO_IO_SPACE 0x100000000
+#define CE_PIO_IO_SPACE_LEN 0x100000000
+
+/* PCI MEM Space */
+#define CE_PIO_MEM_SPACE 0x200000000
+#define CE_PIO_MEM_SPACE_LEN TIO_HWIN_SIZE
+
+
+/*
+ * CE PCI Enhanced Config Space shifts & masks
+ */
+#define CE_E_CONFIG_BUS_SHFT 20
+#define CE_E_CONFIG_BUS_MASK (0xFF << CE_E_CONFIG_BUS_SHFT)
+#define CE_E_CONFIG_DEVICE_SHFT 15
+#define CE_E_CONFIG_DEVICE_MASK (0x1F << CE_E_CONFIG_DEVICE_SHFT)
+#define CE_E_CONFIG_FUNC_SHFT 12
+#define CE_E_CONFIG_FUNC_MASK (0x7 << CE_E_CONFIG_FUNC_SHFT)
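+
+/*
+ * A minimal sketch (not part of the original header): an enhanced
+ * config-space offset is assumed to be formed by placing bus/device/
+ * function into the fields above within the CE_PIO_E_CONFIG_SPACE
+ * window.
+ */
+static inline u64 ce_e_config_offset(u64 bus, u64 dev, u64 fn)
+{
+	return CE_PIO_E_CONFIG_SPACE |
+	       ((bus << CE_E_CONFIG_BUS_SHFT) & CE_E_CONFIG_BUS_MASK) |
+	       ((dev << CE_E_CONFIG_DEVICE_SHFT) & CE_E_CONFIG_DEVICE_MASK) |
+	       ((fn << CE_E_CONFIG_FUNC_SHFT) & CE_E_CONFIG_FUNC_MASK);
+}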
+
+#endif /* __ASM_IA64_SN_TIOCE_H__ */
diff --git a/arch/ia64/include/asm/sn/tioce_provider.h b/arch/ia64/include/asm/sn/tioce_provider.h
new file mode 100644
index 000000000000..32c32f30b099
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tioce_provider.h
@@ -0,0 +1,63 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_CE_PROVIDER_H
+#define _ASM_IA64_SN_CE_PROVIDER_H
+
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/tioce.h>
+
+/*
+ * Common TIOCE structure shared between the prom and kernel
+ *
+ * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES TO THE
+ * PROM VERSION.
+ */
+struct tioce_common {
+ struct pcibus_bussoft ce_pcibus; /* common pciio header */
+
+ u32 ce_rev;
+ u64 ce_kernel_private;
+ u64 ce_prom_private;
+};
+
+struct tioce_kernel {
+ struct tioce_common *ce_common;
+ spinlock_t ce_lock;
+ struct list_head ce_dmamap_list;
+
+ u64 ce_ate40_shadow[TIOCE_NUM_M40_ATES];
+ u64 ce_ate3240_shadow[TIOCE_NUM_M3240_ATES];
+ u32 ce_ate3240_pagesize;
+
+ u8 ce_port1_secondary;
+
+ /* per-port resources */
+ struct {
+ int dirmap_refcnt;
+ u64 dirmap_shadow;
+ } ce_port[TIOCE_NUM_PORTS];
+};
+
+struct tioce_dmamap {
+ struct list_head ce_dmamap_list; /* headed by tioce_kernel */
+ u32 refcnt;
+
+ u64 nbytes; /* # bytes mapped */
+
+ u64 ct_start; /* coretalk start address */
+ u64 pci_start; /* bus start address */
+
+ u64 __iomem *ate_hw;/* hw ptr of first ate in map */
+ u64 *ate_shadow; /* shadow ptr of first ate */
+ u16 ate_count; /* # ate's in the map */
+};
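+
+/*
+ * A minimal sketch (not part of the original header) of a lookup over
+ * ce_dmamap_list for a map covering a coretalk range; assumes
+ * <linux/list.h> is visible and that the caller already holds ce_lock.
+ */
+static inline struct tioce_dmamap *
+tioce_dmamap_find(struct tioce_kernel *ce, u64 ct_addr, u64 len)
+{
+	struct tioce_dmamap *map;
+
+	list_for_each_entry(map, &ce->ce_dmamap_list, ce_dmamap_list)
+		if (ct_addr >= map->ct_start &&
+		    ct_addr + len <= map->ct_start + map->nbytes)
+			return map;
+	return NULL;
+}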
+
+extern int tioce_init_provider(void);
+
+#endif /* __ASM_IA64_SN_CE_PROVIDER_H */
diff --git a/arch/ia64/include/asm/sn/tiocp.h b/arch/ia64/include/asm/sn/tiocp.h
new file mode 100644
index 000000000000..e8ad0bb5b6c5
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tiocp.h
@@ -0,0 +1,257 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_TIOCP_H
+#define _ASM_IA64_SN_PCI_TIOCP_H
+
+#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL
+#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
+#define TIOCP_PCI64_CMDTYPE_MSI (0x3ull << 60)
+
+
+/*****************************************************************************
+ *********************** TIOCP MMR structure mapping ***********************
+ *****************************************************************************/
+
+struct tiocp {
+
+ /* 0x000000-0x00FFFF -- Local Registers */
+
+ /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
+ u64 cp_id; /* 0x000000 */
+ u64 cp_stat; /* 0x000008 */
+ u64 cp_err_upper; /* 0x000010 */
+ u64 cp_err_lower; /* 0x000018 */
+ #define cp_err cp_err_lower
+ u64 cp_control; /* 0x000020 */
+ u64 cp_req_timeout; /* 0x000028 */
+ u64 cp_intr_upper; /* 0x000030 */
+ u64 cp_intr_lower; /* 0x000038 */
+ #define cp_intr cp_intr_lower
+ u64 cp_err_cmdword; /* 0x000040 */
+ u64 _pad_000048; /* 0x000048 */
+ u64 cp_tflush; /* 0x000050 */
+
+ /* 0x000058-0x00007F -- Bridge-specific Configuration */
+ u64 cp_aux_err; /* 0x000058 */
+ u64 cp_resp_upper; /* 0x000060 */
+ u64 cp_resp_lower; /* 0x000068 */
+ #define cp_resp cp_resp_lower
+ u64 cp_tst_pin_ctrl; /* 0x000070 */
+ u64 cp_addr_lkerr; /* 0x000078 */
+
+ /* 0x000080-0x00008F -- PMU & MAP */
+ u64 cp_dir_map; /* 0x000080 */
+ u64 _pad_000088; /* 0x000088 */
+
+ /* 0x000090-0x00009F -- SSRAM */
+ u64 cp_map_fault; /* 0x000090 */
+ u64 _pad_000098; /* 0x000098 */
+
+ /* 0x0000A0-0x0000AF -- Arbitration */
+ u64 cp_arb; /* 0x0000A0 */
+ u64 _pad_0000A8; /* 0x0000A8 */
+
+ /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+ u64 cp_ate_parity_err; /* 0x0000B0 */
+ u64 _pad_0000B8; /* 0x0000B8 */
+
+ /* 0x0000C0-0x0000FF -- PCI/GIO */
+ u64 cp_bus_timeout; /* 0x0000C0 */
+ u64 cp_pci_cfg; /* 0x0000C8 */
+ u64 cp_pci_err_upper; /* 0x0000D0 */
+ u64 cp_pci_err_lower; /* 0x0000D8 */
+ #define cp_pci_err cp_pci_err_lower
+ u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
+
+ /* 0x000100-0x0001FF -- Interrupt */
+ u64 cp_int_status; /* 0x000100 */
+ u64 cp_int_enable; /* 0x000108 */
+ u64 cp_int_rst_stat; /* 0x000110 */
+ u64 cp_int_mode; /* 0x000118 */
+ u64 cp_int_device; /* 0x000120 */
+ u64 cp_int_host_err; /* 0x000128 */
+ u64 cp_int_addr[8]; /* 0x0001{30,,,68} */
+ u64 cp_err_int_view; /* 0x000170 */
+ u64 cp_mult_int; /* 0x000178 */
+ u64 cp_force_always[8]; /* 0x0001{80,,,B8} */
+ u64 cp_force_pin[8]; /* 0x0001{C0,,,F8} */
+
+ /* 0x000200-0x000298 -- Device */
+ u64 cp_device[4]; /* 0x0002{00,,,18} */
+ u64 _pad_000220[4]; /* 0x0002{20,,,38} */
+ u64 cp_wr_req_buf[4]; /* 0x0002{40,,,58} */
+ u64 _pad_000260[4]; /* 0x0002{60,,,78} */
+ u64 cp_rrb_map[2]; /* 0x0002{80,,,88} */
+ #define cp_even_resp cp_rrb_map[0] /* 0x000280 */
+ #define cp_odd_resp cp_rrb_map[1] /* 0x000288 */
+ u64 cp_resp_status; /* 0x000290 */
+ u64 cp_resp_clear; /* 0x000298 */
+
+ u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
+
+ /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+ struct {
+ u64 upper; /* 0x0003{00,,,F0} */
+ u64 lower; /* 0x0003{08,,,F8} */
+ } cp_buf_addr_match[16];
+
+ /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+ struct {
+ u64 flush_w_touch; /* 0x000{400,,,5C0} */
+ u64 flush_wo_touch; /* 0x000{408,,,5C8} */
+ u64 inflight; /* 0x000{410,,,5D0} */
+ u64 prefetch; /* 0x000{418,,,5D8} */
+ u64 total_pci_retry; /* 0x000{420,,,5E0} */
+ u64 max_pci_retry; /* 0x000{428,,,5E8} */
+ u64 max_latency; /* 0x000{430,,,5F0} */
+ u64 clear_all; /* 0x000{438,,,5F8} */
+ } cp_buf_count[8];
+
+
+ /* 0x000600-0x0009FF -- PCI/X registers */
+ u64 cp_pcix_bus_err_addr; /* 0x000600 */
+ u64 cp_pcix_bus_err_attr; /* 0x000608 */
+ u64 cp_pcix_bus_err_data; /* 0x000610 */
+ u64 cp_pcix_pio_split_addr; /* 0x000618 */
+ u64 cp_pcix_pio_split_attr; /* 0x000620 */
+ u64 cp_pcix_dma_req_err_attr; /* 0x000628 */
+ u64 cp_pcix_dma_req_err_addr; /* 0x000630 */
+ u64 cp_pcix_timeout; /* 0x000638 */
+
+ u64 _pad_000640[24]; /* 0x000{640,,,6F8} */
+
+ /* 0x000700-0x000737 -- Debug Registers */
+ u64 cp_ct_debug_ctl; /* 0x000700 */
+ u64 cp_br_debug_ctl; /* 0x000708 */
+ u64 cp_mux3_debug_ctl; /* 0x000710 */
+ u64 cp_mux4_debug_ctl; /* 0x000718 */
+ u64 cp_mux5_debug_ctl; /* 0x000720 */
+ u64 cp_mux6_debug_ctl; /* 0x000728 */
+ u64 cp_mux7_debug_ctl; /* 0x000730 */
+
+ u64 _pad_000738[89]; /* 0x000{738,,,9F8} */
+
+ /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+ struct {
+ u64 cp_buf_addr; /* 0x000{A00,,,AF0} */
+ u64 cp_buf_attr; /* 0x000{A08,,,AF8} */
+ } cp_pcix_read_buf_64[16];
+
+ struct {
+ u64 cp_buf_addr; /* 0x000{B00,,,BE0} */
+ u64 cp_buf_attr; /* 0x000{B08,,,BE8} */
+ u64 cp_buf_valid; /* 0x000{B10,,,BF0} */
+ u64 __pad1; /* 0x000{B18,,,BF8} */
+ } cp_pcix_write_buf_64[8];
+
+ /* End of Local Registers -- Start of Address Map space */
+
+ char _pad_000c00[0x010000 - 0x000c00];
+
+ /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
+ u64 cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */
+
+ char _pad_012000[0x14000 - 0x012000];
+
+ /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
+ u64 cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */
+
+ char _pad_016000[0x18000 - 0x016000];
+
+ /* 0x18000-0x197F8 -- TIOCP Write Request Ram */
+ u64 cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
+ u64 cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
+ u64 cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
+
+ char _pad_019800[0x1C000 - 0x019800];
+
+ /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
+ u64 cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */
+ u64 cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */
+ u64 cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */
+
+ char _pad_01F000[0x20000 - 0x01F000];
+
+ /* 0x020000-0x020FFF -- Host Device (CP) Configuration Space (not used) */
+ char _pad_020000[0x021000 - 0x020000];
+
+ /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
+ union {
+ u8 c[0x1000 / 1]; /* 0x02{1000,,,7FFF} */
+ u16 s[0x1000 / 2]; /* 0x02{1000,,,7FFF} */
+ u32 l[0x1000 / 4]; /* 0x02{1000,,,7FFF} */
+ u64 d[0x1000 / 8]; /* 0x02{1000,,,7FFF} */
+ union {
+ u8 c[0x100 / 1];
+ u16 s[0x100 / 2];
+ u32 l[0x100 / 4];
+ u64 d[0x100 / 8];
+ } f[8];
+ } cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */
+
+ /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+ union {
+ u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
+ u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
+ u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
+ u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
+ union {
+ u8 c[0x100 / 1];
+ u16 s[0x100 / 2];
+ u32 l[0x100 / 4];
+ u64 d[0x100 / 8];
+ } f[8];
+ } cp_type1_cfg; /* 0x028000-0x029000 */
+
+ char _pad_029000[0x030000-0x029000];
+
+ /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+ union {
+ u8 c[8 / 1];
+ u16 s[8 / 2];
+ u32 l[8 / 4];
+ u64 d[8 / 8];
+ } cp_pci_iack; /* 0x030000-0x030007 */
+
+ char _pad_030007[0x040000-0x030008];
+
+ /* 0x040000-0x040007 -- PCIX Special Cycle */
+ union {
+ u8 c[8 / 1];
+ u16 s[8 / 2];
+ u32 l[8 / 4];
+ u64 d[8 / 8];
+ } cp_pcix_cycle; /* 0x040000-0x040007 */
+
+ char _pad_040007[0x200000-0x040008];
+
+ /* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
+ union {
+ u8 c[0x100000 / 1];
+ u16 s[0x100000 / 2];
+ u32 l[0x100000 / 4];
+ u64 d[0x100000 / 8];
+ } cp_devio_raw[6]; /* 0x200000-0x7FFFFF */
+
+ #define cp_devio(n) cp_devio_raw[((n) < 2) ? ((n) * 2) : ((n) + 2)]
+
+ char _pad_800000[0xA00000-0x800000];
+
+ /* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */
+ union {
+ u8 c[0x100000 / 1];
+ u16 s[0x100000 / 2];
+ u32 l[0x100000 / 4];
+ u64 d[0x100000 / 8];
+ } cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */
+
+ #define cp_devio_flush(n) cp_devio_raw_flush[((n) < 2) ? ((n) * 2) : ((n) + 2)]
+
+};
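+
+/*
+ * A minimal sketch (not part of the original header) of the device ->
+ * window mapping cp_devio() performs: devices 0 and 1 take the
+ * double-width raw entries 0 and 2, devices 2 and 3 take entries 4
+ * and 5.
+ */
+static inline int cp_devio_index(int n)
+{
+	return (n < 2) ? (n * 2) : (n + 2);
+}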
+
+#endif /* _ASM_IA64_SN_PCI_TIOCP_H */
diff --git a/arch/ia64/include/asm/sn/tiocx.h b/arch/ia64/include/asm/sn/tiocx.h
new file mode 100644
index 000000000000..d29728492f36
--- /dev/null
+++ b/arch/ia64/include/asm/sn/tiocx.h
@@ -0,0 +1,72 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_TIO_TIOCX_H
+#define _ASM_IA64_SN_TIO_TIOCX_H
+
+#ifdef __KERNEL__
+
+struct cx_id_s {
+ unsigned int part_num;
+ unsigned int mfg_num;
+ int nasid;
+};
+
+struct cx_dev {
+ struct cx_id_s cx_id;
+ int bt; /* board/blade type */
+ void *soft; /* driver specific */
+ struct hubdev_info *hubdev;
+ struct device dev;
+ struct cx_drv *driver;
+};
+
+struct cx_device_id {
+ unsigned int part_num;
+ unsigned int mfg_num;
+};
+
+struct cx_drv {
+ char *name;
+ const struct cx_device_id *id_table;
+ struct device_driver driver;
+ int (*probe) (struct cx_dev * dev, const struct cx_device_id * id);
+ int (*remove) (struct cx_dev * dev);
+};
+
+/* create DMA address by stripping AS bits */
+#define TIOCX_DMA_ADDR(a) (u64)((u64)(a) & 0xffffcfffffffffUL)
+
+#define TIOCX_TO_TIOCX_DMA_ADDR(a) (u64)(((u64)(a) & 0xfffffffff) | \
+ ((((u64)(a)) & 0xffffc000000000UL) << 2))
+
+#define TIO_CE_ASIC_PARTNUM 0xce00
+#define TIOCX_CORELET 3
+
+/* These are taken from tio_mmr_as.h */
+#define TIO_ICE_FRZ_CFG TIO_MMR_ADDR_MOD(0x00000000b0008100UL)
+#define TIO_ICE_PMI_TX_CFG TIO_MMR_ADDR_MOD(0x00000000b000b100UL)
+#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3 TIO_MMR_ADDR_MOD(0x00000000b000be18UL)
+#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK 0x000000000000000fUL
+
+#define to_cx_dev(n) container_of(n, struct cx_dev, dev)
+#define to_cx_driver(drv) container_of(drv, struct cx_drv, driver)
+
+extern struct sn_irq_info *tiocx_irq_alloc(nasid_t, int, int, nasid_t, int);
+extern void tiocx_irq_free(struct sn_irq_info *);
+extern int cx_device_unregister(struct cx_dev *);
+extern int cx_device_register(nasid_t, int, int, struct hubdev_info *, int);
+extern int cx_driver_unregister(struct cx_drv *);
+extern int cx_driver_register(struct cx_drv *);
+extern u64 tiocx_dma_addr(u64 addr);
+extern u64 tiocx_swin_base(int nasid);
+extern void tiocx_mmr_store(int nasid, u64 offset, u64 value);
+extern u64 tiocx_mmr_load(int nasid, u64 offset);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_IA64_SN_TIO_TIOCX_H */
diff --git a/arch/ia64/include/asm/sn/types.h b/arch/ia64/include/asm/sn/types.h
new file mode 100644
index 000000000000..8e04ee211e59
--- /dev/null
+++ b/arch/ia64/include/asm/sn/types.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_IA64_SN_TYPES_H
+#define _ASM_IA64_SN_TYPES_H
+
+#include <linux/types.h>
+
+typedef unsigned long cpuid_t;
+typedef signed short nasid_t; /* node id in numa-as-id space */
+typedef signed char partid_t; /* partition ID type */
+typedef unsigned int moduleid_t; /* user-visible module number type */
+typedef unsigned int cmoduleid_t; /* kernel compact module id type */
+typedef unsigned char slotid_t; /* slot (blade) within module */
+typedef unsigned char slabid_t; /* slab (asic) within slot */
+typedef u64 nic_t;
+typedef unsigned long iopaddr_t;
+typedef unsigned long paddr_t;
+typedef short cnodeid_t;
+
+#endif /* _ASM_IA64_SN_TYPES_H */
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
new file mode 100644
index 000000000000..d5ef0aa3e312
--- /dev/null
+++ b/arch/ia64/include/asm/socket.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_SOCKET_H
+#define _ASM_IA64_SOCKET_H
+
+/*
+ * Socket related defines.
+ *
+ * Based on <asm-i386/socket.h>.
+ *
+ * Modified 1998-2000
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_SNDBUFFORCE 32
+#define SO_RCVBUFFORCE 33
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add: #define SO_REUSEPORT 15 */
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO 20
+#define SO_SNDTIMEO 21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
+#define SO_PEERNAME 28
+#define SO_TIMESTAMP 29
+#define SCM_TIMESTAMP SO_TIMESTAMP
+
+#define SO_ACCEPTCONN 30
+
+#define SO_PEERSEC 31
+#define SO_PASSSEC 34
+#define SO_TIMESTAMPNS 35
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+
+#define SO_MARK 36
+
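+/*
+ * A minimal application-side sketch (not part of the original header):
+ * these constants are consumed through setsockopt(2)/getsockopt(2);
+ * assumes <sys/socket.h> supplies the setsockopt() prototype.
+ */
+static inline int so_set_rcvbuf(int fd, int bytes)
+{
+	return setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bytes, sizeof(bytes));
+}
+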
+#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/sockios.h b/arch/ia64/include/asm/sockios.h
new file mode 100644
index 000000000000..15c92468ad38
--- /dev/null
+++ b/arch/ia64/include/asm/sockios.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_IA64_SOCKIOS_H
+#define _ASM_IA64_SOCKIOS_H
+
+/*
+ * Socket-level I/O control calls.
+ *
+ * Based on <asm-i386/sockios.h>.
+ *
+ * Modified 1998, 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+
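+/*
+ * A minimal application-side sketch (not part of the original header):
+ * SIOCGSTAMP fetches the receive timestamp of the last packet on a
+ * socket; assumes <sys/ioctl.h> and <sys/time.h> for ioctl() and
+ * struct timeval.
+ */
+static inline int get_last_rx_stamp(int fd, struct timeval *tv)
+{
+	return ioctl(fd, SIOCGSTAMP, tv);
+}
+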
+#endif /* _ASM_IA64_SOCKIOS_H */
diff --git a/arch/ia64/include/asm/sparsemem.h b/arch/ia64/include/asm/sparsemem.h
new file mode 100644
index 000000000000..67a7c40ec27f
--- /dev/null
+++ b/arch/ia64/include/asm/sparsemem.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_IA64_SPARSEMEM_H
+#define _ASM_IA64_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+
+#define SECTION_SIZE_BITS (30)
+#define MAX_PHYSMEM_BITS (50)
+#ifdef CONFIG_FORCE_MAX_ZONEORDER
+#if ((CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS)
+#undef SECTION_SIZE_BITS
+#define SECTION_SIZE_BITS (CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT)
+#endif
+#endif
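+
+/*
+ * Worked out from the defaults above: sections are 2^30 bytes (1 GiB),
+ * the addressable range is 2^50 bytes (1 PiB), so at most
+ * 2^(50 - 30) = 1048576 sections exist.
+ */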
+
+#endif /* CONFIG_SPARSEMEM */
+#endif /* _ASM_IA64_SPARSEMEM_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
new file mode 100644
index 000000000000..0229fb95fb38
--- /dev/null
+++ b/arch/ia64/include/asm/spinlock.h
@@ -0,0 +1,220 @@
+#ifndef _ASM_IA64_SPINLOCK_H
+#define _ASM_IA64_SPINLOCK_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ *
+ * This file is used for SMP configurations only.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include <asm/atomic.h>
+#include <asm/intrinsics.h>
+#include <asm/system.h>
+
+#define __raw_spin_lock_init(x) ((x)->lock = 0)
+
+#ifdef ASM_SUPPORTED
+/*
+ * Try to get the lock. If we fail to get the lock, make a non-standard call to
+ * ia64_spinlock_contention(). We do not use a normal call because that would force all
+ * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
+ */
+
+#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
+
+static inline void
+__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
+{
+ register volatile unsigned int *ptr asm ("r31") = &lock->lock;
+
+#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+# ifdef CONFIG_ITANIUM
+ /* don't use brl on Itanium... */
+ asm volatile ("{\n\t"
+ " mov ar.ccv = r0\n\t"
+ " mov r28 = ip\n\t"
+ " mov r30 = 1;;\n\t"
+ "}\n\t"
+ "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
+ "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
+ "cmp4.ne p14, p0 = r30, r0\n\t"
+ "mov b6 = r29;;\n\t"
+ "mov r27=%2\n\t"
+ "(p14) br.cond.spnt.many b6"
+ : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# else
+ asm volatile ("{\n\t"
+ " mov ar.ccv = r0\n\t"
+ " mov r28 = ip\n\t"
+ " mov r30 = 1;;\n\t"
+ "}\n\t"
+ "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
+ "cmp4.ne p14, p0 = r30, r0\n\t"
+ "mov r27=%2\n\t"
+ "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
+ : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_ITANIUM */
+#else
+# ifdef CONFIG_ITANIUM
+ /* don't use brl on Itanium... */
+ /* mis-declare, so we get the entry-point, not its function descriptor: */
+ asm volatile ("mov r30 = 1\n\t"
+ "mov r27=%2\n\t"
+ "mov ar.ccv = r0;;\n\t"
+ "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
+ "movl r29 = ia64_spinlock_contention;;\n\t"
+ "cmp4.ne p14, p0 = r30, r0\n\t"
+ "mov b6 = r29;;\n\t"
+ "(p14) br.call.spnt.many b6 = b6"
+ : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# else
+ asm volatile ("mov r30 = 1\n\t"
+ "mov r27=%2\n\t"
+ "mov ar.ccv = r0;;\n\t"
+ "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
+ "cmp4.ne p14, p0 = r30, r0\n\t"
+ "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
+ : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_ITANIUM */
+#endif
+}
+
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
+
+/* Unlock by doing an ordered store and releasing the cacheline with nta */
+static inline void __raw_spin_unlock(raw_spinlock_t *x)
+{
+ barrier();
+ asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
+}
+
+#else /* !ASM_SUPPORTED */
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+# define __raw_spin_lock(x) \
+do { \
+ __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
+ __u64 ia64_spinlock_val; \
+ ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
+ if (unlikely(ia64_spinlock_val)) { \
+ do { \
+ while (*ia64_spinlock_ptr) \
+ ia64_barrier(); \
+ ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
+ } while (ia64_spinlock_val); \
+ } \
+} while (0)
+#define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
+#endif /* !ASM_SUPPORTED */
+
+#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+
+#define __raw_read_lock(rw) \
+do { \
+ raw_rwlock_t *__read_lock_ptr = (rw); \
+ \
+ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
+ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
+ while (*(volatile int *)__read_lock_ptr < 0) \
+ cpu_relax(); \
+ } \
+} while (0)
+
+#define __raw_read_unlock(rw) \
+do { \
+ raw_rwlock_t *__read_lock_ptr = (rw); \
+ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
+} while (0)
+
+#ifdef ASM_SUPPORTED
+#define __raw_write_lock(rw) \
+do { \
+ __asm__ __volatile__ ( \
+ "mov ar.ccv = r0\n" \
+ "dep r29 = -1, r0, 31, 1;;\n" \
+ "1:\n" \
+ "ld4 r2 = [%0];;\n" \
+ "cmp4.eq p0,p7 = r0,r2\n" \
+ "(p7) br.cond.spnt.few 1b \n" \
+ "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
+ "cmp4.eq p0,p7 = r0, r2\n" \
+ "(p7) br.cond.spnt.few 1b;;\n" \
+ :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
+} while(0)
+
+#define __raw_write_trylock(rw) \
+({ \
+ register long result; \
+ \
+ __asm__ __volatile__ ( \
+ "mov ar.ccv = r0\n" \
+ "dep r29 = -1, r0, 31, 1;;\n" \
+ "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \
+ : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \
+ (result == 0); \
+})
+
+static inline void __raw_write_unlock(raw_rwlock_t *x)
+{
+ u8 *y = (u8 *)x;
+ barrier();
+ asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
+}
+
+#else /* !ASM_SUPPORTED */
+
+#define __raw_write_lock(l) \
+({ \
+ __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
+ __u32 *ia64_write_lock_ptr = (__u32 *) (l); \
+ do { \
+ while (*ia64_write_lock_ptr) \
+ ia64_barrier(); \
+ ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
+ } while (ia64_val); \
+})
+
+#define __raw_write_trylock(rw) \
+({ \
+ __u64 ia64_val; \
+ __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
+ ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0); \
+ (ia64_val == 0); \
+})
+
+static inline void __raw_write_unlock(raw_rwlock_t *x)
+{
+ barrier();
+ x->write_lock = 0;
+}
+
+#endif /* !ASM_SUPPORTED */
+
+static inline int __raw_read_trylock(raw_rwlock_t *x)
+{
+ union {
+ raw_rwlock_t lock;
+ __u32 word;
+ } old, new;
+ old.lock = new.lock = *x;
+ old.lock.write_lock = new.lock.write_lock = 0;
+ ++new.lock.read_counter;
+ return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
+}
+
+#define _raw_spin_relax(lock) cpu_relax()
+#define _raw_read_relax(lock) cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
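+/*
+ * A minimal sketch (not part of the original header): the rwlock packs
+ * a 31-bit reader count and a 1-bit write flag into one 32-bit word,
+ * which is what lets __raw_read_trylock() update both with a single
+ * cmpxchg4.acq.  A hypothetical helper reading that state:
+ */
+static inline int __raw_rw_is_locked(raw_rwlock_t *x)
+{
+	return x->read_counter != 0 || x->write_lock != 0;
+}
+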
+#endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..474e46f1ab4a
--- /dev/null
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_SPINLOCK_TYPES_H
+#define _ASM_IA64_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int read_counter : 31;
+ volatile unsigned int write_lock : 1;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { 0, 0 }
+
+#endif
diff --git a/arch/ia64/include/asm/stat.h b/arch/ia64/include/asm/stat.h
new file mode 100644
index 000000000000..367bb90cdffa
--- /dev/null
+++ b/arch/ia64/include/asm/stat.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_IA64_STAT_H
+#define _ASM_IA64_STAT_H
+
+/*
+ * Modified 1998, 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+struct stat {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned long st_nlink;
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad0;
+ unsigned long st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long st_blksize;
+ long st_blocks;
+ unsigned long __unused[3];
+};
+
+#define STAT_HAVE_NSEC 1
+
+struct ia64_oldstat {
+ unsigned int st_dev;
+ unsigned int st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int st_rdev;
+ unsigned int __pad1;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+ unsigned int st_blksize;
+ int st_blocks;
+ unsigned int __unused1;
+ unsigned int __unused2;
+};
+
+#endif /* _ASM_IA64_STAT_H */
diff --git a/arch/ia64/include/asm/statfs.h b/arch/ia64/include/asm/statfs.h
new file mode 100644
index 000000000000..811097974f31
--- /dev/null
+++ b/arch/ia64/include/asm/statfs.h
@@ -0,0 +1,62 @@
+#ifndef _ASM_IA64_STATFS_H
+#define _ASM_IA64_STATFS_H
+
+/*
+ * Based on <asm-i386/statfs.h>.
+ *
+ * Modified 1998, 1999, 2003
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#ifndef __KERNEL_STRICT_NAMES
+# include <linux/types.h>
+typedef __kernel_fsid_t fsid_t;
+#endif
+
+/*
+ * This is ugly --- we're already 64-bit, so just duplicate the definitions
+ */
+struct statfs {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_frsize;
+ long f_spare[5];
+};
+
+
+struct statfs64 {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_frsize;
+ long f_spare[5];
+};
+
+struct compat_statfs64 {
+ __u32 f_type;
+ __u32 f_bsize;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_bavail;
+ __u64 f_files;
+ __u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ __u32 f_namelen;
+ __u32 f_frsize;
+ __u32 f_spare[5];
+} __attribute__((packed));
+
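+/*
+ * A minimal sketch (not part of the original header): space available
+ * to an unprivileged caller comes out of a filled-in struct statfs as
+ * f_bavail blocks of f_bsize bytes.
+ */
+static inline long long statfs_avail_bytes(const struct statfs *st)
+{
+	return (long long)st->f_bavail * st->f_bsize;
+}
+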
+#endif /* _ASM_IA64_STATFS_H */
diff --git a/arch/ia64/include/asm/string.h b/arch/ia64/include/asm/string.h
new file mode 100644
index 000000000000..85fd65c52a8c
--- /dev/null
+++ b/arch/ia64/include/asm/string.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_STRING_H
+#define _ASM_IA64_STRING_H
+
+/*
+ * Here is where we want to put optimized versions of the string
+ * routines.
+ *
+ * Copyright (C) 1998-2000, 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+
+#define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */
+#define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */
+#define __HAVE_ARCH_MEMCPY 1 /* see arch/ia64/lib/memcpy.S */
+
+extern __kernel_size_t strlen (const char *);
+extern void *memcpy (void *, const void *, __kernel_size_t);
+extern void *memset (void *, int, __kernel_size_t);
+
+#endif /* _ASM_IA64_STRING_H */
diff --git a/arch/ia64/include/asm/suspend.h b/arch/ia64/include/asm/suspend.h
new file mode 100644
index 000000000000..b05bbb6074e2
--- /dev/null
+++ b/arch/ia64/include/asm/suspend.h
@@ -0,0 +1 @@
+/* dummy (must be non-empty to prevent prejudicial removal...) */
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
new file mode 100644
index 000000000000..927a381c20ca
--- /dev/null
+++ b/arch/ia64/include/asm/system.h
@@ -0,0 +1,292 @@
+#ifndef _ASM_IA64_SYSTEM_H
+#define _ASM_IA64_SYSTEM_H
+
+/*
+ * System defines. Note that this is included both from .c and .S
+ * files, so it should contain only defines, not any C code. This is based
+ * on information published in the Processor Abstraction Layer
+ * and the System Abstraction Layer manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#include <asm/kregs.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/percpu.h>
+
+#define GATE_ADDR RGN_BASE(RGN_GATE)
+
+/*
+ * The range 0xa000000000000000+2*PERCPU_PAGE_SIZE through
+ * 0xa000000000000000+3*PERCPU_PAGE_SIZE remains unmapped (guard page).
+ */
+#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000))
+#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
+#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
+
+struct pci_vector_struct {
+ __u16 segment; /* PCI Segment number */
+ __u16 bus; /* PCI Bus number */
+ __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
+ __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
+ __u32 irq; /* IRQ assigned */
+};
+
+extern struct ia64_boot_param {
+ __u64 command_line; /* physical address of command line arguments */
+ __u64 efi_systab; /* physical address of EFI system table */
+ __u64 efi_memmap; /* physical address of EFI memory map */
+ __u64 efi_memmap_size; /* size of EFI memory map */
+ __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */
+ __u32 efi_memdesc_version; /* memory descriptor version */
+ struct {
+ __u16 num_cols; /* number of columns on console output device */
+ __u16 num_rows; /* number of rows on console output device */
+ __u16 orig_x; /* cursor's x position */
+ __u16 orig_y; /* cursor's y position */
+ } console_info;
+ __u64 fpswa; /* physical address of the fpswa interface */
+ __u64 initrd_start;
+ __u64 initrd_size;
+} *ia64_boot_param;
+
+/*
+ * Macros to force memory ordering. In these descriptions, "previous"
+ * and "subsequent" refer to program order; "visible" means that all
+ * architecturally visible effects of a memory access have occurred
+ * (at a minimum, this means the memory has been read or written).
+ *
+ * wmb(): Guarantees that all preceding stores to memory-
+ * like regions are visible before any subsequent
+ * stores and that all following stores will be
+ * visible only after all previous stores.
+ * rmb(): Like wmb(), but for reads.
+ * mb(): wmb()/rmb() combo, i.e., all previous memory
+ * accesses are visible before all subsequent
+ * accesses and vice versa. This is also known as
+ * a "fence."
+ *
+ * Note: "mb()" and its variants cannot be used as a fence to order
+ * accesses to memory mapped I/O registers. For that, mf.a needs to
+ * be used. However, we don't want to always use mf.a because (a)
+ * it's (presumably) much slower than mf and (b) mf.a is supported for
+ * sequential memory pages only.
+ */
+#define mb() ia64_mf()
+#define rmb() mb()
+#define wmb() mb()
+#define read_barrier_depends() do { } while(0)
+
+#ifdef CONFIG_SMP
+# define smp_mb() mb()
+# define smp_rmb() rmb()
+# define smp_wmb() wmb()
+# define smp_read_barrier_depends() read_barrier_depends()
+#else
+# define smp_mb() barrier()
+# define smp_rmb() barrier()
+# define smp_wmb() barrier()
+# define smp_read_barrier_depends() do { } while(0)
+#endif
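+
+/*
+ * Illustrative sketch of the classic wmb()/rmb() pairing; "data", "flag",
+ * and use() are hypothetical and not part of this header:
+ *
+ *	producer:	data = compute();
+ *			wmb();		// make data visible before flag
+ *			flag = 1;
+ *
+ *	consumer:	while (!flag)
+ *				cpu_relax();
+ *			rmb();		// order the flag load before the data load
+ *			use(data);
+ */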
+
+/*
+ * XXX check on this ---I suspect what Linus really wants here is
+ * acquire vs release semantics but we can't discuss this stuff with
+ * Linus just yet. Grrr...
+ */
+#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
+
+#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */
+
+/*
+ * The group barrier in front of the rsm & ssm is necessary to ensure
+ * that none of the previous instructions in the same group are
+ * affected by the rsm/ssm.
+ */
+/* For spinlocks etc */
+
+/*
+ * - clearing psr.i is implicitly serialized (visible by next insn)
+ * - setting psr.i requires data serialization
+ * - we need a stop-bit before reading PSR because we sometimes
+ * write a floating-point register right before reading the PSR
+ * and that writes to PSR.mfl
+ */
+#ifdef CONFIG_PARAVIRT
+#define __local_save_flags() ia64_get_psr_i()
+#else
+#define __local_save_flags() ia64_getreg(_IA64_REG_PSR)
+#endif
+
+#define __local_irq_save(x) \
+do { \
+ ia64_stop(); \
+ (x) = __local_save_flags(); \
+ ia64_stop(); \
+ ia64_rsm(IA64_PSR_I); \
+} while (0)
+
+#define __local_irq_disable() \
+do { \
+ ia64_stop(); \
+ ia64_rsm(IA64_PSR_I); \
+} while (0)
+
+#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I)
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+
+ extern unsigned long last_cli_ip;
+
+# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP)
+
+# define local_irq_save(x) \
+do { \
+ unsigned long __psr; \
+ \
+ __local_irq_save(__psr); \
+ if (__psr & IA64_PSR_I) \
+ __save_ip(); \
+ (x) = __psr; \
+} while (0)
+
+# define local_irq_disable() do { unsigned long __x; local_irq_save(__x); } while (0)
+
+# define local_irq_restore(x) \
+do { \
+ unsigned long __old_psr, __psr = (x); \
+ \
+ local_save_flags(__old_psr); \
+ __local_irq_restore(__psr); \
+ if ((__old_psr & IA64_PSR_I) && !(__psr & IA64_PSR_I)) \
+ __save_ip(); \
+} while (0)
+
+#else /* !CONFIG_IA64_DEBUG_IRQ */
+# define local_irq_save(x) __local_irq_save(x)
+# define local_irq_disable() __local_irq_disable()
+# define local_irq_restore(x) __local_irq_restore(x)
+#endif /* !CONFIG_IA64_DEBUG_IRQ */
+
+#define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+#define local_save_flags(flags) ({ ia64_stop(); (flags) = __local_save_flags(); })
+
+#define irqs_disabled() \
+({ \
+ unsigned long __ia64_id_flags; \
+ local_save_flags(__ia64_id_flags); \
+ (__ia64_id_flags & IA64_PSR_I) == 0; \
+})
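+
+/*
+ * Illustrative sketch of the usual save/restore pattern (the critical
+ * section body is hypothetical):
+ *
+ *	unsigned long flags;
+ *
+ *	local_irq_save(flags);
+ *	...			// interrupts are masked here
+ *	local_irq_restore(flags);
+ *
+ * local_irq_restore() re-enables interrupts only if they were enabled
+ * when the flags were saved, so nested sections compose safely.
+ */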
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_IA32_SUPPORT
+# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
+#else
+# define IS_IA32_PROCESS(regs) 0
+struct task_struct;
+static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
+static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
+#endif
+
+/*
+ * Context switch from one thread to another. If the two threads have
+ * different address spaces, schedule() has already taken care of
+ * switching to the new address space by calling switch_mm().
+ *
+ * Disabling access to the fph partition and the debug-register
+ * context switch MUST be done before calling ia64_switch_to() since a
+ * newly created thread returns directly to
+ * ia64_ret_from_syscall_clear_r8.
+ */
+extern struct task_struct *ia64_switch_to (void *next_task);
+
+struct task_struct;
+
+extern void ia64_save_extra (struct task_struct *task);
+extern void ia64_load_extra (struct task_struct *task);
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
+# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
+#else
+# define IA64_ACCOUNT_ON_SWITCH(p,n)
+#endif
+
+#ifdef CONFIG_PERFMON
+ DECLARE_PER_CPU(unsigned long, pfm_syst_info);
+# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
+#else
+# define PERFMON_IS_SYSWIDE() (0)
+#endif
+
+#define IA64_HAS_EXTRA_STATE(t) \
+ ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
+ || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
+
+#define __switch_to(prev,next,last) do { \
+ IA64_ACCOUNT_ON_SWITCH(prev, next); \
+ if (IA64_HAS_EXTRA_STATE(prev)) \
+ ia64_save_extra(prev); \
+ if (IA64_HAS_EXTRA_STATE(next)) \
+ ia64_load_extra(next); \
+ ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
+ (last) = ia64_switch_to((next)); \
+} while (0)
+
+#ifdef CONFIG_SMP
+/*
+ * In the SMP case, we save the fph state when context-switching away from a thread that
+ * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can
+ * pick up the state from task->thread.fph, avoiding the complication of having to fetch
+ * the latest fph state from another CPU. In other words: eager save, lazy restore.
+ */
+# define switch_to(prev,next,last) do { \
+ if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
+ ia64_psr(task_pt_regs(prev))->mfh = 0; \
+ (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
+ __ia64_save_fpu((prev)->thread.fph); \
+ } \
+ __switch_to(prev, next, last); \
+ /* "next" in old context is "current" in new context */ \
+ if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \
+ (task_cpu(current) != \
+ task_thread_info(current)->last_cpu))) { \
+ platform_migrate(current); \
+ task_thread_info(current)->last_cpu = task_cpu(current); \
+ } \
+} while (0)
+#else
+# define switch_to(prev,next,last) __switch_to(prev, next, last)
+#endif
+
+#define __ARCH_WANT_UNLOCKED_CTXSW
+#define ARCH_HAS_PREFETCH_SWITCH_STACK
+#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
+
+void cpu_idle_wait(void);
+
+#define arch_align_stack(x) (x)
+
+void default_idle(void);
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_IA64_SYSTEM_H */
diff --git a/arch/ia64/include/asm/termbits.h b/arch/ia64/include/asm/termbits.h
new file mode 100644
index 000000000000..9f162e0089ad
--- /dev/null
+++ b/arch/ia64/include/asm/termbits.h
@@ -0,0 +1,207 @@
+#ifndef _ASM_IA64_TERMBITS_H
+#define _ASM_IA64_TERMBITS_H
+
+/*
+ * Based on <asm-i386/termbits.h>.
+ *
+ * Modified 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * 99/01/28 Added new baudrates
+ */
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+#define IUTF8 0040000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _ASM_IA64_TERMBITS_H */
diff --git a/arch/ia64/include/asm/termios.h b/arch/ia64/include/asm/termios.h
new file mode 100644
index 000000000000..689d218c0c28
--- /dev/null
+++ b/arch/ia64/include/asm/termios.h
@@ -0,0 +1,97 @@
+#ifndef _ASM_IA64_TERMIOS_H
+#define _ASM_IA64_TERMIOS_H
+
+/*
+ * Modified 1999
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ *
+ * 99/01/28 Added N_IRDA and N_SMSBLOCK
+ */
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+# ifdef __KERNEL__
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+ unsigned short __tmp; \
+ get_user(__tmp,&(termio)->x); \
+ *(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+ put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ put_user((termios)->c_line, &(termio)->c_line); \
+ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+# endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_TERMIOS_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
new file mode 100644
index 000000000000..7c60fcdd2efd
--- /dev/null
+++ b/arch/ia64/include/asm/thread_info.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_THREAD_INFO_H
+#define _ASM_IA64_THREAD_INFO_H
+
+#ifndef ASM_OFFSETS_C
+#include <asm/asm-offsets.h>
+#endif
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+
+#define PREEMPT_ACTIVE_BIT 30
+#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * On IA-64, we want to keep the task structure and kernel stack together, so they can be
+ * mapped by a single TLB entry and so they can be addressed by the "current" pointer
+ * without having to do pointer masking.
+ */
+struct thread_info {
+ struct task_struct *task; /* XXX not really needed, except for dup_task_struct() */
+ struct exec_domain *exec_domain;/* execution domain */
+ __u32 flags; /* thread_info flags (see TIF_*) */
+ __u32 cpu; /* current CPU */
+ __u32 last_cpu; /* Last CPU thread ran on */
+ __u32 status; /* Thread synchronous flags */
+ mm_segment_t addr_limit; /* user-level address space limit */
+	int preempt_count;		/* 0=preemptible, <0=BUG; will also serve as bh-counter */
+ struct restart_block restart_block;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ __u64 ac_stamp;
+ __u64 ac_leave;
+ __u64 ac_stime;
+ __u64 ac_utime;
+#endif
+};
+
+#define THREAD_SIZE KERNEL_STACK_SIZE
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .addr_limit = KERNEL_DS, \
+ .preempt_count = 0, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
+#ifndef ASM_OFFSETS_C
+/* how to get the thread information struct from C */
+#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
+#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#else
+#define current_thread_info() ((struct thread_info *) 0)
+#define alloc_thread_info(tsk) ((struct thread_info *) 0)
+#define task_thread_info(tsk) ((struct thread_info *) 0)
+#endif
+#define free_thread_info(ti) /* nothing */
+#define task_stack_page(tsk) ((void *)(tsk))
+
+#define __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#define setup_thread_stack(p, org) \
+ *task_thread_info(p) = *task_thread_info(org); \
+ task_thread_info(p)->ac_stime = 0; \
+ task_thread_info(p)->ac_utime = 0; \
+ task_thread_info(p)->task = (p);
+#else
+#define setup_thread_stack(p, org) \
+ *task_thread_info(p) = *task_thread_info(org); \
+ task_thread_info(p)->task = (p);
+#endif
+#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
+
+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
+#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
+#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
+
+#define tsk_set_notify_resume(tsk) \
+ set_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME)
+extern void tsk_clear_notify_resume(struct task_struct *tsk);
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in least-significant 16 bits, other flags
+ * in top 16 bits
+ */
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
+#define TIF_SYSCALL_TRACE 2 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */
+#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
+#define TIF_NOTIFY_RESUME 6 /* resumption notification requested */
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE 17
+#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
+#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
+#define TIF_FREEZE 20 /* is freezing for suspend */
+#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
+#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
+#define _TIF_FREEZE (1 << TIF_FREEZE)
+#define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
+
+/* "work to do on user-return" bits */
+#define TIF_ALLWORK_MASK (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
+ _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE)
+/* like TIF_ALLWORK_MASK but without TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
+#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
+
+#define TS_POLLING 1 /* true if in idle loop and not sleeping */
+#define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */
+
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK 1
+static inline void set_restore_sigmask(void)
+{
+ struct thread_info *ti = current_thread_info();
+ ti->status |= TS_RESTORE_SIGMASK;
+ set_bit(TIF_SIGPENDING, &ti->flags);
+}
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
new file mode 100644
index 000000000000..05a6baf8a472
--- /dev/null
+++ b/arch/ia64/include/asm/timex.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_IA64_TIMEX_H
+#define _ASM_IA64_TIMEX_H
+
+/*
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * 2001/01/18 davidm Removed CLOCK_TICK_RATE. It makes no sense on IA-64.
+ * Also removed cacheflush_time as it's entirely unused.
+ */
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+
+typedef unsigned long cycles_t;
+
+extern void (*ia64_udelay)(unsigned long usecs);
+
+/*
+ * For performance reasons, we don't want to define CLOCK_TICK_RATE as
+ * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George
+ * Anzinger, 1/CLOCK_TICK_RATE is taken as the resolution of the timer clock. The time
+ * calculation assumes that you will use enough of these so that your tick size <= 1/HZ.
+ * If the calculation shows that your CLOCK_TICK_RATE cannot supply exactly 1/HZ ticks,
+ * the actual value is calculated and used to update the wall clock each jiffy. Setting
+ * CLOCK_TICK_RATE to x*HZ ensures that the calculation will find no errors. Hence we
+ * pick a multiple of HZ which gives us a (totally virtual) CLOCK_TICK_RATE of about
+ * 100MHz.
+ */
+#define CLOCK_TICK_RATE (HZ * 100000UL)
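+/* For example, with HZ == 1024: CLOCK_TICK_RATE = 1024 * 100000 = 102,400,000, i.e. ~100MHz. */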
+
+static inline cycles_t
+get_cycles (void)
+{
+ cycles_t ret;
+
+ ret = ia64_getreg(_IA64_REG_AR_ITC);
+ return ret;
+}
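+
+/*
+ * Illustrative sketch: measuring an interval in ITC cycles ("do_work" is
+ * hypothetical). The ITC advances at a CPU-dependent rate (itc_rate), so
+ * the delta is in cycles, not in any fixed time unit:
+ *
+ *	cycles_t t0 = get_cycles();
+ *	do_work();
+ *	cycles_t elapsed = get_cycles() - t0;
+ */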
+
+#endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
new file mode 100644
index 000000000000..20d8a39680c2
--- /dev/null
+++ b/arch/ia64/include/asm/tlb.h
@@ -0,0 +1,257 @@
+#ifndef _ASM_IA64_TLB_H
+#define _ASM_IA64_TLB_H
+/*
+ * Based on <asm-generic/tlb.h>.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+/*
+ * Removing a translation from a page table (including TLB-shootdown) is a four-step
+ * procedure:
+ *
+ * (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
+ * (this is a no-op on ia64).
+ * (2) Clear the relevant portions of the page-table.
+ * (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs.
+ * (4) Release the pages that were freed up in step (2).
+ *
+ * Note that the ordering of these steps is crucial to avoid races on MP machines.
+ *
+ * The Linux kernel defines several platform-specific hooks for TLB-shootdown. When
+ * unmapping a portion of the virtual address space, these hooks are called according to
+ * the following template:
+ *
+ * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
+ * {
+ * for each vma that needs a shootdown do {
+ * tlb_start_vma(tlb, vma);
+ * for each page-table-entry PTE that needs to be removed do {
+ * tlb_remove_tlb_entry(tlb, pte, address);
+ * if (pte refers to a normal page) {
+ * tlb_remove_page(tlb, page);
+ * }
+ * }
+ * tlb_end_vma(tlb, vma);
+ * }
+ * }
+ * tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM
+ */
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+#include <asm/machvec.h>
+
+#ifdef CONFIG_SMP
+# define FREE_PTE_NR 2048
+# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
+#else
+# define FREE_PTE_NR 0
+# define tlb_fast_mode(tlb) (1)
+#endif
+
+struct mmu_gather {
+ struct mm_struct *mm;
+ unsigned int nr; /* == ~0U => fast mode */
+ unsigned char fullmm; /* non-zero means full mm flush */
+ unsigned char need_flush; /* really unmapped some PTEs? */
+ unsigned long start_addr;
+ unsigned long end_addr;
+ struct page *pages[FREE_PTE_NR];
+};
+
+struct ia64_tr_entry {
+ u64 ifa;
+ u64 itir;
+ u64 pte;
+ u64 rr;
+};	/* record for a translation register (tr) entry */
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+
+extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+
+/*
+ * Region register macros.
+ */
+#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK 0x0000000000000001L
+#define RR_VE_SHIFT 0
+#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK 0x00000000000000fcL
+#define RR_PS_SHIFT 2
+#define RR_RID_MASK 0x00000000ffffff00L
+#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
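+
+/*
+ * Illustrative sketch: decoding a region register value "rr" (a
+ * hypothetical local) with the accessors above:
+ *
+ *	int ve  = RR_TO_VE(rr);		// VHPT walker enable bit
+ *	int ps  = RR_TO_PS(rr);		// log2 of the preferred page size
+ *	int rid = RR_TO_RID(rr);	// region id
+ */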
+
+/* Users of the generic TLB shootdown code must declare this storage space. */
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that were gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ unsigned int nr;
+
+ if (!tlb->need_flush)
+ return;
+ tlb->need_flush = 0;
+
+ if (tlb->fullmm) {
+ /*
+ * Tearing down the entire address space. This happens both as a result
+ * of exit() and execve(). The latter case necessitates the call to
+ * flush_tlb_mm() here.
+ */
+ flush_tlb_mm(tlb->mm);
+ } else if (unlikely (end - start >= 1024*1024*1024*1024UL
+ || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
+ {
+ /*
+ * If we flush more than a tera-byte or across regions, we're probably
+ * better off just flushing the entire TLB(s). This should be very rare
+ * and is not worth optimizing for.
+ */
+ flush_tlb_all();
+ } else {
+ /*
+ * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
+ * vma pointer.
+ */
+ struct vm_area_struct vma;
+
+ vma.vm_mm = tlb->mm;
+ /* flush the address range from the tlb: */
+ flush_tlb_range(&vma, start, end);
+ /* now flush the virt. page-table area mapping the address range: */
+ flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
+ }
+
+ /* lastly, release the freed pages */
+ nr = tlb->nr;
+ if (!tlb_fast_mode(tlb)) {
+ unsigned long i;
+ tlb->nr = 0;
+ tlb->start_addr = ~0UL;
+ for (i = 0; i < nr; ++i)
+ free_page_and_swap_cache(tlb->pages[i]);
+ }
+}
+
+/*
+ * Return a pointer to an initialized struct mmu_gather.
+ */
+static inline struct mmu_gather *
+tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+{
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+ tlb->mm = mm;
+ /*
+ * Use fast mode if only 1 CPU is online.
+ *
+ * It would be tempting to turn on fast-mode for full_mm_flush as well. But this
+ * doesn't work because of speculative accesses and software prefetching: the page
+ * table of "mm" may (and usually is) the currently active page table and even
+ * though the kernel won't do any user-space accesses during the TLB shoot down, a
+ * compiler might use speculation or lfetch.fault on what happens to be a valid
+ * user-space address. This in turn could trigger a TLB miss fault (or a VHPT
+ * walk) and re-insert a TLB entry we just removed. Slow mode avoids such
+ * problems. (We could make fast-mode work by switching the current task to a
+ * different "mm" during the shootdown.) --davidm 08/02/2002
+ */
+ tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
+ tlb->fullmm = full_mm_flush;
+ tlb->start_addr = ~0UL;
+ return tlb;
+}
+
+/*
+ * Called at the end of the shootdown operation to free up any resources that were
+ * collected.
+ */
+static inline void
+tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ /*
+ * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
+ * tlb->end_addr.
+ */
+ ia64_tlb_flush_mmu(tlb, start, end);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+
+ put_cpu_var(mmu_gathers);
+}
+
+/*
+ * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
+ * must be delayed until after the TLB has been flushed (see comments at the beginning of
+ * this file).
+ */
+static inline void
+tlb_remove_page (struct mmu_gather *tlb, struct page *page)
+{
+ tlb->need_flush = 1;
+
+ if (tlb_fast_mode(tlb)) {
+ free_page_and_swap_cache(page);
+ return;
+ }
+ tlb->pages[tlb->nr++] = page;
+ if (tlb->nr >= FREE_PTE_NR)
+ ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+/*
+ * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
+ * PTE, not just those pointing to (normal) physical memory.
+ */
+static inline void
+__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
+{
+ if (tlb->start_addr == ~0UL)
+ tlb->start_addr = address;
+ tlb->end_addr = address + PAGE_SIZE;
+}
+
+#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+
+#define tlb_remove_tlb_entry(tlb, ptep, addr) \
+do { \
+ tlb->need_flush = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, addr); \
+} while (0)
+
+#define pte_free_tlb(tlb, ptep) \
+do { \
+ tlb->need_flush = 1; \
+ __pte_free_tlb(tlb, ptep); \
+} while (0)
+
+#define pmd_free_tlb(tlb, ptep) \
+do { \
+ tlb->need_flush = 1; \
+ __pmd_free_tlb(tlb, ptep); \
+} while (0)
+
+#define pud_free_tlb(tlb, pudp) \
+do { \
+ tlb->need_flush = 1; \
+ __pud_free_tlb(tlb, pudp); \
+} while (0)
+
+#endif /* _ASM_IA64_TLB_H */
diff --git a/arch/ia64/include/asm/tlbflush.h b/arch/ia64/include/asm/tlbflush.h
new file mode 100644
index 000000000000..3be25dfed164
--- /dev/null
+++ b/arch/ia64/include/asm/tlbflush.h
@@ -0,0 +1,102 @@
+#ifndef _ASM_IA64_TLBFLUSH_H
+#define _ASM_IA64_TLBFLUSH_H
+
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/mm.h>
+
+#include <asm/intrinsics.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+
+/*
+ * Now for some TLB flushing routines. This is the kind of stuff that
+ * can be very expensive, so try to avoid them whenever possible.
+ */
+extern void setup_ptcg_sem(int max_purges, int from_palo);
+
+/*
+ * Flush everything (kernel mapping may also have changed due to
+ * vmalloc/vfree).
+ */
+extern void local_flush_tlb_all (void);
+
+#ifdef CONFIG_SMP
+ extern void smp_flush_tlb_all (void);
+ extern void smp_flush_tlb_mm (struct mm_struct *mm);
+ extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
+# define flush_tlb_all() smp_flush_tlb_all()
+#else
+# define flush_tlb_all() local_flush_tlb_all()
+# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
+#endif
+
+static inline void
+local_finish_flush_tlb_mm (struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ activate_context(mm);
+}
+
+/*
+ * Flush a specified user mapping. This is called, e.g., as a result of fork() and
+ * exit(). fork() ends up here because the copy-on-write mechanism needs to write-protect
+ * the PTEs of the parent task.
+ */
+static inline void
+flush_tlb_mm (struct mm_struct *mm)
+{
+ if (!mm)
+ return;
+
+ set_bit(mm->context, ia64_ctx.flushmap);
+ mm->context = 0;
+
+ if (atomic_read(&mm->mm_users) == 0)
+ return; /* happens as a result of exit_mmap() */
+
+#ifdef CONFIG_SMP
+ smp_flush_tlb_mm(mm);
+#else
+ local_finish_flush_tlb_mm(mm);
+#endif
+}
+
+extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
+
+/*
+ * Page-granular tlb flush.
+ */
+static inline void
+flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
+{
+#ifdef CONFIG_SMP
+ flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
+#else
+ if (vma->vm_mm == current->active_mm)
+ ia64_ptcl(addr, (PAGE_SHIFT << 2));
+ else
+ vma->vm_mm->context = 0;
+#endif
+}
+
+/*
+ * Flush the local TLB. Invoked from another cpu using an IPI.
+ */
+#ifdef CONFIG_SMP
+void smp_local_flush_tlb(void);
+#else
+#define smp_local_flush_tlb()
+#endif
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all(); /* XXX fix me */
+}
+
+#endif /* _ASM_IA64_TLBFLUSH_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
new file mode 100644
index 000000000000..35bcb641c9e5
--- /dev/null
+++ b/arch/ia64/include/asm/topology.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2002, Erich Focht, NEC
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _ASM_IA64_TOPOLOGY_H
+#define _ASM_IA64_TOPOLOGY_H
+
+#include <asm/acpi.h>
+#include <asm/numa.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_NUMA
+
+/* Nodes w/o CPUs are preferred for memory allocations, see build_zonelists */
+#define PENALTY_FOR_NODE_WITH_CPUS 255
+
+/*
+ * Distance above which we begin to use zone reclaim
+ */
+#define RECLAIM_DISTANCE 15
+
+/*
+ * Returns the number of the node containing CPU 'cpu'
+ */
+#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ */
+#define node_to_cpumask(node) (node_to_cpu_mask[node])
+
+/*
+ * Returns the number of the node containing Node 'nid'.
+ * Not implemented here. Multi-level hierarchies are detected with
+ * the help of node_distance().
+ */
+#define parent_node(nid) (nid)
+
+/*
+ * Returns the number of the first CPU on Node 'node'.
+ */
+#define node_to_first_cpu(node) (first_cpu(node_to_cpumask(node)))
+
+/*
+ * Determines the node for a given pci bus
+ */
+#define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node
+
+void build_cpu_to_node_map(void);
+
+#define SD_CPU_INIT (struct sched_domain) { \
+ .span = CPU_MASK_NONE, \
+ .parent = NULL, \
+ .child = NULL, \
+ .groups = NULL, \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 2, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 2, \
+ .wake_idx = 1, \
+ .forkexec_idx = 1, \
+ .flags = SD_LOAD_BALANCE \
+ | SD_BALANCE_NEWIDLE \
+ | SD_BALANCE_EXEC \
+ | SD_WAKE_AFFINE, \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+ .nr_balance_failed = 0, \
+}
+
+/* sched_domains SD_NODE_INIT for IA64 NUMA machines */
+#define SD_NODE_INIT (struct sched_domain) { \
+ .span = CPU_MASK_NONE, \
+ .parent = NULL, \
+ .child = NULL, \
+ .groups = NULL, \
+ .min_interval = 8, \
+ .max_interval = 8*(min(num_online_cpus(), 32)), \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 2, \
+ .busy_idx = 3, \
+ .idle_idx = 2, \
+ .newidle_idx = 2, \
+ .wake_idx = 1, \
+ .forkexec_idx = 1, \
+ .flags = SD_LOAD_BALANCE \
+ | SD_BALANCE_EXEC \
+ | SD_BALANCE_FORK \
+ | SD_SERIALIZE \
+ | SD_WAKE_BALANCE, \
+ .last_balance = jiffies, \
+ .balance_interval = 64, \
+ .nr_balance_failed = 0, \
+}
+
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_SMP
+#define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
+#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
+#define topology_core_siblings(cpu) (cpu_core_map[cpu])
+#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define smt_capable() (smp_num_siblings > 1)
+#endif
+
+extern void arch_fix_phys_package_id(int num, u32 slot);
+
+#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
+ CPU_MASK_ALL : \
+ node_to_cpumask(pcibus_to_node(bus)) \
+ )
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_IA64_TOPOLOGY_H */
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
new file mode 100644
index 000000000000..e36b3716e718
--- /dev/null
+++ b/arch/ia64/include/asm/types.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_IA64_TYPES_H
+#define _ASM_IA64_TYPES_H
+
+/*
+ * This file is never included by application software unless explicitly requested (e.g.,
+ * via linux/types.h), in which case the application is Linux-specific, so (user-) name
+ * space pollution is not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid name clashes.
+ *
+ * Based on <asm-alpha/types.h>.
+ *
+ * Modified 1998-2000, 2002
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm-generic/int-l64.h>
+
+#ifdef __ASSEMBLY__
+# define __IA64_UL(x) (x)
+# define __IA64_UL_CONST(x) x
+
+# ifdef __KERNEL__
+# define BITS_PER_LONG 64
+# endif
+
+#else
+# define __IA64_UL(x) ((unsigned long)(x))
+# define __IA64_UL_CONST(x) x##UL
+
+typedef unsigned int umode_t;
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+# ifdef __KERNEL__
+
+#define BITS_PER_LONG 64
+
+/* DMA addresses are 64-bits wide, in general. */
+
+typedef u64 dma_addr_t;
+
+# endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_TYPES_H */
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
new file mode 100644
index 000000000000..449c8c0fa2bd
--- /dev/null
+++ b/arch/ia64/include/asm/uaccess.h
@@ -0,0 +1,401 @@
+#ifndef _ASM_IA64_UACCESS_H
+#define _ASM_IA64_UACCESS_H
+
+/*
+ * This file defines various macros to transfer memory areas across
+ * the user/kernel boundary. This needs to be done carefully because
+ * this code is executed in kernel mode and uses user-specified
+ * addresses. Thus, we need to be careful not to let the user
+ * trick us into accessing kernel memory that would normally be
+ * inaccessible. This code is also fairly performance sensitive,
+ * so we want to spend as little time doing safety checks as
+ * possible.
+ *
+ * To make matters a bit more interesting, these macros are sometimes
+ * also called from within the kernel itself, in which case the address
+ * validity check must be skipped. The get_fs() macro tells us what
+ * to do: if get_fs()==USER_DS, checking is performed, if
+ * get_fs()==KERNEL_DS, checking is bypassed.
+ *
+ * Note that even if the memory area specified by the user is in a
+ * valid address range, it is still possible that we'll get a page
+ * fault while accessing it. This is handled by filling out an
+ * exception handler fixup entry for each instruction that has the
+ * potential to fault. When such a fault occurs, the page fault
+ * handler checks to see whether the faulting instruction has a fixup
+ * associated and, if so, sets r8 to -EFAULT, clears r9 to 0, and
+ * then resumes execution at the continuation point.
+ *
+ * Based on <asm-alpha/uaccess.h>.
+ *
+ * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/page-flags.h>
+#include <linux/mm.h>
+
+#include <asm/intrinsics.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+
+/*
+ * For historical reasons, the following macros are grossly misnamed:
+ */
+#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
+#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+/*
+ * When accessing user memory, we need to make sure the entire area really is in
+ * user-level space. In order to do this efficiently, we make sure that the page at
+ * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
+ * point inside the virtually mapped linear page table.
+ */
+#define __access_ok(addr, size, segment) \
+({ \
+ __chk_user_ptr(addr); \
+ (likely((unsigned long) (addr) <= (segment).seg) \
+ && ((segment).seg == KERNEL_DS.seg \
+ || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
+})
+#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
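+
+/*
+ * Illustrative sketch of a manual check before a batch of __get_user()
+ * calls; "ubuf" and "len" are hypothetical:
+ *
+ *	if (!access_ok(VERIFY_READ, ubuf, len))
+ *		return -EFAULT;
+ */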
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
+#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
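+
+/*
+ * Illustrative sketch ("uptr" is a hypothetical int __user *); both
+ * macros return 0 on success and -EFAULT on a faulting access:
+ *
+ *	int val;
+ *
+ *	if (get_user(val, uptr))
+ *		return -EFAULT;
+ *	if (put_user(val + 1, uptr))
+ *		return -EFAULT;
+ */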
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the programmer has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+extern long __put_user_unaligned_unknown (void);
+
+#define __put_user_unaligned(x, ptr) \
+({ \
+ long __ret; \
+ switch (sizeof(*(ptr))) { \
+ case 1: __ret = __put_user((x), (ptr)); break; \
+ case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \
+ | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
+ case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \
+ | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
+ case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \
+ | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
+ default: __ret = __put_user_unaligned_unknown(); \
+ } \
+ __ret; \
+})
+
+extern long __get_user_unaligned_unknown (void);
+
+#define __get_user_unaligned(x, ptr) \
+({ \
+ long __ret; \
+ switch (sizeof(*(ptr))) { \
+ case 1: __ret = __get_user((x), (ptr)); break; \
+ case 2: __ret = (__get_user((x), (u8 __user *)(ptr))) \
+ | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
+ case 4: __ret = (__get_user((x), (u16 __user *)(ptr))) \
+ | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
+ case 8: __ret = (__get_user((x), (u32 __user *)(ptr))) \
+ | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
+ default: __ret = __get_user_unaligned_unknown(); \
+ } \
+ __ret; \
+})
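+
+/*
+ * Illustrative sketch ("uptr" is a hypothetical, possibly misaligned user
+ * pointer): the macro above splits the access into narrower loads and
+ * reassembles the value:
+ *
+ *	u64 v;
+ *	long err = __get_user_unaligned(v, (u64 __user *)uptr);
+ */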
+
+#ifdef ASM_SUPPORTED
+ struct __large_struct { unsigned long buf[100]; };
+# define __m(x) (*(struct __large_struct __user *)(x))
+
+/* We need to declare the __ex_table section before we can use it in .xdata4. */
+asm (".section \"__ex_table\", \"a\"\n\t.previous");
+
+# define __get_user_size(val, addr, n, err) \
+do { \
+ register long __gu_r8 asm ("r8") = 0; \
+ register long __gu_r9 asm ("r9"); \
+ asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
+ "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n" \
+ "[1:]" \
+ : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8)); \
+ (err) = __gu_r8; \
+ (val) = __gu_r9; \
+} while (0)
+
+/*
+ * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it. This
+ * is because they do not write to any memory gcc knows about, so there are no aliasing
+ * issues.
+ */
+# define __put_user_size(val, addr, n, err) \
+do { \
+ register long __pu_r8 asm ("r8") = 0; \
+ asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
+ "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n" \
+ "[1:]" \
+ : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \
+ (err) = __pu_r8; \
+} while (0)
+
+#else /* !ASM_SUPPORTED */
+# define RELOC_TYPE 2 /* ip-rel */
+# define __get_user_size(val, addr, n, err) \
+do { \
+ __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE); \
+ (err) = ia64_getreg(_IA64_REG_R8); \
+ (val) = ia64_getreg(_IA64_REG_R9); \
+} while (0)
+# define __put_user_size(val, addr, n, err) \
+do { \
+ __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \
+ (err) = ia64_getreg(_IA64_REG_R8); \
+} while (0)
+#endif /* !ASM_SUPPORTED */
+
+extern void __get_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
+ * could clobber r8 and r9 (among others). Thus, be careful not to evaluate them while
+ * using r8/r9.
+ */
+#define __do_get_user(check, x, ptr, size, segment) \
+({ \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__ (size) __gu_size = (size); \
+ long __gu_err = -EFAULT; \
+ unsigned long __gu_val = 0; \
+ if (!check || __access_ok(__gu_ptr, size, segment)) \
+ switch (__gu_size) { \
+ case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
+ case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
+ case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break; \
+ case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
+ default: __get_user_unknown(); break; \
+ } \
+ (x) = (__typeof__(*(__gu_ptr))) __gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size, KERNEL_DS)
+#define __get_user_check(x, ptr, size, segment) __do_get_user(1, x, ptr, size, segment)
+
+extern void __put_user_unknown (void);
+
+/*
+ * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
+ * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8.
+ */
+#define __do_put_user(check, x, ptr, size, segment) \
+({ \
+ __typeof__ (x) __pu_x = (x); \
+ __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \
+ __typeof__ (size) __pu_size = (size); \
+ long __pu_err = -EFAULT; \
+ \
+ if (!check || __access_ok(__pu_ptr, __pu_size, segment)) \
+ switch (__pu_size) { \
+ case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \
+ case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \
+ case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break; \
+ case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break; \
+ default: __put_user_unknown(); break; \
+ } \
+ __pu_err; \
+})
+
+#define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size, KERNEL_DS)
+#define __put_user_check(x, ptr, size, segment) __do_put_user(1, x, ptr, size, segment)
+
+/*
+ * Complex access routines
+ */
+extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
+ unsigned long count);
+
+static inline unsigned long
+__copy_to_user (void __user *to, const void *from, unsigned long count)
+{
+ return __copy_user(to, (__force void __user *) from, count);
+}
+
+static inline unsigned long
+__copy_from_user (void *to, const void __user *from, unsigned long count)
+{
+ return __copy_user((__force void __user *) to, from, count);
+}
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+#define copy_to_user(to, from, n) \
+({ \
+ void __user *__cu_to = (to); \
+ const void *__cu_from = (from); \
+ long __cu_len = (n); \
+ \
+ if (__access_ok(__cu_to, __cu_len, get_fs())) \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ __cu_len; \
+})
+
+#define copy_from_user(to, from, n) \
+({ \
+ void *__cu_to = (to); \
+ const void __user *__cu_from = (from); \
+ long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+ if (__access_ok(__cu_from, __cu_len, get_fs())) \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ __cu_len; \
+})
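+
+/*
+ * Illustrative sketch of the usual calling convention ("kbuf", "ubuf",
+ * and "len" are hypothetical): the copy routines return the number of
+ * bytes that could NOT be copied, so zero means success:
+ *
+ *	if (copy_from_user(kbuf, ubuf, len))
+ *		return -EFAULT;
+ */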
+
+#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
+
+static inline unsigned long
+copy_in_user (void __user *to, const void __user *from, unsigned long n)
+{
+ if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
+ n = __copy_user(to, from, n);
+ return n;
+}
+
+extern unsigned long __do_clear_user (void __user *, unsigned long);
+
+#define __clear_user(to, n) __do_clear_user(to, n)
+
+#define clear_user(to, n) \
+({ \
+ unsigned long __cu_len = (n); \
+ if (__access_ok(to, __cu_len, get_fs())) \
+ __cu_len = __do_clear_user(to, __cu_len); \
+ __cu_len; \
+})
+
+
+/*
+ * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
+ * strlen.
+ */
+extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
+
+#define strncpy_from_user(to, from, n) \
+({ \
+ const char __user * __sfu_from = (from); \
+ long __sfu_ret = -EFAULT; \
+ if (__access_ok(__sfu_from, 0, get_fs())) \
+ __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
+ __sfu_ret; \
+})
+
+/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
+extern unsigned long __strlen_user (const char __user *);
+
+#define strlen_user(str) \
+({ \
+ const char __user *__su_str = (str); \
+ unsigned long __su_ret = 0; \
+ if (__access_ok(__su_str, 0, get_fs())) \
+ __su_ret = __strlen_user(__su_str); \
+ __su_ret; \
+})
+
+/*
+ * Returns: 0 if exception before NUL or reaching the supplied limit
+ * (N), a value greater than N if the limit would be exceeded, else
+ * strlen.
+ */
+extern unsigned long __strnlen_user (const char __user *, long);
+
+#define strnlen_user(str, len) \
+({ \
+ const char __user *__su_str = (str); \
+ unsigned long __su_ret = 0; \
+ if (__access_ok(__su_str, 0, get_fs())) \
+ __su_ret = __strnlen_user(__su_str, len); \
+ __su_ret; \
+})
+
+/* Generic code can't deal with the location-relative format that we use for compactness. */
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
+
+struct exception_table_entry {
+ int addr; /* location-relative address of insn this fixup is for */
+ int cont; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
+};
+
+extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
+extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
+
+static inline int
+ia64_done_with_exception (struct pt_regs *regs)
+{
+ const struct exception_table_entry *e;
+ e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
+ if (e) {
+ ia64_handle_exception(regs, e);
+ return 1;
+ }
+ return 0;
+}
+
+#define ARCH_HAS_TRANSLATE_MEM_PTR 1
+static __inline__ char *
+xlate_dev_mem_ptr (unsigned long p)
+{
+ struct page *page;
+ char * ptr;
+
+ page = pfn_to_page(p >> PAGE_SHIFT);
+ if (PageUncached(page))
+ ptr = (char *)p + __IA64_UNCACHED_OFFSET;
+ else
+ ptr = __va(p);
+
+ return ptr;
+}
+
+/*
+ * Convert a virtual cached kernel memory pointer to an uncached pointer
+ */
+static __inline__ char *
+xlate_dev_kmem_ptr (char * p)
+{
+ struct page *page;
+ char * ptr;
+
+ page = virt_to_page((unsigned long)p);
+ if (PageUncached(page))
+ ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
+ else
+ ptr = p;
+
+ return ptr;
+}
+
+#endif /* _ASM_IA64_UACCESS_H */
diff --git a/arch/ia64/include/asm/ucontext.h b/arch/ia64/include/asm/ucontext.h
new file mode 100644
index 000000000000..bf573dc8ca6a
--- /dev/null
+++ b/arch/ia64/include/asm/ucontext.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_IA64_UCONTEXT_H
+#define _ASM_IA64_UCONTEXT_H
+
+struct ucontext {
+ struct sigcontext uc_mcontext;
+};
+
+#define uc_link uc_mcontext.sc_gr[0] /* wrong type; nobody cares */
+#define uc_sigmask uc_mcontext.sc_sigmask
+#define uc_stack uc_mcontext.sc_stack
+
+#endif /* _ASM_IA64_UCONTEXT_H */
diff --git a/arch/ia64/include/asm/unaligned.h b/arch/ia64/include/asm/unaligned.h
new file mode 100644
index 000000000000..7bddc7f58584
--- /dev/null
+++ b/arch/ia64/include/asm/unaligned.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_IA64_UNALIGNED_H
+#define _ASM_IA64_UNALIGNED_H
+
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
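+
+/*
+ * Illustrative sketch ("p" is a hypothetical, arbitrarily aligned
+ * pointer): fetch a little-endian u32 without an alignment trap:
+ *
+ *	u32 v = get_unaligned((u32 *)p);
+ */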
+
+#endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/arch/ia64/include/asm/uncached.h b/arch/ia64/include/asm/uncached.h
new file mode 100644
index 000000000000..13d7e65ca3cc
--- /dev/null
+++ b/arch/ia64/include/asm/uncached.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2001-2008 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Prototypes for the uncached page allocator
+ */
+
+extern unsigned long uncached_alloc_page(int starting_nid, int n_pages);
+extern void uncached_free_page(unsigned long uc_addr, int n_pages);
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
new file mode 100644
index 000000000000..d535833aab5e
--- /dev/null
+++ b/arch/ia64/include/asm/unistd.h
@@ -0,0 +1,384 @@
+#ifndef _ASM_IA64_UNISTD_H
+#define _ASM_IA64_UNISTD_H
+
+/*
+ * IA-64 Linux syscall numbers and inline-functions.
+ *
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm/break.h>
+
+#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL
+
+#define __NR_ni_syscall 1024
+#define __NR_exit 1025
+#define __NR_read 1026
+#define __NR_write 1027
+#define __NR_open 1028
+#define __NR_close 1029
+#define __NR_creat 1030
+#define __NR_link 1031
+#define __NR_unlink 1032
+#define __NR_execve 1033
+#define __NR_chdir 1034
+#define __NR_fchdir 1035
+#define __NR_utimes 1036
+#define __NR_mknod 1037
+#define __NR_chmod 1038
+#define __NR_chown 1039
+#define __NR_lseek 1040
+#define __NR_getpid 1041
+#define __NR_getppid 1042
+#define __NR_mount 1043
+#define __NR_umount 1044
+#define __NR_setuid 1045
+#define __NR_getuid 1046
+#define __NR_geteuid 1047
+#define __NR_ptrace 1048
+#define __NR_access 1049
+#define __NR_sync 1050
+#define __NR_fsync 1051
+#define __NR_fdatasync 1052
+#define __NR_kill 1053
+#define __NR_rename 1054
+#define __NR_mkdir 1055
+#define __NR_rmdir 1056
+#define __NR_dup 1057
+#define __NR_pipe 1058
+#define __NR_times 1059
+#define __NR_brk 1060
+#define __NR_setgid 1061
+#define __NR_getgid 1062
+#define __NR_getegid 1063
+#define __NR_acct 1064
+#define __NR_ioctl 1065
+#define __NR_fcntl 1066
+#define __NR_umask 1067
+#define __NR_chroot 1068
+#define __NR_ustat 1069
+#define __NR_dup2 1070
+#define __NR_setreuid 1071
+#define __NR_setregid 1072
+#define __NR_getresuid 1073
+#define __NR_setresuid 1074
+#define __NR_getresgid 1075
+#define __NR_setresgid 1076
+#define __NR_getgroups 1077
+#define __NR_setgroups 1078
+#define __NR_getpgid 1079
+#define __NR_setpgid 1080
+#define __NR_setsid 1081
+#define __NR_getsid 1082
+#define __NR_sethostname 1083
+#define __NR_setrlimit 1084
+#define __NR_getrlimit 1085
+#define __NR_getrusage 1086
+#define __NR_gettimeofday 1087
+#define __NR_settimeofday 1088
+#define __NR_select 1089
+#define __NR_poll 1090
+#define __NR_symlink 1091
+#define __NR_readlink 1092
+#define __NR_uselib 1093
+#define __NR_swapon 1094
+#define __NR_swapoff 1095
+#define __NR_reboot 1096
+#define __NR_truncate 1097
+#define __NR_ftruncate 1098
+#define __NR_fchmod 1099
+#define __NR_fchown 1100
+#define __NR_getpriority 1101
+#define __NR_setpriority 1102
+#define __NR_statfs 1103
+#define __NR_fstatfs 1104
+#define __NR_gettid 1105
+#define __NR_semget 1106
+#define __NR_semop 1107
+#define __NR_semctl 1108
+#define __NR_msgget 1109
+#define __NR_msgsnd 1110
+#define __NR_msgrcv 1111
+#define __NR_msgctl 1112
+#define __NR_shmget 1113
+#define __NR_shmat 1114
+#define __NR_shmdt 1115
+#define __NR_shmctl 1116
+/* also known as klogctl() in GNU libc: */
+#define __NR_syslog 1117
+#define __NR_setitimer 1118
+#define __NR_getitimer 1119
+/* 1120 was __NR_old_stat */
+/* 1121 was __NR_old_lstat */
+/* 1122 was __NR_old_fstat */
+#define __NR_vhangup 1123
+#define __NR_lchown 1124
+#define __NR_remap_file_pages 1125
+#define __NR_wait4 1126
+#define __NR_sysinfo 1127
+#define __NR_clone 1128
+#define __NR_setdomainname 1129
+#define __NR_uname 1130
+#define __NR_adjtimex 1131
+/* 1132 was __NR_create_module */
+#define __NR_init_module 1133
+#define __NR_delete_module 1134
+/* 1135 was __NR_get_kernel_syms */
+/* 1136 was __NR_query_module */
+#define __NR_quotactl 1137
+#define __NR_bdflush 1138
+#define __NR_sysfs 1139
+#define __NR_personality 1140
+#define __NR_afs_syscall 1141
+#define __NR_setfsuid 1142
+#define __NR_setfsgid 1143
+#define __NR_getdents 1144
+#define __NR_flock 1145
+#define __NR_readv 1146
+#define __NR_writev 1147
+#define __NR_pread64 1148
+#define __NR_pwrite64 1149
+#define __NR__sysctl 1150
+#define __NR_mmap 1151
+#define __NR_munmap 1152
+#define __NR_mlock 1153
+#define __NR_mlockall 1154
+#define __NR_mprotect 1155
+#define __NR_mremap 1156
+#define __NR_msync 1157
+#define __NR_munlock 1158
+#define __NR_munlockall 1159
+#define __NR_sched_getparam 1160
+#define __NR_sched_setparam 1161
+#define __NR_sched_getscheduler 1162
+#define __NR_sched_setscheduler 1163
+#define __NR_sched_yield 1164
+#define __NR_sched_get_priority_max 1165
+#define __NR_sched_get_priority_min 1166
+#define __NR_sched_rr_get_interval 1167
+#define __NR_nanosleep 1168
+#define __NR_nfsservctl 1169
+#define __NR_prctl 1170
+/* 1171 is reserved for backwards compatibility with old __NR_getpagesize */
+#define __NR_mmap2 1172
+#define __NR_pciconfig_read 1173
+#define __NR_pciconfig_write 1174
+#define __NR_perfmonctl 1175
+#define __NR_sigaltstack 1176
+#define __NR_rt_sigaction 1177
+#define __NR_rt_sigpending 1178
+#define __NR_rt_sigprocmask 1179
+#define __NR_rt_sigqueueinfo 1180
+#define __NR_rt_sigreturn 1181
+#define __NR_rt_sigsuspend 1182
+#define __NR_rt_sigtimedwait 1183
+#define __NR_getcwd 1184
+#define __NR_capget 1185
+#define __NR_capset 1186
+#define __NR_sendfile 1187
+#define __NR_getpmsg 1188
+#define __NR_putpmsg 1189
+#define __NR_socket 1190
+#define __NR_bind 1191
+#define __NR_connect 1192
+#define __NR_listen 1193
+#define __NR_accept 1194
+#define __NR_getsockname 1195
+#define __NR_getpeername 1196
+#define __NR_socketpair 1197
+#define __NR_send 1198
+#define __NR_sendto 1199
+#define __NR_recv 1200
+#define __NR_recvfrom 1201
+#define __NR_shutdown 1202
+#define __NR_setsockopt 1203
+#define __NR_getsockopt 1204
+#define __NR_sendmsg 1205
+#define __NR_recvmsg 1206
+#define __NR_pivot_root 1207
+#define __NR_mincore 1208
+#define __NR_madvise 1209
+#define __NR_stat 1210
+#define __NR_lstat 1211
+#define __NR_fstat 1212
+#define __NR_clone2 1213
+#define __NR_getdents64 1214
+#define __NR_getunwind 1215
+#define __NR_readahead 1216
+#define __NR_setxattr 1217
+#define __NR_lsetxattr 1218
+#define __NR_fsetxattr 1219
+#define __NR_getxattr 1220
+#define __NR_lgetxattr 1221
+#define __NR_fgetxattr 1222
+#define __NR_listxattr 1223
+#define __NR_llistxattr 1224
+#define __NR_flistxattr 1225
+#define __NR_removexattr 1226
+#define __NR_lremovexattr 1227
+#define __NR_fremovexattr 1228
+#define __NR_tkill 1229
+#define __NR_futex 1230
+#define __NR_sched_setaffinity 1231
+#define __NR_sched_getaffinity 1232
+#define __NR_set_tid_address 1233
+#define __NR_fadvise64 1234
+#define __NR_tgkill 1235
+#define __NR_exit_group 1236
+#define __NR_lookup_dcookie 1237
+#define __NR_io_setup 1238
+#define __NR_io_destroy 1239
+#define __NR_io_getevents 1240
+#define __NR_io_submit 1241
+#define __NR_io_cancel 1242
+#define __NR_epoll_create 1243
+#define __NR_epoll_ctl 1244
+#define __NR_epoll_wait 1245
+#define __NR_restart_syscall 1246
+#define __NR_semtimedop 1247
+#define __NR_timer_create 1248
+#define __NR_timer_settime 1249
+#define __NR_timer_gettime 1250
+#define __NR_timer_getoverrun 1251
+#define __NR_timer_delete 1252
+#define __NR_clock_settime 1253
+#define __NR_clock_gettime 1254
+#define __NR_clock_getres 1255
+#define __NR_clock_nanosleep 1256
+#define __NR_fstatfs64 1257
+#define __NR_statfs64 1258
+#define __NR_mbind 1259
+#define __NR_get_mempolicy 1260
+#define __NR_set_mempolicy 1261
+#define __NR_mq_open 1262
+#define __NR_mq_unlink 1263
+#define __NR_mq_timedsend 1264
+#define __NR_mq_timedreceive 1265
+#define __NR_mq_notify 1266
+#define __NR_mq_getsetattr 1267
+#define __NR_kexec_load 1268
+#define __NR_vserver 1269
+#define __NR_waitid 1270
+#define __NR_add_key 1271
+#define __NR_request_key 1272
+#define __NR_keyctl 1273
+#define __NR_ioprio_set 1274
+#define __NR_ioprio_get 1275
+#define __NR_move_pages 1276
+#define __NR_inotify_init 1277
+#define __NR_inotify_add_watch 1278
+#define __NR_inotify_rm_watch 1279
+#define __NR_migrate_pages 1280
+#define __NR_openat 1281
+#define __NR_mkdirat 1282
+#define __NR_mknodat 1283
+#define __NR_fchownat 1284
+#define __NR_futimesat 1285
+#define __NR_newfstatat 1286
+#define __NR_unlinkat 1287
+#define __NR_renameat 1288
+#define __NR_linkat 1289
+#define __NR_symlinkat 1290
+#define __NR_readlinkat 1291
+#define __NR_fchmodat 1292
+#define __NR_faccessat 1293
+#define __NR_pselect6 1294
+#define __NR_ppoll 1295
+#define __NR_unshare 1296
+#define __NR_splice 1297
+#define __NR_set_robust_list 1298
+#define __NR_get_robust_list 1299
+#define __NR_sync_file_range 1300
+#define __NR_tee 1301
+#define __NR_vmsplice 1302
+#define __NR_fallocate 1303
+#define __NR_getcpu 1304
+#define __NR_epoll_pwait 1305
+#define __NR_utimensat 1306
+#define __NR_signalfd 1307
+#define __NR_timerfd 1308
+#define __NR_eventfd 1309
+#define __NR_timerfd_create 1310
+#define __NR_timerfd_settime 1311
+#define __NR_timerfd_gettime 1312
+#define __NR_signalfd4 1313
+#define __NR_eventfd2 1314
+#define __NR_epoll_create1 1315
+#define __NR_dup3 1316
+#define __NR_pipe2 1317
+#define __NR_inotify_init1 1318
+
+#ifdef __KERNEL__
+
+#define NR_syscalls 295 /* length of syscall table (entries 1024..1318) */
+
+/*
+ * The following defines stop scripts/checksyscalls.sh from complaining about
+ * unimplemented system calls. Glibc provides for each of these by using
+ * more modern equivalent system calls.
+ */
+#define __IGNORE_fork /* clone() */
+#define __IGNORE_time /* gettimeofday() */
+#define __IGNORE_alarm /* setitimer(ITIMER_REAL, ... */
+#define __IGNORE_pause /* rt_sigprocmask(), rt_sigsuspend() */
+#define __IGNORE_utime /* utimes() */
+#define __IGNORE_getpgrp /* getpgid() */
+#define __IGNORE_vfork /* clone() */
+
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+
+#ifdef CONFIG_IA32_SUPPORT
+# define __ARCH_WANT_SYS_FADVISE64
+# define __ARCH_WANT_SYS_GETPGRP
+# define __ARCH_WANT_SYS_LLSEEK
+# define __ARCH_WANT_SYS_NICE
+# define __ARCH_WANT_SYS_OLD_GETRLIMIT
+# define __ARCH_WANT_SYS_OLDUMOUNT
+# define __ARCH_WANT_SYS_SIGPENDING
+# define __ARCH_WANT_SYS_SIGPROCMASK
+# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
+# define __ARCH_WANT_COMPAT_SYS_TIME
+#endif
+
+#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+
+extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr);
+
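+/*
+ * Illustrative sketch, not part of the original header: the __NR_* numbers
+ * above are passed to the kernel as the final argument of the stub declared
+ * above, with unused argument slots zeroed.  A raw getpid() then reduces to:
+ */
+static inline long __example_raw_getpid(void)
+{
+	return __ia64_syscall(0, 0, 0, 0, 0, __NR_getpid);
+}
+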
+asmlinkage unsigned long sys_mmap(
+ unsigned long addr, unsigned long len,
+ int prot, int flags,
+ int fd, long off);
+asmlinkage unsigned long sys_mmap2(
+ unsigned long addr, unsigned long len,
+ int prot, int flags,
+ int fd, long pgoff);
+struct pt_regs;
+struct sigaction;
+long sys_execve(char __user *filename, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs);
+asmlinkage long sys_pipe(void);
+asmlinkage long sys_rt_sigaction(int sig,
+ const struct sigaction __user *act,
+ struct sigaction __user *oact,
+ size_t sigsetsize);
+
+/*
+ * "Conditional" syscalls
+ *
+ * Note: this macro can only be used in the file that defines sys_ni_syscall, i.e., in
+ * kernel/sys_ni.c. This version causes warnings because the declaration isn't a
+ * proper prototype, but we can't use __typeof__ either, because not all cond_syscall()
+ * declarations have prototypes at the moment.
+ */
+#define cond_syscall(x) asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
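+
+/*
+ * For example (illustrative only), kernel/sys_ni.c stubs out optional
+ * syscalls with lines such as:
+ *
+ *	cond_syscall(sys_pciconfig_read);
+ *
+ * so that configurations lacking the feature still resolve the syscall
+ * table entry to sys_ni_syscall().
+ */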
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/include/asm/unwind.h b/arch/ia64/include/asm/unwind.h
new file mode 100644
index 000000000000..1af3875f1a57
--- /dev/null
+++ b/arch/ia64/include/asm/unwind.h
@@ -0,0 +1,233 @@
+#ifndef _ASM_IA64_UNWIND_H
+#define _ASM_IA64_UNWIND_H
+
+/*
+ * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full IA-64 unwind API (though
+ * it would of course be possible to implement the kernel API on top
+ * of it).
+ */
+
+struct task_struct; /* forward declaration */
+struct switch_stack; /* forward declaration */
+
+enum unw_application_register {
+ UNW_AR_BSP,
+ UNW_AR_BSPSTORE,
+ UNW_AR_PFS,
+ UNW_AR_RNAT,
+ UNW_AR_UNAT,
+ UNW_AR_LC,
+ UNW_AR_EC,
+ UNW_AR_FPSR,
+ UNW_AR_RSC,
+ UNW_AR_CCV,
+ UNW_AR_CSD,
+ UNW_AR_SSD
+};
+
+/*
+ * The following declarations are private to the unwind
+ * implementation:
+ */
+
+struct unw_stack {
+ unsigned long limit;
+ unsigned long top;
+};
+
+#define UNW_FLAG_INTERRUPT_FRAME (1UL << 0)
+
+/*
+ * No user of this module should ever access this structure directly
+ * as it is subject to change. It is declared here solely so we can
+ * use automatic variables.
+ */
+struct unw_frame_info {
+ struct unw_stack regstk;
+ struct unw_stack memstk;
+ unsigned int flags;
+ short hint;
+ short prev_script;
+
+ /* current frame info: */
+ unsigned long bsp; /* backing store pointer value */
+ unsigned long sp; /* stack pointer value */
+ unsigned long psp; /* previous sp value */
+ unsigned long ip; /* instruction pointer value */
+ unsigned long pr; /* current predicate values */
+ unsigned long *cfm_loc; /* cfm save location (or NULL) */
+ unsigned long pt; /* struct pt_regs location */
+
+ struct task_struct *task;
+ struct switch_stack *sw;
+
+ /* preserved state: */
+ unsigned long *bsp_loc; /* previous bsp save location */
+ unsigned long *bspstore_loc;
+ unsigned long *pfs_loc;
+ unsigned long *rnat_loc;
+ unsigned long *rp_loc;
+ unsigned long *pri_unat_loc;
+ unsigned long *unat_loc;
+ unsigned long *pr_loc;
+ unsigned long *lc_loc;
+ unsigned long *fpsr_loc;
+ struct unw_ireg {
+ unsigned long *loc;
+ struct unw_ireg_nat {
+ unsigned long type : 3; /* enum unw_nat_type */
+ signed long off : 61; /* NaT word is at loc+nat.off */
+ } nat;
+ } r4, r5, r6, r7;
+ unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc;
+ struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16];
+};
+
+/*
+ * The official API follows below:
+ */
+
+struct unw_table_entry {
+ u64 start_offset;
+ u64 end_offset;
+ u64 info_offset;
+};
+
+/*
+ * Initialize unwind support.
+ */
+extern void unw_init (void);
+
+extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
+ const void *table_start, const void *table_end);
+
+extern void unw_remove_unwind_table (void *handle);
+
+/*
+ * Prepare to unwind blocked task t.
+ */
+extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
+
+extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
+ struct switch_stack *sw);
+
+/*
+ * Prepare to unwind the currently running thread.
+ */
+extern void unw_init_running (void (*callback)(struct unw_frame_info *info, void *arg), void *arg);
+
+/*
+ * Unwind to the previous frame. Returns 0 if successful, or a negative
+ * number in case of an error.
+ */
+extern int unw_unwind (struct unw_frame_info *info);
+
+/*
+ * Unwind until the return pointer is in user-land (or until an error
+ * occurs). Returns 0 if successful, negative number in case of
+ * error.
+ */
+extern int unw_unwind_to_user (struct unw_frame_info *info);
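+
+/*
+ * Illustrative sketch, not part of the original header: a typical caller
+ * initializes a frame via unw_init_running() or unw_init_from_blocked_task()
+ * and then steps outward until unw_unwind() fails.  The helper below simply
+ * counts the frames that remain above the current one.
+ */
+static inline unsigned long unw_example_count_frames(struct unw_frame_info *info)
+{
+	unsigned long n = 0;
+
+	while (unw_unwind(info) >= 0)
+		n++;
+	return n;
+}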
+
+#define unw_is_intr_frame(info) (((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0)
+
+static inline int
+unw_get_ip (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->ip;
+ return 0;
+}
+
+static inline int
+unw_get_sp (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->sp;
+ return 0;
+}
+
+static inline int
+unw_get_psp (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->psp;
+ return 0;
+}
+
+static inline int
+unw_get_bsp (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = (info)->bsp;
+ return 0;
+}
+
+static inline int
+unw_get_cfm (struct unw_frame_info *info, unsigned long *valp)
+{
+ *valp = *(info)->cfm_loc;
+ return 0;
+}
+
+static inline int
+unw_set_cfm (struct unw_frame_info *info, unsigned long val)
+{
+ *(info)->cfm_loc = val;
+ return 0;
+}
+
+static inline int
+unw_get_rp (struct unw_frame_info *info, unsigned long *val)
+{
+ if (!info->rp_loc)
+ return -1;
+ *val = *info->rp_loc;
+ return 0;
+}
+
+extern int unw_access_gr (struct unw_frame_info *, int, unsigned long *, char *, int);
+extern int unw_access_br (struct unw_frame_info *, int, unsigned long *, int);
+extern int unw_access_fr (struct unw_frame_info *, int, struct ia64_fpreg *, int);
+extern int unw_access_ar (struct unw_frame_info *, int, unsigned long *, int);
+extern int unw_access_pr (struct unw_frame_info *, unsigned long *, int);
+
+static inline int
+unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat)
+{
+ return unw_access_gr(i, n, &v, &nat, 1);
+}
+
+static inline int
+unw_set_br (struct unw_frame_info *i, int n, unsigned long v)
+{
+ return unw_access_br(i, n, &v, 1);
+}
+
+static inline int
+unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v)
+{
+ return unw_access_fr(i, n, &v, 1);
+}
+
+static inline int
+unw_set_ar (struct unw_frame_info *i, int n, unsigned long v)
+{
+ return unw_access_ar(i, n, &v, 1);
+}
+
+static inline int
+unw_set_pr (struct unw_frame_info *i, unsigned long v)
+{
+ return unw_access_pr(i, &v, 1);
+}
+
+#define unw_get_gr(i,n,v,nat) unw_access_gr(i,n,v,nat,0)
+#define unw_get_br(i,n,v) unw_access_br(i,n,v,0)
+#define unw_get_fr(i,n,v) unw_access_fr(i,n,v,0)
+#define unw_get_ar(i,n,v) unw_access_ar(i,n,v,0)
+#define unw_get_pr(i,v) unw_access_pr(i,v,0)
+
+#endif /* _ASM_IA64_UNWIND_H */
diff --git a/arch/ia64/include/asm/user.h b/arch/ia64/include/asm/user.h
new file mode 100644
index 000000000000..8b9821110348
--- /dev/null
+++ b/arch/ia64/include/asm/user.h
@@ -0,0 +1,58 @@
+#ifndef _ASM_IA64_USER_H
+#define _ASM_IA64_USER_H
+
+/*
+ * Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the `trad-core' bfd). The file contents are as
+ * follows:
+ *
+ * upage: 1 page consisting of a user struct that tells gdb
+ * what is present in the file. Directly after this is a
+ * copy of the task_struct, which is currently not used by gdb,
+ * but it may come in handy at some point. All of the registers
+ * are stored as part of the upage. The upage should always be
+ * only one page long.
+ * data: The data segment follows next. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any memory
+ * that may have been sbrk'ed. No attempt is made to determine if a
+ * page is demand-zero or if a page is totally unused; we just cover
+ * the entire range. All of the addresses are rounded in such a way
+ * that an integral number of pages is written.
+ * stack: We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from usp to
+ * current->start_stack, so we round each of these in order to be able
+ * to write an integer number of pages.
+ *
+ * Modified 1998, 1999, 2001
+ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <linux/ptrace.h>
+#include <linux/types.h>
+
+#include <asm/page.h>
+
+#define EF_SIZE 3072 /* XXX fix me */
+
+struct user {
+ unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */
+ size_t u_tsize; /* text size (pages) */
+ size_t u_dsize; /* data size (pages) */
+ size_t u_ssize; /* stack size (pages) */
+ unsigned long start_code; /* text starting address */
+ unsigned long start_data; /* data starting address */
+ unsigned long start_stack; /* stack starting address */
+ long int signal; /* signal causing core dump */
+ unsigned long u_ar0; /* help gdb find registers */
+ unsigned long magic; /* identifies a core file */
+ char u_comm[32]; /* user command name */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_DATA_START_ADDR (u.start_data)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _ASM_IA64_USER_H */
diff --git a/arch/ia64/include/asm/ustack.h b/arch/ia64/include/asm/ustack.h
new file mode 100644
index 000000000000..504167c35b8b
--- /dev/null
+++ b/arch/ia64/include/asm/ustack.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_IA64_USTACK_H
+#define _ASM_IA64_USTACK_H
+
+/*
+ * Constants for the user stack size
+ */
+
+#ifdef __KERNEL__
+#include <asm/page.h>
+
+/* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
+#define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2)
+#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
+#define STACK_TOP_MAX STACK_TOP
+#endif
+
+/* Make a default stack size of 2GiB */
+#define DEFAULT_USER_STACK_SIZE (1UL << 31)
+
+#endif /* _ASM_IA64_USTACK_H */
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
new file mode 100644
index 000000000000..f607018af4a1
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -0,0 +1,309 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV architectural definitions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_IA64_UV_HUB_H__
+#define __ASM_IA64_UV_HUB_H__
+
+#include <linux/numa.h>
+#include <linux/percpu.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+
+
+/*
+ * Addressing Terminology
+ *
+ * M - The low M bits of a physical address represent the offset
+ * into the blade local memory. RAM memory on a blade is physically
+ * contiguous (although various IO spaces may punch holes in
+ * it).
+ *
+ * N - Number of bits in the node portion of a socket physical
+ * address.
+ *
+ * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
+ * routers always have the low bit set to 1, while C/M bricks have it
+ * equal to 0. Most addressing macros that target UV hub chips
+ * right shift the NASID by 1 to exclude the always-zero bit.
+ * NASIDs contain up to 15 bits.
+ *
+ * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
+ * of nasids.
+ *
+ * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
+ * of the nasid for socket usage.
+ *
+ *
+ * NumaLink Global Physical Address Format:
+ * +--------------------------------+---------------------+
+ * |00..000| GNODE | NodeOffset |
+ * +--------------------------------+---------------------+
+ * |<-------53 - M bits --->|<--------M bits ----->
+ *
+ * M - number of node offset bits (35 .. 40)
+ *
+ *
+ * Memory/UV-HUB Processor Socket Address Format:
+ * +----------------+---------------+---------------------+
+ * |00..000000000000| PNODE | NodeOffset |
+ * +----------------+---------------+---------------------+
+ * <--- N bits --->|<--------M bits ----->
+ *
+ * M - number of node offset bits (35 .. 40)
+ * N - number of PNODE bits (0 .. 10)
+ *
+ * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
+ * The actual values are configuration dependent and are set at
+ * boot time. M & N values are set by the hardware/BIOS at boot.
+ */
+
+
+/*
+ * Maximum number of bricks in all partitions and in all coherency domains.
+ * This is the total number of bricks accessible in the numalink fabric. It
+ * includes all C & M bricks. Routers are NOT included.
+ *
+ * This value is also the value of the maximum number of non-router NASIDs
+ * in the numalink fabric.
+ *
+ * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
+ */
+#define UV_MAX_NUMALINK_BLADES 16384
+
+/*
+ * Maximum number of C/Mbricks within a software SSI (hardware may support
+ * more).
+ */
+#define UV_MAX_SSI_BLADES 1
+
+/*
+ * The largest possible NASID of a C or M brick, plus 2
+ */
+#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_BLADES * 2)
+
+/*
+ * The following defines attributes of the HUB chip. These attributes are
+ * frequently referenced and are kept in the per-cpu data areas of each cpu.
+ * They are kept together in a struct to minimize cache misses.
+ */
+struct uv_hub_info_s {
+ unsigned long global_mmr_base;
+ unsigned long gpa_mask;
+ unsigned long gnode_upper;
+ unsigned long lowmem_remap_top;
+ unsigned long lowmem_remap_base;
+ unsigned short pnode;
+ unsigned short pnode_mask;
+ unsigned short coherency_domain_number;
+ unsigned short numa_blade_id;
+ unsigned char blade_processor_id;
+ unsigned char m_val;
+ unsigned char n_val;
+};
+DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
+#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
+
+/*
+ * Local & Global MMR space macros.
+ * Note: macros are intended to be used ONLY by inline functions
+ * in this file - not by other kernel code.
+ * n - NASID (full 15-bit global nasid)
+ * g - GNODE (full 15-bit global nasid, right shifted 1)
+ * p - PNODE (local part of nasids, right shifted 1)
+ */
+#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
+#define UV_PNODE_TO_NASID(p) (((p) << 1) | uv_hub_info->gnode_upper)
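+
+/*
+ * Worked example (illustrative only): with a pnode_mask of 0x3f, a C/M
+ * brick with NASID 6 has UV_NASID_TO_PNODE(6) == 3, and UV_PNODE_TO_NASID(3)
+ * yields 6 again, merged with the configured gnode_upper bits.
+ */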
+
+#define UV_LOCAL_MMR_BASE 0xf4000000UL
+#define UV_GLOBAL_MMR32_BASE 0xf8000000UL
+#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
+
+#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
+#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
+
+#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
+
+#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
+ ((unsigned long)(p) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+
+/*
+ * Macros for converting between kernel virtual addresses, socket local physical
+ * addresses, and UV global physical addresses.
+ * Note: use the standard __pa() & __va() macros for converting
+ * between socket virtual and socket physical addresses.
+ */
+
+/* socket phys RAM --> UV global physical address */
+static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
+{
+ if (paddr < uv_hub_info->lowmem_remap_top)
+ paddr += uv_hub_info->lowmem_remap_base;
+ return paddr | uv_hub_info->gnode_upper;
+}
+
+
+/* socket virtual --> UV global physical address */
+static inline unsigned long uv_gpa(void *v)
+{
+ return __pa(v) | uv_hub_info->gnode_upper;
+}
+
+/* socket virtual --> UV global physical address */
+static inline void *uv_vgpa(void *v)
+{
+ return (void *)uv_gpa(v);
+}
+
+/* UV global physical address --> socket virtual */
+static inline void *uv_va(unsigned long gpa)
+{
+ return __va(gpa & uv_hub_info->gpa_mask);
+}
+
+/* pnode, offset --> socket virtual */
+static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
+{
+ return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
+}
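+
+/*
+ * Illustrative sketch, not part of the original header: the conversions
+ * above are designed to compose, so for ordinary local RAM a kernel
+ * virtual address should survive a round trip through global physical
+ * space.
+ */
+static inline int uv_example_gpa_round_trip(void *p)
+{
+	return uv_va(uv_gpa(p)) == p;
+}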
+
+
+/*
+ * Access global MMRs using the low memory MMR32 space. This region supports
+ * faster MMR access but not all MMRs are accessible in this space.
+ */
+static inline unsigned long *uv_global_mmr32_address(int pnode,
+ unsigned long offset)
+{
+ return __va(UV_GLOBAL_MMR32_BASE |
+ UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
+}
+
+static inline void uv_write_global_mmr32(int pnode, unsigned long offset,
+ unsigned long val)
+{
+ *uv_global_mmr32_address(pnode, offset) = val;
+}
+
+static inline unsigned long uv_read_global_mmr32(int pnode,
+ unsigned long offset)
+{
+ return *uv_global_mmr32_address(pnode, offset);
+}
+
+/*
+ * Access Global MMR space using the MMR space located at the top of physical
+ * memory.
+ */
+static inline unsigned long *uv_global_mmr64_address(int pnode,
+ unsigned long offset)
+{
+ return __va(UV_GLOBAL_MMR64_BASE |
+ UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
+}
+
+static inline void uv_write_global_mmr64(int pnode, unsigned long offset,
+ unsigned long val)
+{
+ *uv_global_mmr64_address(pnode, offset) = val;
+}
+
+static inline unsigned long uv_read_global_mmr64(int pnode,
+ unsigned long offset)
+{
+ return *uv_global_mmr64_address(pnode, offset);
+}
+
+/*
+ * Access hub local MMRs. Faster than using global space but only local MMRs
+ * are accessible.
+ */
+static inline unsigned long *uv_local_mmr_address(unsigned long offset)
+{
+ return __va(UV_LOCAL_MMR_BASE | offset);
+}
+
+static inline unsigned long uv_read_local_mmr(unsigned long offset)
+{
+ return *uv_local_mmr_address(offset);
+}
+
+static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
+{
+ *uv_local_mmr_address(offset) = val;
+}
+
+/*
+ * Structures and definitions for converting between cpu, node, pnode, and blade
+ * numbers.
+ */
+
+/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
+static inline int uv_blade_processor_id(void)
+{
+ return smp_processor_id();
+}
+
+/* Blade number of current cpu. Numbered 0 .. <#blades - 1> */
+static inline int uv_numa_blade_id(void)
+{
+ return 0;
+}
+
+/* Convert a cpu number to the UV blade number */
+static inline int uv_cpu_to_blade_id(int cpu)
+{
+ return 0;
+}
+
+/* Convert linux node number to the UV blade number */
+static inline int uv_node_to_blade_id(int nid)
+{
+ return 0;
+}
+
+/* Convert a blade id to the PNODE of the blade */
+static inline int uv_blade_to_pnode(int bid)
+{
+ return 0;
+}
+
+/* Determine the number of possible cpus on a blade */
+static inline int uv_blade_nr_possible_cpus(int bid)
+{
+ return num_possible_cpus();
+}
+
+/* Determine the number of online cpus on a blade */
+static inline int uv_blade_nr_online_cpus(int bid)
+{
+ return num_online_cpus();
+}
+
+/* Convert a cpu id to the PNODE of the blade containing the cpu */
+static inline int uv_cpu_to_pnode(int cpu)
+{
+ return 0;
+}
+
+/* Convert a linux node number to the PNODE of the blade */
+static inline int uv_node_to_pnode(int nid)
+{
+ return 0;
+}
+
+/* Maximum possible number of blades */
+static inline int uv_num_possible_blades(void)
+{
+ return 1;
+}
+
+#endif /* __ASM_IA64_UV_HUB_H__ */
diff --git a/arch/ia64/include/asm/uv/uv_mmrs.h b/arch/ia64/include/asm/uv/uv_mmrs.h
new file mode 100644
index 000000000000..c149ef085437
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv_mmrs.h
@@ -0,0 +1,673 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV MMR definitions
+ *
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef __ASM_IA64_UV_MMRS__
+#define __ASM_IA64_UV_MMRS__
+
+#define UV_MMR_ENABLE (1UL << 63)
+
+/* ========================================================================= */
+/* UVH_BAU_DATA_CONFIG */
+/* ========================================================================= */
+#define UVH_BAU_DATA_CONFIG 0x61680UL
+#define UVH_BAU_DATA_CONFIG_32 0x0438
+
+#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
+#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
+#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
+#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
+#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_BAU_DATA_CONFIG_P_SHFT 13
+#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_BAU_DATA_CONFIG_T_SHFT 15
+#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_BAU_DATA_CONFIG_M_SHFT 16
+#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
+#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_bau_data_config_u {
+ unsigned long v;
+ struct uvh_bau_data_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED0 */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED0 0x70000UL
+#define UVH_EVENT_OCCURRED0_32 0x005e8
+
+#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UVH_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
+#define UVH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
+#define UVH_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
+#define UVH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
+#define UVH_EVENT_OCCURRED0_LH_HCERR_SHFT 3
+#define UVH_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
+#define UVH_EVENT_OCCURRED0_RH_HCERR_SHFT 4
+#define UVH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
+#define UVH_EVENT_OCCURRED0_XN_HCERR_SHFT 5
+#define UVH_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
+#define UVH_EVENT_OCCURRED0_SI_HCERR_SHFT 6
+#define UVH_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
+#define UVH_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
+#define UVH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
+#define UVH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
+#define UVH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
+#define UVH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
+#define UVH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
+#define UVH_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
+#define UVH_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+#define UVH_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
+#define UVH_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
+#define UVH_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
+#define UVH_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
+#define UVH_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
+#define UVH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
+#define UVH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
+#define UVH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
+#define UVH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
+#define UVH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
+#define UVH_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
+#define UVH_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
+#define UVH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
+#define UVH_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
+#define UVH_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
+#define UVH_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
+#define UVH_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
+#define UVH_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
+#define UVH_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
+#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
+#define UVH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
+#define UVH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
+#define UVH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
+#define UVH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
+#define UVH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
+#define UVH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
+#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
+#define UVH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
+#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
+#define UVH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
+#define UVH_EVENT_OCCURRED0_LTC_INT_SHFT 43
+#define UVH_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
+#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
+#define UVH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
+#define UVH_EVENT_OCCURRED0_IPI_INT_SHFT 45
+#define UVH_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
+#define UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
+#define UVH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
+#define UVH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
+#define UVH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
+#define UVH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
+#define UVH_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
+#define UVH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC0_SHFT 51
+#define UVH_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC1_SHFT 52
+#define UVH_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC2_SHFT 53
+#define UVH_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
+#define UVH_EVENT_OCCURRED0_RTC3_SHFT 54
+#define UVH_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
+#define UVH_EVENT_OCCURRED0_BAU_DATA_SHFT 55
+#define UVH_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
+#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
+#define UVH_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+union uvh_event_occurred0_u {
+ unsigned long v;
+ struct uvh_event_occurred0_s {
+ unsigned long lb_hcerr : 1; /* RW, W1C */
+ unsigned long gr0_hcerr : 1; /* RW, W1C */
+ unsigned long gr1_hcerr : 1; /* RW, W1C */
+ unsigned long lh_hcerr : 1; /* RW, W1C */
+ unsigned long rh_hcerr : 1; /* RW, W1C */
+ unsigned long xn_hcerr : 1; /* RW, W1C */
+ unsigned long si_hcerr : 1; /* RW, W1C */
+ unsigned long lb_aoerr0 : 1; /* RW, W1C */
+ unsigned long gr0_aoerr0 : 1; /* RW, W1C */
+ unsigned long gr1_aoerr0 : 1; /* RW, W1C */
+ unsigned long lh_aoerr0 : 1; /* RW, W1C */
+ unsigned long rh_aoerr0 : 1; /* RW, W1C */
+ unsigned long xn_aoerr0 : 1; /* RW, W1C */
+ unsigned long si_aoerr0 : 1; /* RW, W1C */
+ unsigned long lb_aoerr1 : 1; /* RW, W1C */
+ unsigned long gr0_aoerr1 : 1; /* RW, W1C */
+ unsigned long gr1_aoerr1 : 1; /* RW, W1C */
+ unsigned long lh_aoerr1 : 1; /* RW, W1C */
+ unsigned long rh_aoerr1 : 1; /* RW, W1C */
+ unsigned long xn_aoerr1 : 1; /* RW, W1C */
+ unsigned long si_aoerr1 : 1; /* RW, W1C */
+ unsigned long rh_vpi_int : 1; /* RW, W1C */
+ unsigned long system_shutdown_int : 1; /* RW, W1C */
+ unsigned long lb_irq_int_0 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_1 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_2 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_3 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_4 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_5 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_6 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_7 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_8 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_9 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_10 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_11 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_12 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_13 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_14 : 1; /* RW, W1C */
+ unsigned long lb_irq_int_15 : 1; /* RW, W1C */
+ unsigned long l1_nmi_int : 1; /* RW, W1C */
+ unsigned long stop_clock : 1; /* RW, W1C */
+ unsigned long asic_to_l1 : 1; /* RW, W1C */
+ unsigned long l1_to_asic : 1; /* RW, W1C */
+ unsigned long ltc_int : 1; /* RW, W1C */
+ unsigned long la_seq_trigger : 1; /* RW, W1C */
+ unsigned long ipi_int : 1; /* RW, W1C */
+ unsigned long extio_int0 : 1; /* RW, W1C */
+ unsigned long extio_int1 : 1; /* RW, W1C */
+ unsigned long extio_int2 : 1; /* RW, W1C */
+ unsigned long extio_int3 : 1; /* RW, W1C */
+ unsigned long profile_int : 1; /* RW, W1C */
+ unsigned long rtc0 : 1; /* RW, W1C */
+ unsigned long rtc1 : 1; /* RW, W1C */
+ unsigned long rtc2 : 1; /* RW, W1C */
+ unsigned long rtc3 : 1; /* RW, W1C */
+ unsigned long bau_data : 1; /* RW, W1C */
+ unsigned long power_management_req : 1; /* RW, W1C */
+ unsigned long rsvd_57_63 : 7; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED0_ALIAS */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
+
+/* ========================================================================= */
+/* UVH_INT_CMPB */
+/* ========================================================================= */
+#define UVH_INT_CMPB 0x22080UL
+
+#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
+#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpb_u {
+ unsigned long v;
+ struct uvh_int_cmpb_s {
+ unsigned long real_time_cmpb : 56; /* RW */
+ unsigned long rsvd_56_63 : 8; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_INT_CMPC */
+/* ========================================================================= */
+#define UVH_INT_CMPC 0x22100UL
+
+#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpc_u {
+ unsigned long v;
+ struct uvh_int_cmpc_s {
+ unsigned long real_time_cmpc : 56; /* RW */
+ unsigned long rsvd_56_63 : 8; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_INT_CMPD */
+/* ========================================================================= */
+#define UVH_INT_CMPD 0x22180UL
+
+#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpd_u {
+ unsigned long v;
+ struct uvh_int_cmpd_s {
+ unsigned long real_time_cmpd : 56; /* RW */
+ unsigned long rsvd_56_63 : 8; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_NODE_ID */
+/* ========================================================================= */
+#define UVH_NODE_ID 0x0UL
+
+#define UVH_NODE_ID_FORCE1_SHFT 0
+#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVH_NODE_ID_MANUFACTURER_SHFT 1
+#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVH_NODE_ID_PART_NUMBER_SHFT 12
+#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVH_NODE_ID_REVISION_SHFT 28
+#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UVH_NODE_ID_NODE_ID_SHFT 32
+#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVH_NODE_ID_NODES_PER_BIT_SHFT 48
+#define UVH_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
+#define UVH_NODE_ID_NI_PORT_SHFT 56
+#define UVH_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+union uvh_node_id_u {
+ unsigned long v;
+ struct uvh_node_id_s {
+ unsigned long force1 : 1; /* RO */
+ unsigned long manufacturer : 11; /* RO */
+ unsigned long part_number : 16; /* RO */
+ unsigned long revision : 4; /* RO */
+ unsigned long node_id : 15; /* RW */
+ unsigned long rsvd_47 : 1; /* */
+ unsigned long nodes_per_bit : 7; /* RW */
+ unsigned long rsvd_55 : 1; /* */
+ unsigned long ni_port : 4; /* RO */
+ unsigned long rsvd_60_63 : 4; /* */
+ } s;
+};
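+
+/*
+ * Illustrative sketch, not part of the original file: the unions above are
+ * meant to be overlaid on raw MMR values.  Assuming uv_read_local_mmr()
+ * from <asm/uv/uv_hub.h>, the local node number could be extracted with:
+ *
+ *	union uvh_node_id_u node_id;
+ *
+ *	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+ *
+ * after which node_id.s.node_id holds the hub's 15-bit node number.
+ */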
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23 : 24; /* */
+ unsigned long dest_base : 22; /* RW */
+ unsigned long rsvd_46_63: 18; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23 : 24; /* */
+ unsigned long dest_base : 22; /* RW */
+ unsigned long rsvd_46_63: 18; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23 : 24; /* */
+ unsigned long dest_base : 22; /* RW */
+ unsigned long rsvd_46_63: 18; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_gru_overlay_config_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27: 28; /* */
+ unsigned long base : 18; /* RW */
+ unsigned long rsvd_46_47: 2; /* */
+ unsigned long gr4 : 1; /* RW */
+ unsigned long rsvd_49_51: 3; /* */
+ unsigned long n_gru : 4; /* RW */
+ unsigned long rsvd_56_62: 7; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_mmr_overlay_config_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25: 26; /* */
+ unsigned long base : 20; /* RW */
+ unsigned long dual_hub : 1; /* RW */
+ unsigned long rsvd_47_62: 16; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC */
+/* ========================================================================= */
+#define UVH_RTC 0x340000UL
+
+#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
+#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
+
+union uvh_rtc_u {
+ unsigned long v;
+ struct uvh_rtc_s {
+ unsigned long real_time_clock : 56; /* RW */
+ unsigned long rsvd_56_63 : 8; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC1_INT_CONFIG */
+/* ========================================================================= */
+#define UVH_RTC1_INT_CONFIG 0x615c0UL
+
+#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC1_INT_CONFIG_P_SHFT 13
+#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC1_INT_CONFIG_T_SHFT 15
+#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC1_INT_CONFIG_M_SHFT 16
+#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc1_int_config_u {
+ unsigned long v;
+ struct uvh_rtc1_int_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC2_INT_CONFIG */
+/* ========================================================================= */
+#define UVH_RTC2_INT_CONFIG 0x61600UL
+
+#define UVH_RTC2_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC2_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC2_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC2_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC2_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC2_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC2_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC2_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC2_INT_CONFIG_P_SHFT 13
+#define UVH_RTC2_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC2_INT_CONFIG_T_SHFT 15
+#define UVH_RTC2_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC2_INT_CONFIG_M_SHFT 16
+#define UVH_RTC2_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC2_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC2_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc2_int_config_u {
+ unsigned long v;
+ struct uvh_rtc2_int_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC3_INT_CONFIG */
+/* ========================================================================= */
+#define UVH_RTC3_INT_CONFIG 0x61640UL
+
+#define UVH_RTC3_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC3_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC3_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC3_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC3_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC3_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC3_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC3_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC3_INT_CONFIG_P_SHFT 13
+#define UVH_RTC3_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC3_INT_CONFIG_T_SHFT 15
+#define UVH_RTC3_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC3_INT_CONFIG_M_SHFT 16
+#define UVH_RTC3_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC3_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC3_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc3_int_config_u {
+ unsigned long v;
+ struct uvh_rtc3_int_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC_INC_RATIO */
+/* ========================================================================= */
+#define UVH_RTC_INC_RATIO 0x350000UL
+
+#define UVH_RTC_INC_RATIO_FRACTION_SHFT 0
+#define UVH_RTC_INC_RATIO_FRACTION_MASK 0x00000000000fffffUL
+#define UVH_RTC_INC_RATIO_RATIO_SHFT 20
+#define UVH_RTC_INC_RATIO_RATIO_MASK 0x0000000000700000UL
+
+union uvh_rtc_inc_ratio_u {
+ unsigned long v;
+ struct uvh_rtc_inc_ratio_s {
+ unsigned long fraction : 20; /* RW */
+ unsigned long ratio : 3; /* RW */
+ unsigned long rsvd_23_63: 41; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SI_ADDR_MAP_CONFIG */
+/* ========================================================================= */
+#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
+
+#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
+#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
+#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
+
+union uvh_si_addr_map_config_u {
+ unsigned long v;
+ struct uvh_si_addr_map_config_s {
+ unsigned long m_skt : 6; /* RW */
+ unsigned long rsvd_6_7: 2; /* */
+ unsigned long n_skt : 4; /* RW */
+ unsigned long rsvd_12_63: 52; /* */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
+/* ========================================================================= */
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
+
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias0_overlay_config_u {
+ unsigned long v;
+ struct uvh_si_alias0_overlay_config_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
+/* ========================================================================= */
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
+
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias1_overlay_config_u {
+ unsigned long v;
+ struct uvh_si_alias1_overlay_config_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
+/* ========================================================================= */
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
+
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
+#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_si_alias2_overlay_config_u {
+ unsigned long v;
+ struct uvh_si_alias2_overlay_config_s {
+ unsigned long rsvd_0_23: 24; /* */
+ unsigned long base : 8; /* RW */
+ unsigned long rsvd_32_47: 16; /* */
+ unsigned long m_alias : 5; /* RW */
+ unsigned long rsvd_53_62: 10; /* */
+ unsigned long enable : 1; /* RW */
+ } s;
+};
+
+#endif /* __ASM_IA64_UV_MMRS__ */
diff --git a/arch/ia64/include/asm/vga.h b/arch/ia64/include/asm/vga.h
new file mode 100644
index 000000000000..02184ecd8208
--- /dev/null
+++ b/arch/ia64/include/asm/vga.h
@@ -0,0 +1,25 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ * (c) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * (c) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#ifndef __ASM_IA64_VGA_H_
+#define __ASM_IA64_VGA_H_
+
+/*
+ * On the PC, we can just recalculate addresses and then access the
+ * videoram directly without any black magic.
+ */
+
+extern unsigned long vga_console_iobase;
+extern unsigned long vga_console_membase;
+
+#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap_nocache(vga_console_membase + (x), s))
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
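+
+/*
+ * Illustrative sketch, not part of the original header (the offset and
+ * size below are hypothetical): a console driver maps a window of video
+ * RAM and pokes bytes through it, e.g.
+ *
+ *	char *fb = (char *)VGA_MAP_MEM(0x0, 0x8000);
+ *	vga_writeb('A', fb);
+ */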
+
+#endif /* __ASM_IA64_VGA_H_ */
diff --git a/arch/ia64/include/asm/xor.h b/arch/ia64/include/asm/xor.h
new file mode 100644
index 000000000000..a349e23dea15
--- /dev/null
+++ b/arch/ia64/include/asm/xor.h
@@ -0,0 +1,31 @@
+/*
+ * Optimized RAID-5 checksumming functions for IA-64.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+extern void xor_ia64_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_ia64_3(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *);
+extern void xor_ia64_4(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *);
+extern void xor_ia64_5(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *, unsigned long *);
+
+static struct xor_block_template xor_block_ia64 = {
+ .name = "ia64",
+ .do_2 = xor_ia64_2,
+ .do_3 = xor_ia64_3,
+ .do_4 = xor_ia64_4,
+ .do_5 = xor_ia64_5,
+};
+
+#define XOR_TRY_TEMPLATES xor_speed(&xor_block_ia64)
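+
+/*
+ * Illustrative usage sketch, not part of the original header (dst and src
+ * are hypothetical word-aligned buffers): the do_N routines XOR N-1 source
+ * blocks into the first block, `bytes' bytes at a time, so
+ *
+ *	xor_ia64_2(PAGE_SIZE, dst, src);
+ *
+ * leaves each word of dst XORed with the matching word of src.
+ */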
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index c64a55af9b95..94c44b1ccfd0 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -10,11 +10,11 @@
#include <linux/pid.h>
#include <linux/clocksource.h>
#include <linux/kbuild.h>
-#include <asm-ia64/processor.h>
-#include <asm-ia64/ptrace.h>
-#include <asm-ia64/siginfo.h>
-#include <asm-ia64/sigcontext.h>
-#include <asm-ia64/mca.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/siginfo.h>
+#include <asm/sigcontext.h>
+#include <asm/mca.h>
#include "../kernel/sigframe.h"
#include "../kernel/fsyscall_gtod_data.h"
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index db540e58c783..41c712917ff7 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1123,7 +1123,7 @@ SET_REG(b5);
* p15 - used to track flag status.
*
* If you patch this code to use more registers, do not forget to update
- * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
+ * the clobber lists for spin_lock() in arch/ia64/include/asm/spinlock.h.
*/
#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 3bc2fa64f87f..5c4674ae8aea 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -69,7 +69,7 @@
* systems, we use one-to-one mapping between IA-64 vector and IRQ. A
* platform can implement platform_irq_to_vector(irq) and
* platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
- * Please see also include/asm-ia64/hw_irq.h for those APIs.
+ * Please see also arch/ia64/include/asm/hw_irq.h for those APIs.
*
* To sum up, there are three levels of mappings involved:
*
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index 621630256c4a..f69389c7be1d 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -45,7 +45,7 @@
* to the correct location.
*/
#include <asm/asmmacro.h>
-#include <asm-ia64/break.h>
+#include <asm/break.h>
/*
* void jprobe_break(void)
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
index 1ae049181e83..8273afc32db8 100644
--- a/arch/ia64/kernel/nr-irqs.c
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -9,7 +9,7 @@
#include <linux/kbuild.h>
#include <linux/threads.h>
-#include <asm-ia64/native/irq.h>
+#include <asm/native/irq.h>
void foo(void)
{
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index e5c2de9b29a5..593279f33e96 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -314,7 +314,7 @@ static inline void __init setup_crashkernel(unsigned long total, int *n)
*
* Setup the reserved memory areas set aside for the boot parameters,
* initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
- * see include/asm-ia64/meminit.h if you need to define more.
+ * see arch/ia64/include/asm/meminit.h if you need to define more.
*/
void __init
reserve_memory (void)
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index ab7e2fd40798..c77ebdf98119 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(sn_io_addr);
/**
* __sn_mmiowb - I/O space memory barrier
*
- * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
+ * See arch/ia64/include/asm/io.h and Documentation/DocBook/deviceiobook.tmpl
* for details.
*
* On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.