author     Al Viro <viro@zeniv.linux.org.uk>           2008-08-18 21:44:32 -0400
committer  H. Peter Anvin <hpa@zytor.com>              2008-10-22 22:55:21 -0700
commit     f5ad6a42b700d9687bb97cf461e7f2506e3006d6 (patch)
tree       1bd92ee745ce1bdf459147dd9cbdb440c72cd9f8 /arch/um/sys-x86_64
parent     17dcf75d3ea11d7e26110ba85677cfadbccecf45 (diff)
x86, um: get rid of sysdep symlink
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/um/sys-x86_64')
-rw-r--r--  arch/um/sys-x86_64/sysdep/archsetjmp.h       24
-rw-r--r--  arch/um/sys-x86_64/sysdep/barrier.h           7
-rw-r--r--  arch/um/sys-x86_64/sysdep/checksum.h        144
-rw-r--r--  arch/um/sys-x86_64/sysdep/faultinfo.h        29
-rw-r--r--  arch/um/sys-x86_64/sysdep/host_ldt.h         38
-rw-r--r--  arch/um/sys-x86_64/sysdep/kernel-offsets.h   23
-rw-r--r--  arch/um/sys-x86_64/sysdep/ptrace.h          240
-rw-r--r--  arch/um/sys-x86_64/sysdep/ptrace_user.h      77
-rw-r--r--  arch/um/sys-x86_64/sysdep/sc.h               45
-rw-r--r--  arch/um/sys-x86_64/sysdep/sigcontext.h       27
-rw-r--r--  arch/um/sys-x86_64/sysdep/skas_ptrace.h      22
-rw-r--r--  arch/um/sys-x86_64/sysdep/stub.h            108
-rw-r--r--  arch/um/sys-x86_64/sysdep/syscalls.h         33
-rw-r--r--  arch/um/sys-x86_64/sysdep/system.h          132
-rw-r--r--  arch/um/sys-x86_64/sysdep/tls.h              29
-rw-r--r--  arch/um/sys-x86_64/sysdep/vm-flags.h         33
16 files changed, 1011 insertions, 0 deletions
diff --git a/arch/um/sys-x86_64/sysdep/archsetjmp.h b/arch/um/sys-x86_64/sysdep/archsetjmp.h
new file mode 100644
index 000000000000..2af8f12ca161
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/archsetjmp.h
@@ -0,0 +1,24 @@
+/*
+ * arch/um/include/sysdep-x86_64/archsetjmp.h
+ */
+
+#ifndef _KLIBC_ARCHSETJMP_H
+#define _KLIBC_ARCHSETJMP_H
+
+struct __jmp_buf {
+ unsigned long __rbx;
+ unsigned long __rsp;
+ unsigned long __rbp;
+ unsigned long __r12;
+ unsigned long __r13;
+ unsigned long __r14;
+ unsigned long __r15;
+ unsigned long __rip;
+};
+
+typedef struct __jmp_buf jmp_buf[1];
+
+#define JB_IP __rip
+#define JB_SP __rsp
+
+#endif /* _KLIBC_ARCHSETJMP_H */
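
UML switches between saved contexts by rewriting the registers that setjmp() stored; the JB_IP/JB_SP aliases exist so arch-independent code can do that without naming __rip/__rsp directly. A minimal sketch of the idiom, assuming only this header (redirect_context() is an invented name, not part of the patch):

/* Hypothetical sketch: retarget a saved context at a new entry point
 * and stack by patching the registers setjmp() saved. */
#include "sysdep/archsetjmp.h"

static void redirect_context(jmp_buf buf, void (*entry)(void),
                             unsigned long sp)
{
        buf[0].JB_IP = (unsigned long) entry; /* expands to .__rip */
        buf[0].JB_SP = sp;                    /* expands to .__rsp */
}
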
diff --git a/arch/um/sys-x86_64/sysdep/barrier.h b/arch/um/sys-x86_64/sysdep/barrier.h
new file mode 100644
index 000000000000..7b610befdc8f
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/barrier.h
@@ -0,0 +1,7 @@
+#ifndef __SYSDEP_X86_64_BARRIER_H
+#define __SYSDEP_X86_64_BARRIER_H
+
+/* Copied from include/asm-x86_64 for use by userspace. */
+#define mb() asm volatile("mfence":::"memory")
+
+#endif
diff --git a/arch/um/sys-x86_64/sysdep/checksum.h b/arch/um/sys-x86_64/sysdep/checksum.h
new file mode 100644
index 000000000000..a5be9031ea85
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/checksum.h
@@ -0,0 +1,144 @@
+/*
+ * Licensed under the GPL
+ */
+
+#ifndef __UM_SYSDEP_CHECKSUM_H
+#define __UM_SYSDEP_CHECKSUM_H
+
+#include "linux/string.h"
+#include "linux/in6.h"
+#include "asm/uaccess.h"
+
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the
+ * access_ok().
+ */
+
+static __inline__
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
+{
+ memcpy(dst, src, len);
+ return csum_partial(dst, len, sum);
+}
+
+static __inline__
+__wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst, int len, __wsum sum,
+ int *err_ptr)
+{
+ if (copy_from_user(dst, src, len)) {
+ *err_ptr = -EFAULT;
+ return (__force __wsum)-1;
+ }
+ return csum_partial(dst, len, sum);
+}
+
+/**
+ * csum_fold - Fold and invert a 32bit checksum.
+ * sum: 32bit unfolded sum
+ *
+ * Fold a 32bit running checksum to 16bit and invert it. This is usually
+ * the last step before putting a checksum into a packet.
+ * Make sure not to mix with 64bit checksums.
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ __asm__(
+ " addl %1,%0\n"
+ " adcl $0xffff,%0"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000)
+ );
+ return (__force __sum16)(~(__force u32)sum >> 16);
+}
+
+/**
+ * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
+ * @saddr: source address
+ * @daddr: destination address
+ * @len: length of packet
+ * @proto: ip protocol of packet
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
+ * Returns the pseudo header checksum for the input data. The result is
+ * 32bit unfolded.
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ asm(" addl %1, %0\n"
+ " adcl %2, %0\n"
+ " adcl %3, %0\n"
+ " adcl $0, %0\n"
+ : "=r" (sum)
+ : "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/**
+ * ip_fast_csum - Compute the IPv4 header checksum efficiently.
+ * @iph: ipv4 header
+ * @ihl: length of the header in 32-bit words (header length / 4)
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum;
+
+ asm( " movl (%1), %0\n"
+ " subl $4, %2\n"
+ " jbe 2f\n"
+ " addl 4(%1), %0\n"
+ " adcl 8(%1), %0\n"
+ " adcl 12(%1), %0\n"
+ "1: adcl 16(%1), %0\n"
+ " lea 4(%1), %1\n"
+ " decl %2\n"
+ " jne 1b\n"
+ " adcl $0, %0\n"
+ " movl %0, %2\n"
+ " shrl $16, %0\n"
+ " addw %w2, %w0\n"
+ " adcl $0, %0\n"
+ " notl %0\n"
+ "2:"
+ /* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "memory");
+ return (__force __sum16)sum;
+}
+
+static inline unsigned add32_with_carry(unsigned a, unsigned b)
+{
+ asm("addl %2,%0\n\t"
+ "adcl $0,%0"
+ : "=r" (a)
+ : "0" (a), "r" (b));
+ return a;
+}
+
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#endif
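
For readers who don't want to decode the inline asm, csum_fold() above is the classic end-around-carry fold. A portable C sketch of the same computation (illustrative only, not part of the patch):

/* Fold a 32-bit one's-complement sum to 16 bits, then invert. */
static inline unsigned short csum_fold_sketch(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16); /* fold high half into low */
        sum = (sum & 0xffff) + (sum >> 16); /* absorb a possible carry */
        return (unsigned short) ~sum;
}
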
diff --git a/arch/um/sys-x86_64/sysdep/faultinfo.h b/arch/um/sys-x86_64/sysdep/faultinfo.h
new file mode 100644
index 000000000000..cb917b0d5660
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/faultinfo.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004 Fujitsu Siemens Computers GmbH
+ * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
+ * Licensed under the GPL
+ */
+
+#ifndef __FAULTINFO_X86_64_H
+#define __FAULTINFO_X86_64_H
+
+/* this structure contains the full arch-specific faultinfo
+ * from the traps.
+ * On i386, ptrace_faultinfo unfortunately doesn't provide
+ * all the info, since trap_no is missing.
+ * All common elements are defined at the same position in
+ * both structures, thus making it easy to copy the
+ * contents without knowledge about the structure elements.
+ */
+struct faultinfo {
+ int error_code; /* in ptrace_faultinfo misleadingly called is_write */
+ unsigned long cr2; /* in ptrace_faultinfo called addr */
+ int trap_no; /* missing in ptrace_faultinfo */
+};
+
+#define FAULT_WRITE(fi) ((fi).error_code & 2)
+#define FAULT_ADDRESS(fi) ((fi).cr2)
+
+#define PTRACE_FULL_FAULTINFO 1
+
+#endif
diff --git a/arch/um/sys-x86_64/sysdep/host_ldt.h b/arch/um/sys-x86_64/sysdep/host_ldt.h
new file mode 100644
index 000000000000..e8b1be1e154f
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/host_ldt.h
@@ -0,0 +1,38 @@
+#ifndef __ASM_HOST_LDT_X86_64_H
+#define __ASM_HOST_LDT_X86_64_H
+
+#include <asm/ldt.h>
+
+/*
+ * macros stolen from include/asm-x86_64/desc.h
+ */
+#define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+/* Don't allow setting of the lm bit. It is useless anyway because
+ * 64bit system calls require __USER_CS. */
+#define LDT_entry_b(info) \
+ (((info)->base_addr & 0xff000000) | \
+ (((info)->base_addr & 0x00ff0000) >> 16) | \
+ ((info)->limit & 0xf0000) | \
+ (((info)->read_exec_only ^ 1) << 9) | \
+ ((info)->contents << 10) | \
+ (((info)->seg_not_present ^ 1) << 15) | \
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+ /* ((info)->lm << 21) | */ \
+ 0x7000)
+
+#define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0 && \
+ (info)->lm == 0)
+
+#endif
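
LDT_entry_a() and LDT_entry_b() pack a struct user_desc into the low and high 32-bit words of an x86 segment descriptor, the form a host modify_ldt() write call expects. A hedged usage sketch (all descriptor values invented):

/* Illustrative only: build the two descriptor words for a small
 * 32-bit data segment. */
static void build_ldt_words_sketch(void)
{
        struct user_desc info = {
                .entry_number   = 0,
                .base_addr      = 0x1000,
                .limit          = 0xfffff,
                .seg_32bit      = 1,
                .limit_in_pages = 1,
                .useable        = 1,
        };
        unsigned int lo = LDT_entry_a(&info); /* limit[15:0] | base[15:0]<<16 */
        unsigned int hi = LDT_entry_b(&info); /* base[31:16], flags */

        (void) lo;
        (void) hi;
}
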
diff --git a/arch/um/sys-x86_64/sysdep/kernel-offsets.h b/arch/um/sys-x86_64/sysdep/kernel-offsets.h
new file mode 100644
index 000000000000..a307237b7964
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/kernel-offsets.h
@@ -0,0 +1,23 @@
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/elf.h>
+#include <linux/crypto.h>
+#include <asm/page.h>
+#include <asm/mman.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define DEFINE_STR1(x) #x
+#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " DEFINE_STR1(val) " " #val: : )
+
+#define BLANK() asm volatile("\n->" : : )
+
+#define OFFSET(sym, str, mem) \
+ DEFINE(sym, offsetof(struct str, mem));
+
+void foo(void)
+{
+#include <common-offsets.h>
+}
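
DEFINE() here is the usual asm-offsets trick: foo() is never executed; the file is only compiled with -S, and the "->" markers survive into the assembly output, where a kbuild sed script scrapes them into a generated header. Schematically (the value below is illustrative):

/* Not part of the patch -- an illustration of the mechanism:
 *
 *     DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE);
 *
 * compiles (with -S) to an assembly line like
 *
 *     ->UM_KERN_PAGE_SIZE $4096 PAGE_SIZE
 *
 * which the build script rewrites into
 *
 *     #define UM_KERN_PAGE_SIZE 4096
 */
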
diff --git a/arch/um/sys-x86_64/sysdep/ptrace.h b/arch/um/sys-x86_64/sysdep/ptrace.h
new file mode 100644
index 000000000000..9ea44d111f33
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/ptrace.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2003 PathScale, Inc.
+ * Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ *
+ * Licensed under the GPL
+ */
+
+#ifndef __SYSDEP_X86_64_PTRACE_H
+#define __SYSDEP_X86_64_PTRACE_H
+
+#include "uml-config.h"
+#include "user_constants.h"
+#include "sysdep/faultinfo.h"
+
+#define MAX_REG_OFFSET (UM_FRAME_SIZE)
+#define MAX_REG_NR ((MAX_REG_OFFSET) / sizeof(unsigned long))
+
+#include "skas_ptregs.h"
+
+#define REGS_IP(r) ((r)[HOST_IP])
+#define REGS_SP(r) ((r)[HOST_SP])
+
+#define REGS_RBX(r) ((r)[HOST_RBX])
+#define REGS_RCX(r) ((r)[HOST_RCX])
+#define REGS_RDX(r) ((r)[HOST_RDX])
+#define REGS_RSI(r) ((r)[HOST_RSI])
+#define REGS_RDI(r) ((r)[HOST_RDI])
+#define REGS_RBP(r) ((r)[HOST_RBP])
+#define REGS_RAX(r) ((r)[HOST_RAX])
+#define REGS_R8(r) ((r)[HOST_R8])
+#define REGS_R9(r) ((r)[HOST_R9])
+#define REGS_R10(r) ((r)[HOST_R10])
+#define REGS_R11(r) ((r)[HOST_R11])
+#define REGS_R12(r) ((r)[HOST_R12])
+#define REGS_R13(r) ((r)[HOST_R13])
+#define REGS_R14(r) ((r)[HOST_R14])
+#define REGS_R15(r) ((r)[HOST_R15])
+#define REGS_CS(r) ((r)[HOST_CS])
+#define REGS_EFLAGS(r) ((r)[HOST_EFLAGS])
+#define REGS_SS(r) ((r)[HOST_SS])
+
+#define HOST_FS_BASE 21
+#define HOST_GS_BASE 22
+#define HOST_DS 23
+#define HOST_ES 24
+#define HOST_FS 25
+#define HOST_GS 26
+
+/* Also defined in asm/ptrace-x86_64.h, but not in libc headers. So, these
+ * are already defined for kernel code, but not for userspace code.
+ */
+#ifndef FS_BASE
+/* These aren't defined in ptrace.h, but exist in struct user_regs_struct,
+ * which is what x86_64 ptrace actually uses.
+ */
+#define FS_BASE (HOST_FS_BASE * sizeof(long))
+#define GS_BASE (HOST_GS_BASE * sizeof(long))
+#define DS (HOST_DS * sizeof(long))
+#define ES (HOST_ES * sizeof(long))
+#define FS (HOST_FS * sizeof(long))
+#define GS (HOST_GS * sizeof(long))
+#endif
+
+#define REGS_FS_BASE(r) ((r)[HOST_FS_BASE])
+#define REGS_GS_BASE(r) ((r)[HOST_GS_BASE])
+#define REGS_DS(r) ((r)[HOST_DS])
+#define REGS_ES(r) ((r)[HOST_ES])
+#define REGS_FS(r) ((r)[HOST_FS])
+#define REGS_GS(r) ((r)[HOST_GS])
+
+#define REGS_ORIG_RAX(r) ((r)[HOST_ORIG_RAX])
+
+#define REGS_SET_SYSCALL_RETURN(r, res) REGS_RAX(r) = (res)
+
+#define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r))
+
+#define REGS_SEGV_IS_FIXABLE(r) SEGV_IS_FIXABLE((r)->trap_type)
+
+#define REGS_FAULT_ADDR(r) ((r)->fault_addr)
+
+#define REGS_FAULT_WRITE(r) FAULT_WRITE((r)->fault_type)
+
+#define REGS_TRAP(r) ((r)->trap_type)
+
+#define REGS_ERR(r) ((r)->fault_type)
+
+struct uml_pt_regs {
+ unsigned long gp[MAX_REG_NR];
+ struct faultinfo faultinfo;
+ long syscall;
+ int is_user;
+};
+
+#define EMPTY_UML_PT_REGS { }
+
+#define UPT_RBX(r) REGS_RBX((r)->gp)
+#define UPT_RCX(r) REGS_RCX((r)->gp)
+#define UPT_RDX(r) REGS_RDX((r)->gp)
+#define UPT_RSI(r) REGS_RSI((r)->gp)
+#define UPT_RDI(r) REGS_RDI((r)->gp)
+#define UPT_RBP(r) REGS_RBP((r)->gp)
+#define UPT_RAX(r) REGS_RAX((r)->gp)
+#define UPT_R8(r) REGS_R8((r)->gp)
+#define UPT_R9(r) REGS_R9((r)->gp)
+#define UPT_R10(r) REGS_R10((r)->gp)
+#define UPT_R11(r) REGS_R11((r)->gp)
+#define UPT_R12(r) REGS_R12((r)->gp)
+#define UPT_R13(r) REGS_R13((r)->gp)
+#define UPT_R14(r) REGS_R14((r)->gp)
+#define UPT_R15(r) REGS_R15((r)->gp)
+#define UPT_CS(r) REGS_CS((r)->gp)
+#define UPT_FS_BASE(r) REGS_FS_BASE((r)->gp)
+#define UPT_FS(r) REGS_FS((r)->gp)
+#define UPT_GS_BASE(r) REGS_GS_BASE((r)->gp)
+#define UPT_GS(r) REGS_GS((r)->gp)
+#define UPT_DS(r) REGS_DS((r)->gp)
+#define UPT_ES(r) REGS_ES((r)->gp)
+#define UPT_SS(r) REGS_SS((r)->gp)
+#define UPT_ORIG_RAX(r) REGS_ORIG_RAX((r)->gp)
+
+#define UPT_IP(r) REGS_IP((r)->gp)
+#define UPT_SP(r) REGS_SP((r)->gp)
+
+#define UPT_EFLAGS(r) REGS_EFLAGS((r)->gp)
+#define UPT_SYSCALL_NR(r) ((r)->syscall)
+#define UPT_SYSCALL_RET(r) UPT_RAX(r)
+
+extern int user_context(unsigned long sp);
+
+#define UPT_IS_USER(r) ((r)->is_user)
+
+#define UPT_SYSCALL_ARG1(r) UPT_RDI(r)
+#define UPT_SYSCALL_ARG2(r) UPT_RSI(r)
+#define UPT_SYSCALL_ARG3(r) UPT_RDX(r)
+#define UPT_SYSCALL_ARG4(r) UPT_R10(r)
+#define UPT_SYSCALL_ARG5(r) UPT_R8(r)
+#define UPT_SYSCALL_ARG6(r) UPT_R9(r)
+
+struct syscall_args {
+ unsigned long args[6];
+};
+
+#define SYSCALL_ARGS(r) ((struct syscall_args) \
+ { .args = { UPT_SYSCALL_ARG1(r), \
+ UPT_SYSCALL_ARG2(r), \
+ UPT_SYSCALL_ARG3(r), \
+ UPT_SYSCALL_ARG4(r), \
+ UPT_SYSCALL_ARG5(r), \
+ UPT_SYSCALL_ARG6(r) } } )
+
+#define UPT_REG(regs, reg) \
+ ({ unsigned long val; \
+ switch(reg){ \
+ case R8: val = UPT_R8(regs); break; \
+ case R9: val = UPT_R9(regs); break; \
+ case R10: val = UPT_R10(regs); break; \
+ case R11: val = UPT_R11(regs); break; \
+ case R12: val = UPT_R12(regs); break; \
+ case R13: val = UPT_R13(regs); break; \
+ case R14: val = UPT_R14(regs); break; \
+ case R15: val = UPT_R15(regs); break; \
+ case RIP: val = UPT_IP(regs); break; \
+ case RSP: val = UPT_SP(regs); break; \
+ case RAX: val = UPT_RAX(regs); break; \
+ case RBX: val = UPT_RBX(regs); break; \
+ case RCX: val = UPT_RCX(regs); break; \
+ case RDX: val = UPT_RDX(regs); break; \
+ case RSI: val = UPT_RSI(regs); break; \
+ case RDI: val = UPT_RDI(regs); break; \
+ case RBP: val = UPT_RBP(regs); break; \
+ case ORIG_RAX: val = UPT_ORIG_RAX(regs); break; \
+ case CS: val = UPT_CS(regs); break; \
+ case SS: val = UPT_SS(regs); break; \
+ case FS_BASE: val = UPT_FS_BASE(regs); break; \
+ case GS_BASE: val = UPT_GS_BASE(regs); break; \
+ case DS: val = UPT_DS(regs); break; \
+ case ES: val = UPT_ES(regs); break; \
+ case FS: val = UPT_FS(regs); break; \
+ case GS: val = UPT_GS(regs); break; \
+ case EFLAGS: val = UPT_EFLAGS(regs); break; \
+ default: \
+ panic("Bad register in UPT_REG : %d\n", reg); \
+ val = -1; \
+ } \
+ val; \
+ })
+
+
+#define UPT_SET(regs, reg, val) \
+ ({ unsigned long __upt_val = val; \
+ switch(reg){ \
+ case R8: UPT_R8(regs) = __upt_val; break; \
+ case R9: UPT_R9(regs) = __upt_val; break; \
+ case R10: UPT_R10(regs) = __upt_val; break; \
+ case R11: UPT_R11(regs) = __upt_val; break; \
+ case R12: UPT_R12(regs) = __upt_val; break; \
+ case R13: UPT_R13(regs) = __upt_val; break; \
+ case R14: UPT_R14(regs) = __upt_val; break; \
+ case R15: UPT_R15(regs) = __upt_val; break; \
+ case RIP: UPT_IP(regs) = __upt_val; break; \
+ case RSP: UPT_SP(regs) = __upt_val; break; \
+ case RAX: UPT_RAX(regs) = __upt_val; break; \
+ case RBX: UPT_RBX(regs) = __upt_val; break; \
+ case RCX: UPT_RCX(regs) = __upt_val; break; \
+ case RDX: UPT_RDX(regs) = __upt_val; break; \
+ case RSI: UPT_RSI(regs) = __upt_val; break; \
+ case RDI: UPT_RDI(regs) = __upt_val; break; \
+ case RBP: UPT_RBP(regs) = __upt_val; break; \
+ case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \
+ case CS: UPT_CS(regs) = __upt_val; break; \
+ case SS: UPT_SS(regs) = __upt_val; break; \
+ case FS_BASE: UPT_FS_BASE(regs) = __upt_val; break; \
+ case GS_BASE: UPT_GS_BASE(regs) = __upt_val; break; \
+ case DS: UPT_DS(regs) = __upt_val; break; \
+ case ES: UPT_ES(regs) = __upt_val; break; \
+ case FS: UPT_FS(regs) = __upt_val; break; \
+ case GS: UPT_GS(regs) = __upt_val; break; \
+ case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \
+ default: \
+ panic("Bad register in UPT_SET : %d\n", reg); \
+ break; \
+ } \
+ __upt_val; \
+ })
+
+#define UPT_SET_SYSCALL_RETURN(r, res) \
+ REGS_SET_SYSCALL_RETURN((r)->gp, (res))
+
+#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
+
+#define UPT_SEGV_IS_FIXABLE(r) REGS_SEGV_IS_FIXABLE(&r->skas)
+
+#define UPT_FAULTINFO(r) (&(r)->faultinfo)
+
+static inline void arch_init_registers(int pid)
+{
+}
+
+#endif
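
The UPT_* accessors let arch-independent UML code treat the gp[] array as a register file. A hedged sketch of a consumer (dump_syscall_sketch() and its format string are invented for illustration):

/* Hypothetical consumer of the accessors above. */
static void dump_syscall_sketch(struct uml_pt_regs *regs)
{
        struct syscall_args args = SYSCALL_ARGS(regs);

        printk("syscall %ld: arg1=0x%lx arg2=0x%lx ip=0x%lx\n",
               UPT_SYSCALL_NR(regs), args.args[0], args.args[1],
               UPT_IP(regs));
}
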
diff --git a/arch/um/sys-x86_64/sysdep/ptrace_user.h b/arch/um/sys-x86_64/sysdep/ptrace_user.h
new file mode 100644
index 000000000000..4dbccdb58f48
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/ptrace_user.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2003 PathScale, Inc.
+ *
+ * Licensed under the GPL
+ */
+
+#ifndef __SYSDEP_X86_64_PTRACE_USER_H__
+#define __SYSDEP_X86_64_PTRACE_USER_H__
+
+#define __FRAME_OFFSETS
+#include <sys/ptrace.h>
+#include <linux/ptrace.h>
+#include <asm/ptrace.h>
+#undef __FRAME_OFFSETS
+#include "user_constants.h"
+
+#define PT_INDEX(off) ((off) / sizeof(unsigned long))
+
+#define PT_SYSCALL_NR(regs) ((regs)[PT_INDEX(ORIG_RAX)])
+#define PT_SYSCALL_NR_OFFSET (ORIG_RAX)
+
+#define PT_SYSCALL_ARG1(regs) (((unsigned long *) (regs))[PT_INDEX(RDI)])
+#define PT_SYSCALL_ARG1_OFFSET (RDI)
+
+#define PT_SYSCALL_ARG2(regs) (((unsigned long *) (regs))[PT_INDEX(RSI)])
+#define PT_SYSCALL_ARG2_OFFSET (RSI)
+
+#define PT_SYSCALL_ARG3(regs) (((unsigned long *) (regs))[PT_INDEX(RDX)])
+#define PT_SYSCALL_ARG3_OFFSET (RDX)
+
+#define PT_SYSCALL_ARG4(regs) (((unsigned long *) (regs))[PT_INDEX(RCX)])
+#define PT_SYSCALL_ARG4_OFFSET (RCX)
+
+#define PT_SYSCALL_ARG5(regs) (((unsigned long *) (regs))[PT_INDEX(R8)])
+#define PT_SYSCALL_ARG5_OFFSET (R8)
+
+#define PT_SYSCALL_ARG6(regs) (((unsigned long *) (regs))[PT_INDEX(R9)])
+#define PT_SYSCALL_ARG6_OFFSET (R9)
+
+#define PT_SYSCALL_RET_OFFSET (RAX)
+
+#define PT_IP_OFFSET (RIP)
+#define PT_IP(regs) ((regs)[PT_INDEX(RIP)])
+
+#define PT_SP_OFFSET (RSP)
+#define PT_SP(regs) ((regs)[PT_INDEX(RSP)])
+
+#define PT_ORIG_RAX_OFFSET (ORIG_RAX)
+#define PT_ORIG_RAX(regs) ((regs)[PT_INDEX(ORIG_RAX)])
+
+/*
+ * x86_64 FC3 doesn't define this in /usr/include/linux/ptrace.h even though
+ * it's defined in the kernel's include/linux/ptrace.h. Additionally, use the
+ * 2.4 name and value for 2.4 host compatibility.
+ */
+#ifndef PTRACE_OLDSETOPTIONS
+#define PTRACE_OLDSETOPTIONS 21
+#endif
+
+/*
+ * These are before the system call, so the system call number is RAX
+ * rather than ORIG_RAX, and arg4 is R10 rather than RCX
+ */
+#define REGS_SYSCALL_NR PT_INDEX(RAX)
+#define REGS_SYSCALL_ARG1 PT_INDEX(RDI)
+#define REGS_SYSCALL_ARG2 PT_INDEX(RSI)
+#define REGS_SYSCALL_ARG3 PT_INDEX(RDX)
+#define REGS_SYSCALL_ARG4 PT_INDEX(R10)
+#define REGS_SYSCALL_ARG5 PT_INDEX(R8)
+#define REGS_SYSCALL_ARG6 PT_INDEX(R9)
+
+#define REGS_IP_INDEX PT_INDEX(RIP)
+#define REGS_SP_INDEX PT_INDEX(RSP)
+
+#define FP_SIZE (HOST_FP_SIZE)
+
+#endif
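
These byte offsets are meant for host-side ptrace() calls against a stopped child. An illustrative use (not from the patch), reading the child's syscall number:

/* Sketch only: peek the syscall number of a ptrace-stopped child. */
#include <sys/ptrace.h>

static long peek_syscall_nr_sketch(int pid)
{
        return ptrace(PTRACE_PEEKUSER, pid, PT_SYSCALL_NR_OFFSET, 0);
}
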
diff --git a/arch/um/sys-x86_64/sysdep/sc.h b/arch/um/sys-x86_64/sysdep/sc.h
new file mode 100644
index 000000000000..8aee45b07434
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/sc.h
@@ -0,0 +1,45 @@
+#ifndef __SYSDEP_X86_64_SC_H
+#define __SYSDEP_X86_64_SC_H
+
+/* Copyright (C) 2003 - 2004 PathScale, Inc
+ * Released under the GPL
+ */
+
+#include <user_constants.h>
+
+#define SC_OFFSET(sc, field) \
+ *((unsigned long *) &(((char *) (sc))[HOST_##field]))
+
+#define SC_RBX(sc) SC_OFFSET(sc, SC_RBX)
+#define SC_RCX(sc) SC_OFFSET(sc, SC_RCX)
+#define SC_RDX(sc) SC_OFFSET(sc, SC_RDX)
+#define SC_RSI(sc) SC_OFFSET(sc, SC_RSI)
+#define SC_RDI(sc) SC_OFFSET(sc, SC_RDI)
+#define SC_RBP(sc) SC_OFFSET(sc, SC_RBP)
+#define SC_RAX(sc) SC_OFFSET(sc, SC_RAX)
+#define SC_R8(sc) SC_OFFSET(sc, SC_R8)
+#define SC_R9(sc) SC_OFFSET(sc, SC_R9)
+#define SC_R10(sc) SC_OFFSET(sc, SC_R10)
+#define SC_R11(sc) SC_OFFSET(sc, SC_R11)
+#define SC_R12(sc) SC_OFFSET(sc, SC_R12)
+#define SC_R13(sc) SC_OFFSET(sc, SC_R13)
+#define SC_R14(sc) SC_OFFSET(sc, SC_R14)
+#define SC_R15(sc) SC_OFFSET(sc, SC_R15)
+#define SC_IP(sc) SC_OFFSET(sc, SC_IP)
+#define SC_SP(sc) SC_OFFSET(sc, SC_SP)
+#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2)
+#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR)
+#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO)
+#define SC_CS(sc) SC_OFFSET(sc, SC_CS)
+#define SC_FS(sc) SC_OFFSET(sc, SC_FS)
+#define SC_GS(sc) SC_OFFSET(sc, SC_GS)
+#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS)
+#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK)
+#define SC_SS(sc) SC_OFFSET(sc, SC_SS)
+#if 0
+#define SC_ORIG_RAX(sc) SC_OFFSET(sc, SC_ORIG_RAX)
+#define SC_DS(sc) SC_OFFSET(sc, SC_DS)
+#define SC_ES(sc) SC_OFFSET(sc, SC_ES)
+#endif
+
+#endif
diff --git a/arch/um/sys-x86_64/sysdep/sigcontext.h b/arch/um/sys-x86_64/sysdep/sigcontext.h
new file mode 100644
index 000000000000..0155133b1458
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/sigcontext.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2003 PathScale, Inc.
+ *
+ * Licensed under the GPL
+ */
+
+#ifndef __SYSDEP_X86_64_SIGCONTEXT_H
+#define __SYSDEP_X86_64_SIGCONTEXT_H
+
+#include <sysdep/sc.h>
+
+#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
+
+#define GET_FAULTINFO_FROM_SC(fi, sc) \
+ { \
+ (fi).cr2 = SC_CR2(sc); \
+ (fi).error_code = SC_ERR(sc); \
+ (fi).trap_no = SC_TRAPNO(sc); \
+ }
+
+/* Trap number 14 is a page fault. */
+#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
+
+/* The broken SKAS API, which doesn't pass trap_no, is not an issue here. */
+#define SEGV_MAYBE_FIXABLE(fi) 0
+
+#endif
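
GET_FAULTINFO_FROM_SC() is what lets a host SIGSEGV handler fill the arch-independent struct faultinfo from the sigcontext it receives. A rough handler skeleton, assuming this header plus sysdep/faultinfo.h (the name and control flow are invented):

/* Illustrative only. */
static void segv_sketch(void *sc)
{
        struct faultinfo fi;

        GET_FAULTINFO_FROM_SC(fi, sc);
        if (SEGV_IS_FIXABLE(&fi) && FAULT_WRITE(fi)) {
                /* a fixable write fault: candidate for fixup */
        }
}
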
diff --git a/arch/um/sys-x86_64/sysdep/skas_ptrace.h b/arch/um/sys-x86_64/sysdep/skas_ptrace.h
new file mode 100644
index 000000000000..95db4be786e4
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/skas_ptrace.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __SYSDEP_X86_64_SKAS_PTRACE_H
+#define __SYSDEP_X86_64_SKAS_PTRACE_H
+
+struct ptrace_faultinfo {
+ int is_write;
+ unsigned long addr;
+};
+
+struct ptrace_ldt {
+ int func;
+ void *ptr;
+ unsigned long bytecount;
+};
+
+#define PTRACE_LDT 54
+
+#endif
diff --git a/arch/um/sys-x86_64/sysdep/stub.h b/arch/um/sys-x86_64/sysdep/stub.h
new file mode 100644
index 000000000000..655f9c2de3ac
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/stub.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __SYSDEP_STUB_H
+#define __SYSDEP_STUB_H
+
+#include <sys/mman.h>
+#include <asm/unistd.h>
+#include <sysdep/ptrace_user.h>
+#include "as-layout.h"
+#include "stub-data.h"
+#include "kern_constants.h"
+#include "uml-config.h"
+
+extern void stub_segv_handler(int sig);
+extern void stub_clone_handler(void);
+
+#define STUB_SYSCALL_RET PT_INDEX(RAX)
+#define STUB_MMAP_NR __NR_mmap
+#define MMAP_OFFSET(o) (o)
+
+#define __syscall_clobber "r11","rcx","memory"
+#define __syscall "syscall"
+
+static inline long stub_syscall0(long syscall)
+{
+ long ret;
+
+ __asm__ volatile (__syscall
+ : "=a" (ret)
+ : "0" (syscall) : __syscall_clobber );
+
+ return ret;
+}
+
+static inline long stub_syscall2(long syscall, long arg1, long arg2)
+{
+ long ret;
+
+ __asm__ volatile (__syscall
+ : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );
+
+ return ret;
+}
+
+static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
+{
+ long ret;
+
+ __asm__ volatile (__syscall
+ : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
+ : __syscall_clobber );
+
+ return ret;
+}
+
+static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
+ long arg4)
+{
+ long ret;
+
+ __asm__ volatile ("movq %5,%%r10 ; " __syscall
+ : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
+ "g" (arg4)
+ : __syscall_clobber, "r10" );
+
+ return ret;
+}
+
+static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
+ long arg4, long arg5)
+{
+ long ret;
+
+ __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
+ : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
+ "g" (arg4), "g" (arg5)
+ : __syscall_clobber, "r10", "r8" );
+
+ return ret;
+}
+
+static inline void trap_myself(void)
+{
+ __asm("int3");
+}
+
+static inline void remap_stack(long fd, unsigned long offset)
+{
+ __asm__ volatile ("movq %4,%%r10 ; movq %5,%%r8 ; "
+ "movq %6, %%r9; " __syscall "; movq %7, %%rbx ; "
+ "movq %%rax, (%%rbx)":
+ : "a" (STUB_MMAP_NR), "D" (STUB_DATA),
+ "S" (UM_KERN_PAGE_SIZE),
+ "d" (PROT_READ | PROT_WRITE),
+ "g" (MAP_FIXED | MAP_SHARED), "g" (fd),
+ "g" (offset),
+ "i" (&((struct stub_data *) STUB_DATA)->err)
+ : __syscall_clobber, "r10", "r8", "r9" );
+}
+
+#endif
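
The stub_syscallN() wrappers follow the x86_64 syscall ABI: number in rax, arguments in rdi/rsi/rdx/r10/r8, with rcx and r11 clobbered by the syscall instruction itself. As an illustrative use (not in the patch), a stub could change page protections like so:

/* Sketch only: mprotect() through the raw wrappers above. */
static inline long stub_mprotect_sketch(void *addr, unsigned long len,
                                        int prot)
{
        return stub_syscall3(__NR_mprotect, (long) addr, (long) len,
                             (long) prot);
}
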
diff --git a/arch/um/sys-x86_64/sysdep/syscalls.h b/arch/um/sys-x86_64/sysdep/syscalls.h
new file mode 100644
index 000000000000..7cfb0b085655
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/syscalls.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2003 PathScale, Inc.
+ *
+ * Licensed under the GPL
+ */
+
+#ifndef __SYSDEP_X86_64_SYSCALLS_H__
+#define __SYSDEP_X86_64_SYSCALLS_H__
+
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <kern_constants.h>
+
+typedef long syscall_handler_t(void);
+
+extern syscall_handler_t *sys_call_table[];
+
+#define EXECUTE_SYSCALL(syscall, regs) \
+ (((long (*)(long, long, long, long, long, long)) \
+ (*sys_call_table[syscall]))(UPT_SYSCALL_ARG1(&regs->regs), \
+ UPT_SYSCALL_ARG2(&regs->regs), \
+ UPT_SYSCALL_ARG3(&regs->regs), \
+ UPT_SYSCALL_ARG4(&regs->regs), \
+ UPT_SYSCALL_ARG5(&regs->regs), \
+ UPT_SYSCALL_ARG6(&regs->regs)))
+
+extern long old_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+extern syscall_handler_t sys_modify_ldt;
+extern syscall_handler_t sys_arch_prctl;
+
+#endif
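
EXECUTE_SYSCALL() expects a struct pt_regs whose .regs member is the struct uml_pt_regs from sysdep/ptrace.h; the real dispatch site lives in UML's generic syscall handler. A schematic fragment, assuming that layout (the NR_syscalls bounds check is an assumption, not from the patch):

/* Hedged sketch of the dispatch site. */
static void dispatch_sketch(struct pt_regs *regs)
{
        int syscall = UPT_SYSCALL_NR(&regs->regs);
        long ret = -ENOSYS;

        if (syscall >= 0 && syscall < NR_syscalls)
                ret = EXECUTE_SYSCALL(syscall, regs);
        UPT_SYSCALL_RET(&regs->regs) = ret;
}
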
diff --git a/arch/um/sys-x86_64/sysdep/system.h b/arch/um/sys-x86_64/sysdep/system.h
new file mode 100644
index 000000000000..d1b93c436200
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/system.h
@@ -0,0 +1,132 @@
+#ifndef _ASM_X86_SYSTEM_H_
+#define _ASM_X86_SYSTEM_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/* entries in ARCH_DLINFO: */
+#ifdef CONFIG_IA32_EMULATION
+# define AT_VECTOR_SIZE_ARCH 2
+#else
+# define AT_VECTOR_SIZE_ARCH 1
+#endif
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+void default_idle(void);
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *    CPU 0                          CPU 1
+ *
+ *    b = 2;
+ *    memory_barrier();
+ *    p = &b;                        q = p;
+ *                                   read_barrier_depends();
+ *                                   d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *    CPU 0                          CPU 1
+ *
+ *    a = 2;
+ *    memory_barrier();
+ *    b = 3;                         y = b;
+ *                                   read_barrier_depends();
+ *                                   x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends() do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb() rmb()
+#else
+# define smp_rmb() barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb() wmb()
+#else
+# define smp_wmb() barrier()
+#endif
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use a three-way alternative() for this if one existed.)
+ */
+static inline void rdtsc_barrier(void)
+{
+ alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+ alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
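
rdtsc_barrier() is used in pairs around a TSC read so neither the CPU nor the compiler can move the read across the measured region. Illustrative usage (get_cycles() stands in for an rdtsc-based read; do_measured_work() is hypothetical):

/* Sketch: fencing a timing measurement. */
static unsigned long long timed_work_sketch(void)
{
        unsigned long long t0, t1;

        rdtsc_barrier();
        t0 = get_cycles();
        do_measured_work();
        rdtsc_barrier();
        t1 = get_cycles();
        return t1 - t0;
}
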
diff --git a/arch/um/sys-x86_64/sysdep/tls.h b/arch/um/sys-x86_64/sysdep/tls.h
new file mode 100644
index 000000000000..18c000d0357a
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/tls.h
@@ -0,0 +1,29 @@
+#ifndef _SYSDEP_TLS_H
+#define _SYSDEP_TLS_H
+
+# ifndef __KERNEL__
+
+/* Change the name to avoid conflicts with the original struct from
+ * <asm/ldt.h>, which may be named user_desc (in 2.4, and in headers
+ * matching its API, it was named modify_ldt_ldt_s). */
+
+typedef struct um_dup_user_desc {
+ unsigned int entry_number;
+ unsigned int base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit:1;
+ unsigned int contents:2;
+ unsigned int read_exec_only:1;
+ unsigned int limit_in_pages:1;
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+ unsigned int lm:1;
+} user_desc_t;
+
+# else /* __KERNEL__ */
+
+# include <ldt.h>
+typedef struct user_desc user_desc_t;
+
+# endif /* __KERNEL__ */
+#endif /* _SYSDEP_TLS_H */
diff --git a/arch/um/sys-x86_64/sysdep/vm-flags.h b/arch/um/sys-x86_64/sysdep/vm-flags.h
new file mode 100644
index 000000000000..3213edfa7888
--- /dev/null
+++ b/arch/um/sys-x86_64/sysdep/vm-flags.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
+ * Copyright 2003 PathScale, Inc.
+ * Licensed under the GPL
+ */
+
+#ifndef __VM_FLAGS_X86_64_H
+#define __VM_FLAGS_X86_64_H
+
+#define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | \
+ VM_EXEC | VM_MAYREAD | VM_MAYWRITE | \
+ VM_MAYEXEC)
+
+extern unsigned long vm_stack_flags, vm_stack_flags32;
+extern unsigned long vm_data_default_flags, vm_data_default_flags32;
+extern unsigned long vm_force_exec32;
+
+#ifdef TIF_IA32
+#define VM_DATA_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_IA32) ? vm_data_default_flags32 : \
+ vm_data_default_flags)
+
+#define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags)
+#endif
+
+#define VM_DATA_DEFAULT_FLAGS vm_data_default_flags
+
+#define VM_STACK_DEFAULT_FLAGS vm_stack_flags
+
+#endif