Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/Kconfig | 31
-rw-r--r--  arch/sparc64/Kconfig.debug | 2
-rw-r--r--  arch/sparc64/defconfig | 213
-rw-r--r--  arch/sparc64/kernel/Makefile | 7
-rw-r--r--  arch/sparc64/kernel/auxio.c | 2
-rw-r--r--  arch/sparc64/kernel/binfmt_aout32.c | 4
-rw-r--r--  arch/sparc64/kernel/cpu.c | 36
-rw-r--r--  arch/sparc64/kernel/ebus.c | 12
-rw-r--r--  arch/sparc64/kernel/head.S | 167
-rw-r--r--  arch/sparc64/kernel/init_task.c | 1
-rw-r--r--  arch/sparc64/kernel/irq.c | 249
-rw-r--r--  arch/sparc64/kernel/isa.c | 7
-rw-r--r--  arch/sparc64/kernel/mdesc.c | 860
-rw-r--r--  arch/sparc64/kernel/of_device.c | 243
-rw-r--r--  arch/sparc64/kernel/pci.c | 77
-rw-r--r--  arch/sparc64/kernel/pci_common.c | 4
-rw-r--r--  arch/sparc64/kernel/pci_fire.c | 24
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c | 823
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c | 32
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c | 35
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c | 42
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c | 190
-rw-r--r--  arch/sparc64/kernel/power.c | 72
-rw-r--r--  arch/sparc64/kernel/process.c | 28
-rw-r--r--  arch/sparc64/kernel/prom.c | 255
-rw-r--r--  arch/sparc64/kernel/sbus.c | 568
-rw-r--r--  arch/sparc64/kernel/setup.c | 75
-rw-r--r--  arch/sparc64/kernel/signal.c | 15
-rw-r--r--  arch/sparc64/kernel/smp.c | 283
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c | 19
-rw-r--r--  arch/sparc64/kernel/sys_sparc32.c | 12
-rw-r--r--  arch/sparc64/kernel/sysfs.c | 2
-rw-r--r--  arch/sparc64/kernel/systbls.S | 11
-rw-r--r--  arch/sparc64/kernel/time.c | 238
-rw-r--r--  arch/sparc64/kernel/trampoline.S | 47
-rw-r--r--  arch/sparc64/kernel/traps.c | 1
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S | 8
-rw-r--r--  arch/sparc64/lib/Makefile | 8
-rw-r--r--  arch/sparc64/lib/NGcopy_from_user.S | 8
-rw-r--r--  arch/sparc64/lib/NGcopy_to_user.S | 8
-rw-r--r--  arch/sparc64/lib/NGmemcpy.S | 371
-rw-r--r--  arch/sparc64/lib/NGpage.S | 1
-rw-r--r--  arch/sparc64/lib/delay.c | 46
-rw-r--r--  arch/sparc64/mm/fault.c | 24
-rw-r--r--  arch/sparc64/mm/hugetlbpage.c | 2
-rw-r--r--  arch/sparc64/mm/tsb.c | 3
-rw-r--r--  arch/sparc64/prom/console.c | 85
-rw-r--r--  arch/sparc64/prom/misc.c | 33
-rw-r--r--  arch/sparc64/prom/p1275.c | 1
-rw-r--r--  arch/sparc64/prom/tree.c | 21
-rw-r--r--  arch/sparc64/solaris/socksys.c | 3
51 files changed, 2138 insertions, 3171 deletions
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 89a1b469b93d..33dabf588bdd 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -23,6 +23,10 @@ config GENERIC_TIME
bool
default y
+config GENERIC_CMOS_UPDATE
+ bool
+ default y
+
config GENERIC_CLOCKEVENTS
bool
default y
@@ -62,6 +66,12 @@ config AUDIT_ARCH
bool
default y
+config ARCH_NO_VIRT_TO_BUS
+ def_bool y
+
+config OF
+ def_bool y
+
choice
prompt "Kernel page size"
default SPARC64_PAGE_SIZE_8KB
@@ -108,6 +118,15 @@ config SECCOMP
source kernel/Kconfig.hz
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
+ select HOTPLUG
+ ---help---
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu/cpu#.
+ Say N if you want to disable CPU hotplug.
+
source "init/Kconfig"
config SYSVIPC_COMPAT
@@ -305,6 +324,12 @@ config SUN_IO
bool
default y
+config SUN_LDOMS
+ bool "Sun Logical Domains support"
+ help
+ Say Y here if you want to support virtual devices via
+ Logical Domains.
+
config PCI
bool "PCI support"
select ARCH_SUPPORTS_MSI
@@ -320,8 +345,10 @@ config PCI
doesn't.
config PCI_DOMAINS
- bool
- default PCI
+ def_bool PCI
+
+config PCI_SYSCALL
+ def_bool PCI
source "drivers/pci/Kconfig"
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index 1f130f3b6c24..a5faa3683bd6 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -29,7 +29,7 @@ config DEBUG_BOOTMEM
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
- depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
+ depends on DEBUG_KERNEL && !HIBERNATION
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 65840a62bb9c..7d07297db878 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,11 +1,12 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22-rc1
-# Mon May 14 04:17:48 2007
+# Linux kernel version: 2.6.23-rc6
+# Sun Sep 16 09:52:11 2007
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_64BIT=y
CONFIG_MMU=y
@@ -16,6 +17,8 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_AUDIT_ARCH=y
+CONFIG_ARCH_NO_VIRT_TO_BUS=y
+CONFIG_OF=y
CONFIG_SPARC64_PAGE_SIZE_8KB=y
# CONFIG_SPARC64_PAGE_SIZE_64KB is not set
# CONFIG_SPARC64_PAGE_SIZE_512KB is not set
@@ -29,25 +32,20 @@ CONFIG_HZ=100
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
-# Code maturity level options
+# General setup
#
CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
-
-#
-# General setup
-#
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=18
@@ -82,22 +80,15 @@ CONFIG_SLUB=y
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
-
-#
-# Block layer
-#
CONFIG_BLOCK=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_BSG=y
#
# IO Schedulers
@@ -160,8 +151,10 @@ CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
CONFIG_SUN_AUXIO=y
CONFIG_SUN_IO=y
+# CONFIG_SUN_LDOMS is not set
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
CONFIG_ARCH_SUPPORTS_MSI=y
CONFIG_PCI_MSI=y
# CONFIG_PCI_DEBUG is not set
@@ -246,10 +239,6 @@ CONFIG_IPV6_TUNNEL=m
# CONFIG_IPV6_MULTIPLE_TABLES is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
-
-#
-# DCCP Configuration (EXPERIMENTAL)
-#
CONFIG_IP_DCCP=m
CONFIG_INET_DCCP_DIAG=m
CONFIG_IP_DCCP_ACKVEC=y
@@ -269,15 +258,7 @@ CONFIG_IP_DCCP_CCID3_RTO=100
#
# CONFIG_IP_DCCP_DEBUG is not set
# CONFIG_NET_DCCPPROBE is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
# CONFIG_IP_SCTP is not set
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -314,6 +295,7 @@ CONFIG_NET_TCPPROBE=m
# CONFIG_MAC80211 is not set
# CONFIG_IEEE80211 is not set
# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -328,28 +310,12 @@ CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
CONFIG_CONNECTOR=m
# CONFIG_MTD is not set
-
-#
-# Parallel port support
-#
+CONFIG_OF_DEVICE=y
# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
# CONFIG_BLK_DEV_UMEM is not set
@@ -364,18 +330,11 @@ CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_BUFFERS=8
CONFIG_CDROM_PKTCDVD_WCACHE=y
CONFIG_ATA_OVER_ETH=m
-
-#
-# Misc devices
-#
+CONFIG_MISC_DEVICES=y
# CONFIG_PHANTOM is not set
+# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
-# CONFIG_BLINK is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
CONFIG_IDE=y
CONFIG_BLK_DEV_IDE=y
@@ -440,6 +399,7 @@ CONFIG_BLK_DEV_IDEDMA=y
#
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
CONFIG_SCSI_NETLINK=y
CONFIG_SCSI_PROC_FS=y
@@ -472,10 +432,7 @@ CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=m
# CONFIG_SCSI_SAS_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
-
-#
-# SCSI low-level drivers
-#
+CONFIG_SCSI_LOWLEVEL=y
CONFIG_ISCSI_TCP=m
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
@@ -505,14 +462,9 @@ CONFIG_ISCSI_TCP=m
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_DEBUG is not set
-# CONFIG_SCSI_ESP_CORE is not set
# CONFIG_SCSI_SUNESP is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_ATA is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
@@ -545,30 +497,16 @@ CONFIG_DM_ZERO=m
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
# CONFIG_I2O is not set
-
-#
-# Network device support
-#
CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
-
-#
-# ARCnet devices
-#
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
CONFIG_NET_ETHERNET=y
CONFIG_MII=m
# CONFIG_SUNLANCE is not set
@@ -578,10 +516,6 @@ CONFIG_MII=m
# CONFIG_SUNGEM is not set
CONFIG_CASSINI=m
# CONFIG_NET_VENDOR_3COM is not set
-
-#
-# Tulip family network device support
-#
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
CONFIG_NET_PCI=y
@@ -631,11 +565,6 @@ CONFIG_NETDEV_10000=y
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
# CONFIG_MLX4_CORE is not set
-CONFIG_MLX4_DEBUG=y
-
-#
-# Token Ring devices
-#
# CONFIG_TR is not set
#
@@ -665,6 +594,7 @@ CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
# CONFIG_SLIP is not set
CONFIG_SLHC=m
# CONFIG_NET_FC is not set
@@ -672,15 +602,7 @@ CONFIG_SLHC=m
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
# CONFIG_PHONE is not set
#
@@ -688,6 +610,7 @@ CONFIG_SLHC=m
#
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
#
# Userland interfaces
@@ -733,7 +656,6 @@ CONFIG_INPUT_SPARCSPKR=y
# CONFIG_INPUT_POWERMATE is not set
# CONFIG_INPUT_YEALINK is not set
# CONFIG_INPUT_UINPUT is not set
-# CONFIG_INPUT_POLLDEV is not set
#
# Hardware I/O ports
@@ -773,22 +695,13 @@ CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_LEGACY_PTYS is not set
-
-#
-# IPMI
-#
# CONFIG_IPMI_HANDLER is not set
# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_RTC=y
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
CONFIG_I2C=y
@@ -822,6 +735,7 @@ CONFIG_I2C_ALGOBIT=y
# CONFIG_I2C_SIS5595 is not set
# CONFIG_I2C_SIS630 is not set
# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_TAOS_EVM is not set
# CONFIG_I2C_STUB is not set
# CONFIG_I2C_TINY_USB is not set
# CONFIG_I2C_VIA is not set
@@ -833,11 +747,13 @@ CONFIG_I2C_ALGOBIT=y
#
# CONFIG_SENSORS_DS1337 is not set
# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_DS1682 is not set
# CONFIG_SENSORS_EEPROM is not set
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@@ -848,14 +764,12 @@ CONFIG_I2C_ALGOBIT=y
#
# CONFIG_SPI is not set
# CONFIG_SPI_MASTER is not set
-
-#
-# Dallas's 1-wire bus
-#
# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
# CONFIG_SENSORS_AD7418 is not set
# CONFIG_SENSORS_ADM1021 is not set
# CONFIG_SENSORS_ADM1025 is not set
@@ -882,14 +796,17 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_LM87 is not set
# CONFIG_SENSORS_LM90 is not set
# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_MAX6650 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47M192 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_THMC50 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_VT8231 is not set
@@ -924,6 +841,7 @@ CONFIG_HWMON=y
#
# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
CONFIG_FB_DDC=y
@@ -949,6 +867,8 @@ CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_SBUS is not set
+# CONFIG_FB_XVR500 is not set
+# CONFIG_FB_XVR2500 is not set
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
@@ -970,9 +890,6 @@ CONFIG_FB_RADEON_I2C=y
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_ARK is not set
# CONFIG_FB_PM3 is not set
-# CONFIG_FB_XVR500 is not set
-# CONFIG_FB_XVR2500 is not set
-# CONFIG_FB_PCI is not set
# CONFIG_FB_VIRTUAL is not set
#
@@ -981,6 +898,7 @@ CONFIG_FB_RADEON_I2C=y
# CONFIG_PROM_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
CONFIG_FONTS=y
# CONFIG_FONT_8x8 is not set
@@ -1017,7 +935,6 @@ CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
CONFIG_SND_SEQUENCER_OSS=y
-# CONFIG_SND_RTCTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
@@ -1114,14 +1031,15 @@ CONFIG_SND_SUN_CS4231=m
# CONFIG_SND_SOC is not set
#
-# Open Sound System
+# SoC Audio support for SuperH
#
-# CONFIG_SOUND_PRIME is not set
-CONFIG_AC97_BUS=m
#
-# HID Devices
+# Open Sound System
#
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HID_DEBUG is not set
@@ -1132,10 +1050,7 @@ CONFIG_USB_HID=y
# CONFIG_USB_HIDINPUT_POWERBOOK is not set
# CONFIG_HID_FF is not set
CONFIG_USB_HIDDEV=y
-
-#
-# USB support
-#
+CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
@@ -1157,7 +1072,6 @@ CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1165,6 +1079,7 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=m
# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
#
# USB Device Class drivers
@@ -1243,32 +1158,8 @@ CONFIG_USB_STORAGE=m
#
# CONFIG_USB_GADGET is not set
# CONFIG_MMC is not set
-
-#
-# LED devices
-#
# CONFIG_NEW_LEDS is not set
-
-#
-# LED drivers
-#
-
-#
-# LED Triggers
-#
-
-#
-# InfiniBand support
-#
# CONFIG_INFINIBAND is not set
-
-#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
-#
-
-#
-# Real Time Clock
-#
# CONFIG_RTC_CLASS is not set
#
@@ -1285,10 +1176,14 @@ CONFIG_USB_STORAGE=m
#
#
+# Userspace I/O
+#
+# CONFIG_UIO is not set
+
+#
# Misc Linux/SPARC drivers
#
CONFIG_SUN_OPENPROMIO=m
-CONFIG_SUN_MOSTEK_RTC=y
# CONFIG_OBP_FLASH is not set
# CONFIG_SUN_BPP is not set
# CONFIG_BBC_I2C is not set
@@ -1387,7 +1282,6 @@ CONFIG_RAMFS=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
#
# Partition Types
@@ -1465,14 +1359,17 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
# CONFIG_TIMER_STATS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_LOCK_ALLOC is not set
# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
@@ -1496,10 +1393,10 @@ CONFIG_FORCED_INLINING=y
CONFIG_KEYS=y
# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_BLKCIPHER=y
@@ -1539,10 +1436,7 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_TEST=m
-
-#
-# Hardware crypto devices
-#
+CONFIG_CRYPTO_HW=y
#
# Library routines
@@ -1552,6 +1446,7 @@ CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index f964bf28d21a..40d2f3aae91e 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -8,17 +8,17 @@ EXTRA_CFLAGS := -Werror
extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o setup.o cpu.o idprom.o \
- traps.o auxio.o una_asm.o sysfs.o \
+ traps.o auxio.o una_asm.o sysfs.o iommu.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \
unaligned.o central.o pci.o starfire.o semaphore.o \
power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
-obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
+obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o \
pci_psycho.o pci_sabre.o pci_schizo.o \
pci_sun4v.o pci_sun4v_asm.o pci_fire.o
-obj-$(CONFIG_SMP) += smp.o trampoline.o
+obj-$(CONFIG_SMP) += smp.o trampoline.o hvtramp.o
obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
obj-$(CONFIG_BINFMT_AOUT32) += binfmt_aout32.o
@@ -26,6 +26,7 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o
obj-$(CONFIG_AUDIT) += audit.o
obj-$(CONFIG_AUDIT)$(CONFIG_SPARC32_COMPAT) += compat_audit.o
obj-y += $(obj-yy)
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index 826118ee53d5..7b379761e9f8 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -155,7 +155,7 @@ static struct of_platform_driver auxio_driver = {
static int __init auxio_init(void)
{
- return of_register_driver(&auxio_driver, &of_bus_type);
+ return of_register_driver(&auxio_driver, &of_platform_bus_type);
}
/* Must be after subsys_initcall() so that busses are probed. Must
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index f205fc7cbcd0..d208cc7804f2 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -177,7 +177,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr
get_user(c,p++);
} while (c);
}
- put_user(NULL,argv);
+ put_user(0,argv);
current->mm->arg_end = current->mm->env_start = (unsigned long) p;
while (envc-->0) {
char c;
@@ -186,7 +186,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr
get_user(c,p++);
} while (c);
}
- put_user(NULL,envp);
+ put_user(0,envp);
current->mm->env_end = (unsigned long) p;
return sp;
}
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 7eb81d3954d9..e43db73f2b91 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -1,7 +1,7 @@
/* cpu.c: Dinky routines to look for the kind of Sparc cpu
* we are on.
*
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
@@ -13,6 +13,7 @@
#include <asm/fpumacro.h>
#include <asm/cpudata.h>
#include <asm/spitfire.h>
+#include <asm/oplib.h>
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
@@ -61,21 +62,40 @@ struct cpu_iu_info linux_sparc_chips[] = {
#define NSPARCCHIPS ARRAY_SIZE(linux_sparc_chips)
-char *sparc_cpu_type = "cpu-oops";
-char *sparc_fpu_type = "fpu-oops";
+char *sparc_cpu_type;
+char *sparc_fpu_type;
unsigned int fsr_storage;
+static void __init sun4v_cpu_probe(void)
+{
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_NIAGARA1:
+ sparc_cpu_type = "UltraSparc T1 (Niagara)";
+ sparc_fpu_type = "UltraSparc T1 integrated FPU";
+ break;
+
+ case SUN4V_CHIP_NIAGARA2:
+ sparc_cpu_type = "UltraSparc T2 (Niagara2)";
+ sparc_fpu_type = "UltraSparc T2 integrated FPU";
+ break;
+
+ default:
+ printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
+ prom_cpu_compatible);
+ sparc_cpu_type = "Unknown SUN4V CPU";
+ sparc_fpu_type = "Unknown SUN4V FPU";
+ break;
+ }
+}
+
void __init cpu_probe(void)
{
unsigned long ver, fpu_vers, manuf, impl, fprs;
int i;
- if (tlb_type == hypervisor) {
- sparc_cpu_type = "UltraSparc T1 (Niagara)";
- sparc_fpu_type = "UltraSparc T1 integrated FPU";
- return;
- }
+ if (tlb_type == hypervisor)
+ return sun4v_cpu_probe();
fprs = fprs_read();
fprs_write(FPRS_FEF);
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index ad55a9bb50dd..04ab81cb4f48 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -362,6 +362,7 @@ static int __init child_regs_nonstandard(struct linux_ebus_device *dev)
static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev)
{
struct linux_ebus_child *child;
+ struct dev_archdata *sd;
struct of_device *op;
int i, len;
@@ -374,7 +375,10 @@ static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_de
dev->num_addrs = 0;
dev->num_irqs = 0;
} else {
- (void) of_get_property(dp, "reg", &len);
+ const int *regs = of_get_property(dp, "reg", &len);
+
+ if (!regs)
+ len = 0;
dev->num_addrs = len / sizeof(struct linux_prom_registers);
for (i = 0; i < dev->num_addrs; i++)
@@ -387,6 +391,12 @@ static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_de
dev->irqs[i] = op->irqs[i];
}
+ sd = &dev->ofdev.dev.archdata;
+ sd->prom_node = dp;
+ sd->op = &dev->ofdev;
+ sd->iommu = dev->bus->ofdev.dev.parent->archdata.iommu;
+ sd->stc = dev->bus->ofdev.dev.parent->archdata.stc;
+
dev->ofdev.node = dp;
dev->ofdev.dev.parent = &dev->bus->ofdev.dev;
dev->ofdev.dev.bus = &ebus_bus_type;
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 35feacb6b8ec..c4147ad8677b 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -1,15 +1,15 @@
-/* $Id: head.S,v 1.87 2002/02/09 19:49:31 davem Exp $
- * head.S: Initial boot code for the Sparc64 port of Linux.
+/* head.S: Initial boot code for the Sparc64 port of Linux.
*
- * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
- * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
*/
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/threads.h>
+#include <linux/init.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
@@ -97,7 +97,8 @@ sparc64_boot:
.globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
.globl prom_boot_mapped_pc, prom_boot_mapping_mode
.globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
- .globl is_sun4v
+ .globl prom_compatible_name, prom_cpu_path, prom_cpu_compatible
+ .globl is_sun4v, sun4v_chip_type, prom_set_trap_table_name
prom_peer_name:
.asciz "peer"
prom_compatible_name:
@@ -106,6 +107,8 @@ prom_finddev_name:
.asciz "finddevice"
prom_chosen_path:
.asciz "/chosen"
+prom_cpu_path:
+ .asciz "/cpu"
prom_getprop_name:
.asciz "getprop"
prom_mmu_name:
@@ -118,11 +121,17 @@ prom_map_name:
.asciz "map"
prom_unmap_name:
.asciz "unmap"
+prom_set_trap_table_name:
+ .asciz "SUNW,set-trap-table"
prom_sun4v_name:
.asciz "sun4v"
+prom_niagara_prefix:
+ .asciz "SUNW,UltraSPARC-T"
.align 4
prom_root_compatible:
.skip 64
+prom_cpu_compatible:
+ .skip 64
prom_root_node:
.word 0
prom_mmu_ihandle_cache:
@@ -138,6 +147,8 @@ prom_boot_mapping_phys_low:
.xword 0
is_sun4v:
.word 0
+sun4v_chip_type:
+ .word SUN4V_CHIP_INVALID
1:
rd %pc, %l0
@@ -296,13 +307,13 @@ is_sun4v:
sethi %hi(prom_sun4v_name), %g7
or %g7, %lo(prom_sun4v_name), %g7
mov 5, %g3
-1: ldub [%g7], %g2
+90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
- bne,pn %icc, 2f
+ bne,pn %icc, 80f
add %g7, 1, %g7
subcc %g3, 1, %g3
- bne,pt %xcc, 1b
+ bne,pt %xcc, 90b
add %g1, 1, %g1
sethi %hi(is_sun4v), %g1
@@ -310,7 +321,80 @@ is_sun4v:
mov 1, %g7
stw %g7, [%g1]
-2:
+ /* cpu_node = prom_finddevice("/cpu") */
+ mov (1b - prom_finddev_name), %l1
+ mov (1b - prom_cpu_path), %l2
+ sub %l0, %l1, %l1
+ sub %l0, %l2, %l2
+ sub %sp, (192 + 128), %sp
+
+ stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice"
+ mov 1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
+ stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/cpu"
+ stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ ldx [%sp + 2047 + 128 + 0x20], %l4 ! cpu device node
+
+ mov (1b - prom_getprop_name), %l1
+ mov (1b - prom_compatible_name), %l2
+ mov (1b - prom_cpu_compatible), %l5
+ sub %l0, %l1, %l1
+ sub %l0, %l2, %l2
+ sub %l0, %l5, %l5
+
+ /* prom_getproperty(cpu_node, "compatible",
+ * &prom_cpu_compatible, 64)
+ */
+ stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
+ mov 4, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
+ mov 1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
+ stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, cpu_node
+ stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible"
+ stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_cpu_compatible
+ mov 64, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size
+ stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ add %sp, (192 + 128), %sp
+
+ sethi %hi(prom_cpu_compatible), %g1
+ or %g1, %lo(prom_cpu_compatible), %g1
+ sethi %hi(prom_niagara_prefix), %g7
+ or %g7, %lo(prom_niagara_prefix), %g7
+ mov 17, %g3
+90: ldub [%g7], %g2
+ ldub [%g1], %g4
+ cmp %g2, %g4
+ bne,pn %icc, 4f
+ add %g7, 1, %g7
+ subcc %g3, 1, %g3
+ bne,pt %xcc, 90b
+ add %g1, 1, %g1
+
+ sethi %hi(prom_cpu_compatible), %g1
+ or %g1, %lo(prom_cpu_compatible), %g1
+ ldub [%g1 + 17], %g2
+ cmp %g2, '1'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_NIAGARA1, %g4
+ cmp %g2, '2'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_NIAGARA2, %g4
+4:
+ mov SUN4V_CHIP_UNKNOWN, %g4
+5: sethi %hi(sun4v_chip_type), %g2
+ or %g2, %lo(sun4v_chip_type), %g2
+ stw %g4, [%g2]
+
+80:
BRANCH_IF_SUN4V(g1, jump_to_sun4u_init)
BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
@@ -374,6 +458,7 @@ jump_to_sun4u_init:
jmpl %g2 + %g0, %g0
nop
+ .section .text.init.refok
sun4u_init:
BRANCH_IF_SUN4V(g1, sun4v_init)
@@ -413,6 +498,33 @@ niagara_tlb_fixup:
stw %g2, [%g1 + %lo(tlb_type)]
/* Patch copy/clear ops. */
+ sethi %hi(sun4v_chip_type), %g1
+ lduw [%g1 + %lo(sun4v_chip_type)], %g1
+ cmp %g1, SUN4V_CHIP_NIAGARA1
+ be,pt %xcc, niagara_patch
+ cmp %g1, SUN4V_CHIP_NIAGARA2
+ be,pt %xcc, niagara2_patch
+ nop
+
+ call generic_patch_copyops
+ nop
+ call generic_patch_bzero
+ nop
+ call generic_patch_pageops
+ nop
+
+ ba,a,pt %xcc, 80f
+niagara2_patch:
+ call niagara2_patch_copyops
+ nop
+ call niagara_patch_bzero
+ nop
+ call niagara2_patch_pageops
+ nop
+
+ ba,a,pt %xcc, 80f
+
+niagara_patch:
call niagara_patch_copyops
nop
call niagara_patch_bzero
@@ -420,6 +532,7 @@ niagara_tlb_fixup:
call niagara_patch_pageops
nop
+80:
/* Patch TLB/cache ops. */
call hypervisor_patch_cachetlbops
nop
@@ -529,6 +642,8 @@ tlb_fixup_done:
nop
/* Not reached... */
+ .previous
+
/* This is meant to allow the sharing of this code between
* boot processor invocation (via setup_tba() below) and
* secondary processor startup (via trampoline.S). The
@@ -578,15 +693,38 @@ setup_trap_table:
sethi %hi(kern_base), %g3
ldx [%g3 + %lo(kern_base)], %g3
add %g2, %g3, %o1
+ sethi %hi(sparc64_ttable_tl0), %o0
- call prom_set_trap_table_sun4v
- sethi %hi(sparc64_ttable_tl0), %o0
+ set prom_set_trap_table_name, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x00]
+ mov 2, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x08]
+ mov 0, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x10]
+ stx %o0, [%sp + 2047 + 128 + 0x18]
+ stx %o1, [%sp + 2047 + 128 + 0x20]
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x08], %o1
+ call %o1
+ add %sp, (2047 + 128), %o0
ba,pt %xcc, 2f
nop
-1: call prom_set_trap_table
- sethi %hi(sparc64_ttable_tl0), %o0
+1: sethi %hi(sparc64_ttable_tl0), %o0
+ set prom_set_trap_table_name, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x00]
+ mov 1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x08]
+ mov 0, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x10]
+ stx %o0, [%sp + 2047 + 128 + 0x18]
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x08], %o1
+ call %o1
+ add %sp, (2047 + 128), %o0
/* Start using proper page size encodings in ctx register. */
2: sethi %hi(sparc64_kern_pri_context), %g3
@@ -602,12 +740,13 @@ setup_trap_table:
membar #Sync
+ BRANCH_IF_SUN4V(o2, 1f)
+
/* Kill PROM timer */
sethi %hi(0x80000000), %o2
sllx %o2, 32, %o2
wr %o2, 0, %tick_cmpr
- BRANCH_IF_SUN4V(o2, 1f)
BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
ba,pt %xcc, 2f
diff --git a/arch/sparc64/kernel/init_task.c b/arch/sparc64/kernel/init_task.c
index 329b38fa5c89..90007cf88bac 100644
--- a/arch/sparc64/kernel/init_task.c
+++ b/arch/sparc64/kernel/init_task.c
@@ -1,4 +1,5 @@
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 6b6165d36fd8..23956096b3bf 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -87,7 +87,11 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
*/
#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
-static unsigned int virt_to_real_irq_table[NR_IRQS];
+static struct {
+ unsigned int irq;
+ unsigned int dev_handle;
+ unsigned int dev_ino;
+} virt_to_real_irq_table[NR_IRQS];
static unsigned char virt_irq_alloc(unsigned int real_irq)
{
@@ -96,7 +100,7 @@ static unsigned char virt_irq_alloc(unsigned int real_irq)
BUILD_BUG_ON(NR_IRQS >= 256);
for (ent = 1; ent < NR_IRQS; ent++) {
- if (!virt_to_real_irq_table[ent])
+ if (!virt_to_real_irq_table[ent].irq)
break;
}
if (ent >= NR_IRQS) {
@@ -104,7 +108,7 @@ static unsigned char virt_irq_alloc(unsigned int real_irq)
return 0;
}
- virt_to_real_irq_table[ent] = real_irq;
+ virt_to_real_irq_table[ent].irq = real_irq;
return ent;
}
@@ -117,8 +121,8 @@ static void virt_irq_free(unsigned int virt_irq)
if (virt_irq >= NR_IRQS)
return;
- real_irq = virt_to_real_irq_table[virt_irq];
- virt_to_real_irq_table[virt_irq] = 0;
+ real_irq = virt_to_real_irq_table[virt_irq].irq;
+ virt_to_real_irq_table[virt_irq].irq = 0;
__bucket(real_irq)->virt_irq = 0;
}
@@ -126,7 +130,7 @@ static void virt_irq_free(unsigned int virt_irq)
static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
- return virt_to_real_irq_table[virt_irq];
+ return virt_to_real_irq_table[virt_irq].irq;
}
/*
@@ -213,8 +217,27 @@ struct irq_handler_data {
void (*pre_handler)(unsigned int, void *, void *);
void *pre_handler_arg1;
void *pre_handler_arg2;
+
+ u32 msi;
};
+void sparc64_set_msi(unsigned int virt_irq, u32 msi)
+{
+ struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+ if (data)
+ data->msi = msi;
+}
+
+u32 sparc64_get_msi(unsigned int virt_irq)
+{
+ struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+
+ if (data)
+ return data->msi;
+ return 0xffffffff;
+}
+
static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
unsigned int real_irq = virt_to_real_irq(virt_irq);
@@ -293,13 +316,18 @@ static void sun4u_irq_enable(unsigned int virt_irq)
}
}
+static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
+{
+ sun4u_irq_enable(virt_irq);
+}
+
static void sun4u_irq_disable(unsigned int virt_irq)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
if (likely(data)) {
unsigned long imap = data->imap;
- u32 tmp = upa_readq(imap);
+ unsigned long tmp = upa_readq(imap);
tmp &= ~IMAP_VALID;
upa_writeq(tmp, imap);
@@ -309,6 +337,10 @@ static void sun4u_irq_disable(unsigned int virt_irq)
static void sun4u_irq_end(unsigned int virt_irq)
{
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+ struct irq_desc *desc = irq_desc + virt_irq;
+
+ if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ return;
if (likely(data))
upa_writeq(ICLR_IDLE, data->iclr);
@@ -327,19 +359,37 @@ static void sun4v_irq_enable(unsigned int virt_irq)
err = sun4v_intr_settarget(ino, cpuid);
if (err != HV_EOK)
- printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
- ino, cpuid, err);
+ printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+ "err(%d)\n", ino, cpuid, err);
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
- printk("sun4v_intr_setstate(%x): "
+ printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
if (err != HV_EOK)
- printk("sun4v_intr_setenabled(%x): err(%d)\n",
+ printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
ino, err);
}
}
+static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
+{
+ struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+ unsigned int ino = bucket - &ivector_table[0];
+
+ if (likely(bucket)) {
+ unsigned long cpuid;
+ int err;
+
+ cpuid = irq_choose_cpu(virt_irq);
+
+ err = sun4v_intr_settarget(ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+ "err(%d)\n", ino, cpuid, err);
+ }
+}
+
static void sun4v_irq_disable(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
@@ -350,7 +400,7 @@ static void sun4v_irq_disable(unsigned int virt_irq)
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
if (err != HV_EOK)
- printk("sun4v_intr_setenabled(%x): "
+ printk(KERN_ERR "sun4v_intr_setenabled(%x): "
"err(%d)\n", ino, err);
}
}
@@ -373,13 +423,17 @@ static void sun4v_irq_end(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
unsigned int ino = bucket - &ivector_table[0];
+ struct irq_desc *desc = irq_desc + virt_irq;
+
+ if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ return;
if (likely(bucket)) {
int err;
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
- printk("sun4v_intr_setstate(%x): "
+ printk(KERN_ERR "sun4v_intr_setstate(%x): "
"err(%d)\n", ino, err);
}
}
@@ -387,7 +441,6 @@ static void sun4v_irq_end(unsigned int virt_irq)
static void sun4v_virq_enable(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
if (likely(bucket)) {
unsigned long cpuid, dev_handle, dev_ino;
@@ -395,45 +448,65 @@ static void sun4v_virq_enable(unsigned int virt_irq)
cpuid = irq_choose_cpu(virt_irq);
- dev_handle = ino & IMAP_IGN;
- dev_ino = ino & IMAP_INO;
+ dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
- printk("sun4v_vintr_set_target(%lx,%lx,%lu): "
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
- printk("sun4v_vintr_set_state(%lx,%lx,"
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_ENABLED);
if (err != HV_EOK)
- printk("sun4v_vintr_set_state(%lx,%lx,"
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_ENABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
}
+static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
+{
+ struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+
+ if (likely(bucket)) {
+ unsigned long cpuid, dev_handle, dev_ino;
+ int err;
+
+ cpuid = irq_choose_cpu(virt_irq);
+
+ dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
+
+ err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+ if (err != HV_EOK)
+ printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+ "err(%d)\n",
+ dev_handle, dev_ino, cpuid, err);
+ }
+}
+
static void sun4v_virq_disable(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
if (likely(bucket)) {
unsigned long dev_handle, dev_ino;
int err;
- dev_handle = ino & IMAP_IGN;
- dev_ino = ino & IMAP_INO;
+ dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
if (err != HV_EOK)
- printk("sun4v_vintr_set_state(%lx,%lx,"
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_DISABLED): err(%d)\n",
dev_handle, dev_ino, err);
}
@@ -442,19 +515,22 @@ static void sun4v_virq_disable(unsigned int virt_irq)
static void sun4v_virq_end(unsigned int virt_irq)
{
struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
- unsigned int ino = bucket - &ivector_table[0];
+ struct irq_desc *desc = irq_desc + virt_irq;
+
+ if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ return;
if (likely(bucket)) {
unsigned long dev_handle, dev_ino;
int err;
- dev_handle = ino & IMAP_IGN;
- dev_ino = ino & IMAP_INO;
+ dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+ dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
if (err != HV_EOK)
- printk("sun4v_vintr_set_state(%lx,%lx,"
+ printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
"HV_INTR_STATE_IDLE): err(%d)\n",
dev_handle, dev_ino, err);
}
@@ -477,6 +553,7 @@ static struct irq_chip sun4u_irq = {
.enable = sun4u_irq_enable,
.disable = sun4u_irq_disable,
.end = sun4u_irq_end,
+ .set_affinity = sun4u_set_affinity,
};
static struct irq_chip sun4u_irq_ack = {
@@ -485,6 +562,7 @@ static struct irq_chip sun4u_irq_ack = {
.disable = sun4u_irq_disable,
.ack = run_pre_handler,
.end = sun4u_irq_end,
+ .set_affinity = sun4u_set_affinity,
};
static struct irq_chip sun4v_irq = {
@@ -492,6 +570,7 @@ static struct irq_chip sun4v_irq = {
.enable = sun4v_irq_enable,
.disable = sun4v_irq_disable,
.end = sun4v_irq_end,
+ .set_affinity = sun4v_set_affinity,
};
static struct irq_chip sun4v_irq_ack = {
@@ -500,6 +579,7 @@ static struct irq_chip sun4v_irq_ack = {
.disable = sun4v_irq_disable,
.ack = run_pre_handler,
.end = sun4v_irq_end,
+ .set_affinity = sun4v_set_affinity,
};
#ifdef CONFIG_PCI_MSI
@@ -511,6 +591,7 @@ static struct irq_chip sun4v_msi = {
.disable = sun4v_msi_disable,
.ack = run_pre_handler,
.end = sun4v_irq_end,
+ .set_affinity = sun4v_set_affinity,
};
#endif
@@ -519,6 +600,7 @@ static struct irq_chip sun4v_virq = {
.enable = sun4v_virq_enable,
.disable = sun4v_virq_disable,
.end = sun4v_virq_end,
+ .set_affinity = sun4v_virt_set_affinity,
};
static struct irq_chip sun4v_virq_ack = {
@@ -527,6 +609,7 @@ static struct irq_chip sun4v_virq_ack = {
.disable = sun4v_virq_disable,
.ack = run_pre_handler,
.end = sun4v_virq_end,
+ .set_affinity = sun4v_virt_set_affinity,
};
void irq_install_pre_handler(int virt_irq,
@@ -636,11 +719,12 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
unsigned long sysino, hv_err;
+ unsigned int virq;
- BUG_ON(devhandle & ~IMAP_IGN);
- BUG_ON(devino & ~IMAP_INO);
+ BUG_ON(devhandle & devino);
sysino = devhandle | devino;
+ BUG_ON(sysino & ~(IMAP_IGN | IMAP_INO));
hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
if (hv_err) {
@@ -649,7 +733,12 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
prom_halt();
}
- return sun4v_build_common(sysino, &sun4v_virq);
+ virq = sun4v_build_common(sysino, &sun4v_virq);
+
+ virt_to_real_irq_table[virq].dev_handle = devhandle;
+ virt_to_real_irq_table[virq].dev_ino = devino;
+
+ return virq;
}
#ifdef CONFIG_PCI_MSI
@@ -671,7 +760,7 @@ unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
break;
}
if (devino >= msi_end)
- return 0;
+ return -ENOSPC;
sysino = sun4v_devino_to_sysino(devhandle, devino);
bucket = &ivector_table[sysino];
@@ -685,8 +774,8 @@ unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
if (unlikely(!data)) {
- prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
- prom_halt();
+ virt_irq_free(*virt_irq_p);
+ return -ENOMEM;
}
set_irq_chip_data(bucket->virt_irq, data);
@@ -739,6 +828,26 @@ void handler_irq(int irq, struct pt_regs *regs)
set_irq_regs(old_regs);
}
+#ifdef CONFIG_HOTPLUG_CPU
+void fixup_irqs(void)
+{
+ unsigned int irq;
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ if (irq_desc[irq].action &&
+ !(irq_desc[irq].status & IRQ_PER_CPU)) {
+ if (irq_desc[irq].chip->set_affinity)
+ irq_desc[irq].chip->set_affinity(irq,
+ irq_desc[irq].affinity);
+ }
+ spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ }
+}
+#endif
+
struct sun5_timer {
u64 count0;
u64 limit0;
@@ -839,7 +948,7 @@ static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type
}
}
-static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
+void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
struct trap_per_cpu *tb = &trap_block[this_cpu];
@@ -853,20 +962,10 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
tb->nonresum_qmask);
}
-static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
+static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
- unsigned long order = get_order(size);
- void *p = NULL;
-
- if (use_bootmem) {
- p = __alloc_bootmem_low(size, size, 0);
- } else {
- struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
- if (page)
- p = page_address(page);
- }
-
+ void *p = __alloc_bootmem_low(size, size, 0);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
prom_halt();
@@ -875,19 +974,10 @@ static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask
*pa_ptr = __pa(p);
}
-static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
+static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
- unsigned long order = get_order(size);
- void *p = NULL;
-
- if (use_bootmem) {
- p = __alloc_bootmem_low(size, size, 0);
- } else {
- struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
- if (page)
- p = page_address(page);
- }
+ void *p = __alloc_bootmem_low(size, size, 0);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
@@ -897,18 +987,14 @@ static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask,
*pa_ptr = __pa(p);
}
-static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
+static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
void *page;
BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
- if (use_bootmem)
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- else
- page = (void *) get_zeroed_page(GFP_ATOMIC);
-
+ page = alloc_bootmem_low_pages(PAGE_SIZE);
if (!page) {
prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
prom_halt();
@@ -919,30 +1005,27 @@ static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_
#endif
}
-/* Allocate and register the mondo and error queues for this cpu. */
-void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
+/* Allocate mondo and error queues for all possible cpus. */
+static void __init sun4v_init_mondo_queues(void)
{
- struct trap_per_cpu *tb = &trap_block[cpu];
+ int cpu;
- if (alloc) {
- alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
- alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
- alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
- alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
- alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
- alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);
+ for_each_possible_cpu(cpu) {
+ struct trap_per_cpu *tb = &trap_block[cpu];
- init_cpu_send_mondo_info(tb, use_bootmem);
- }
+ alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+ alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+ alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
+ alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+ alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+ alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
+ tb->nonresum_qmask);
- if (load) {
- if (cpu != hard_smp_processor_id()) {
- prom_printf("SUN4V: init mondo on cpu %d not %d\n",
- cpu, hard_smp_processor_id());
- prom_halt();
- }
- sun4v_register_mondo_queues(cpu);
+ init_cpu_send_mondo_info(tb);
}
+
+ /* Load up the boot cpu's entries. */
+ sun4v_register_mondo_queues(hard_smp_processor_id());
}
static struct irqaction timer_irq_action = {
@@ -957,7 +1040,7 @@ void __init init_IRQ(void)
memset(&ivector_table[0], 0, sizeof(ivector_table));
if (tlb_type == hypervisor)
- sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);
+ sun4v_init_mondo_queues();
/* We need to clear any IRQ's pending in the soft interrupt
* registers, a spurious one could be left around from the
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index 6a6882e57ff2..0f19dce1c905 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -79,6 +79,7 @@ static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
while (dp) {
struct sparc_isa_device *isa_dev;
+ struct dev_archdata *sd;
isa_dev = kzalloc(sizeof(*isa_dev), GFP_KERNEL);
if (!isa_dev) {
@@ -86,6 +87,12 @@ static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
return;
}
+ sd = &isa_dev->ofdev.dev.archdata;
+ sd->prom_node = dp;
+ sd->op = &isa_dev->ofdev;
+ sd->iommu = isa_br->ofdev.dev.parent->archdata.iommu;
+ sd->stc = isa_br->ofdev.dev.parent->archdata.stc;
+
isa_dev->ofdev.node = dp;
isa_dev->ofdev.dev.parent = &isa_br->ofdev.dev;
isa_dev->ofdev.dev.bus = &isa_bus_type;
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index f0e16045fb16..856659bb1311 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -6,6 +6,10 @@
#include <linux/types.h>
#include <linux/bootmem.h>
#include <linux/log2.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
@@ -29,7 +33,7 @@ struct mdesc_hdr {
u32 node_sz; /* node block size */
u32 name_sz; /* name block size */
u32 data_sz; /* data block size */
-};
+} __attribute__((aligned(16)));
struct mdesc_elem {
u8 tag;
@@ -53,338 +57,536 @@ struct mdesc_elem {
} d;
};
-static struct mdesc_hdr *main_mdesc;
-static struct mdesc_node *allnodes;
+struct mdesc_mem_ops {
+ struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
+ void (*free)(struct mdesc_handle *handle);
+};
+
+struct mdesc_handle {
+ struct list_head list;
+ struct mdesc_mem_ops *mops;
+ void *self_base;
+ atomic_t refcnt;
+ unsigned int handle_size;
+ struct mdesc_hdr mdesc;
+};
-static struct mdesc_node *allnodes_tail;
-static unsigned int unique_id;
+static void mdesc_handle_init(struct mdesc_handle *hp,
+ unsigned int handle_size,
+ void *base)
+{
+ BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));
-static struct mdesc_node **mdesc_hash;
-static unsigned int mdesc_hash_size;
+ memset(hp, 0, handle_size);
+ INIT_LIST_HEAD(&hp->list);
+ hp->self_base = base;
+ atomic_set(&hp->refcnt, 1);
+ hp->handle_size = handle_size;
+}
-static inline unsigned int node_hashfn(u64 node)
+static struct mdesc_handle * __init mdesc_bootmem_alloc(unsigned int mdesc_size)
{
- return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16)))
- & (mdesc_hash_size - 1);
+ struct mdesc_handle *hp;
+ unsigned int handle_size, alloc_size;
+
+ handle_size = (sizeof(struct mdesc_handle) -
+ sizeof(struct mdesc_hdr) +
+ mdesc_size);
+ alloc_size = PAGE_ALIGN(handle_size);
+
+ hp = __alloc_bootmem(alloc_size, PAGE_SIZE, 0UL);
+ if (hp)
+ mdesc_handle_init(hp, handle_size, hp);
+
+ return hp;
}
-static inline void hash_node(struct mdesc_node *mp)
+static void mdesc_bootmem_free(struct mdesc_handle *hp)
{
- struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)];
+ unsigned int alloc_size, handle_size = hp->handle_size;
+ unsigned long start, end;
- mp->hash_next = *head;
- *head = mp;
+ BUG_ON(atomic_read(&hp->refcnt) != 0);
+ BUG_ON(!list_empty(&hp->list));
- if (allnodes_tail) {
- allnodes_tail->allnodes_next = mp;
- allnodes_tail = mp;
- } else {
- allnodes = allnodes_tail = mp;
+ alloc_size = PAGE_ALIGN(handle_size);
+
+ start = (unsigned long) hp;
+ end = start + alloc_size;
+
+ while (start < end) {
+ struct page *p;
+
+ p = virt_to_page(start);
+ ClearPageReserved(p);
+ __free_page(p);
+ start += PAGE_SIZE;
}
}
-static struct mdesc_node *find_node(u64 node)
+static struct mdesc_mem_ops bootmem_mdesc_ops = {
+ .alloc = mdesc_bootmem_alloc,
+ .free = mdesc_bootmem_free,
+};
+
+static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
- struct mdesc_node *mp = mdesc_hash[node_hashfn(node)];
+ unsigned int handle_size;
+ void *base;
+
+ handle_size = (sizeof(struct mdesc_handle) -
+ sizeof(struct mdesc_hdr) +
+ mdesc_size);
- while (mp) {
- if (mp->node == node)
- return mp;
+ base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
+ if (base) {
+ struct mdesc_handle *hp;
+ unsigned long addr;
- mp = mp->hash_next;
+ addr = (unsigned long)base;
+ addr = (addr + 15UL) & ~15UL;
+ hp = (struct mdesc_handle *) addr;
+
+ mdesc_handle_init(hp, handle_size, base);
+ return hp;
}
+
return NULL;
}
-struct property *md_find_property(const struct mdesc_node *mp,
- const char *name,
- int *lenp)
+static void mdesc_kfree(struct mdesc_handle *hp)
{
- struct property *pp;
+ BUG_ON(atomic_read(&hp->refcnt) != 0);
+ BUG_ON(!list_empty(&hp->list));
- for (pp = mp->properties; pp != 0; pp = pp->next) {
- if (strcasecmp(pp->name, name) == 0) {
- if (lenp)
- *lenp = pp->length;
- break;
- }
- }
- return pp;
+ kfree(hp->self_base);
}
-EXPORT_SYMBOL(md_find_property);
-/*
- * Find a property with a given name for a given node
- * and return the value.
- */
-const void *md_get_property(const struct mdesc_node *mp, const char *name,
- int *lenp)
+static struct mdesc_mem_ops kmalloc_mdesc_memops = {
+ .alloc = mdesc_kmalloc,
+ .free = mdesc_kfree,
+};
+
+static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
+ struct mdesc_mem_ops *mops)
{
- struct property *pp = md_find_property(mp, name, lenp);
- return pp ? pp->value : NULL;
+ struct mdesc_handle *hp = mops->alloc(mdesc_size);
+
+ if (hp)
+ hp->mops = mops;
+
+ return hp;
}
-EXPORT_SYMBOL(md_get_property);
-struct mdesc_node *md_find_node_by_name(struct mdesc_node *from,
- const char *name)
+static void mdesc_free(struct mdesc_handle *hp)
{
- struct mdesc_node *mp;
+ hp->mops->free(hp);
+}
- mp = from ? from->allnodes_next : allnodes;
- for (; mp != NULL; mp = mp->allnodes_next) {
- if (strcmp(mp->name, name) == 0)
- break;
+static struct mdesc_handle *cur_mdesc;
+static LIST_HEAD(mdesc_zombie_list);
+static DEFINE_SPINLOCK(mdesc_lock);
+
+struct mdesc_handle *mdesc_grab(void)
+{
+ struct mdesc_handle *hp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdesc_lock, flags);
+ hp = cur_mdesc;
+ if (hp)
+ atomic_inc(&hp->refcnt);
+ spin_unlock_irqrestore(&mdesc_lock, flags);
+
+ return hp;
+}
+EXPORT_SYMBOL(mdesc_grab);
+
+void mdesc_release(struct mdesc_handle *hp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdesc_lock, flags);
+ if (atomic_dec_and_test(&hp->refcnt)) {
+ list_del_init(&hp->list);
+ hp->mops->free(hp);
}
- return mp;
+ spin_unlock_irqrestore(&mdesc_lock, flags);
}
-EXPORT_SYMBOL(md_find_node_by_name);
+EXPORT_SYMBOL(mdesc_release);
-static unsigned int mdesc_early_allocated;
+static DEFINE_MUTEX(mdesc_mutex);
+static struct mdesc_notifier_client *client_list;
-static void * __init mdesc_early_alloc(unsigned long size)
+void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
- void *ret;
+ u64 node;
- ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
- if (ret == NULL) {
- prom_printf("MDESC: alloc of %lu bytes failed.\n", size);
- prom_halt();
+ mutex_lock(&mdesc_mutex);
+ client->next = client_list;
+ client_list = client;
+
+ mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
+ client->add(cur_mdesc, node);
+
+ mutex_unlock(&mdesc_mutex);
+}
+
+static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
+{
+ const u64 *id;
+ u64 a;
+
+ id = NULL;
+ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+ u64 target;
+
+ target = mdesc_arc_target(hp, a);
+ id = mdesc_get_property(hp, target,
+ "cfg-handle", NULL);
+ if (id)
+ break;
}
- memset(ret, 0, size);
+ return id;
+}
- mdesc_early_allocated += size;
+/* Run 'func' on nodes which are in A but not in B. */
+static void invoke_on_missing(const char *name,
+ struct mdesc_handle *a,
+ struct mdesc_handle *b,
+ void (*func)(struct mdesc_handle *, u64))
+{
+ u64 node;
- return ret;
+ mdesc_for_each_node_by_name(a, node, name) {
+ int found = 0, is_vdc_port = 0;
+ const char *name_prop;
+ const u64 *id;
+ u64 fnode;
+
+ name_prop = mdesc_get_property(a, node, "name", NULL);
+ if (name_prop && !strcmp(name_prop, "vdc-port")) {
+ is_vdc_port = 1;
+ id = parent_cfg_handle(a, node);
+ } else
+ id = mdesc_get_property(a, node, "id", NULL);
+
+ if (!id) {
+ printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
+ (name_prop ? name_prop : name));
+ continue;
+ }
+
+ mdesc_for_each_node_by_name(b, fnode, name) {
+ const u64 *fid;
+
+ if (is_vdc_port) {
+ name_prop = mdesc_get_property(b, fnode,
+ "name", NULL);
+ if (!name_prop ||
+ strcmp(name_prop, "vdc-port"))
+ continue;
+ fid = parent_cfg_handle(b, fnode);
+ if (!fid) {
+ printk(KERN_ERR "MD: Cannot find ID "
+ "for vdc-port node.\n");
+ continue;
+ }
+ } else
+ fid = mdesc_get_property(b, fnode,
+ "id", NULL);
+
+ if (*id == *fid) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ func(a, node);
+ }
}
-static unsigned int __init count_arcs(struct mdesc_elem *ep)
+static void notify_one(struct mdesc_notifier_client *p,
+ struct mdesc_handle *old_hp,
+ struct mdesc_handle *new_hp)
{
- unsigned int ret = 0;
+ invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
+ invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
+}
- ep++;
- while (ep->tag != MD_NODE_END) {
- if (ep->tag == MD_PROP_ARC)
- ret++;
- ep++;
+static void mdesc_notify_clients(struct mdesc_handle *old_hp,
+ struct mdesc_handle *new_hp)
+{
+ struct mdesc_notifier_client *p = client_list;
+
+ while (p) {
+ notify_one(p, old_hp, new_hp);
+ p = p->next;
}
- return ret;
}
-static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names)
+void mdesc_update(void)
{
- unsigned int num_arcs = count_arcs(ep);
- struct mdesc_node *mp;
+ unsigned long len, real_len, status;
+ struct mdesc_handle *hp, *orig_hp;
+ unsigned long flags;
+
+ mutex_lock(&mdesc_mutex);
+
+ (void) sun4v_mach_desc(0UL, 0UL, &len);
+
+ hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
+ if (!hp) {
+ printk(KERN_ERR "MD: mdesc alloc fails\n");
+ goto out;
+ }
- mp = mdesc_early_alloc(sizeof(*mp) +
- (num_arcs * sizeof(struct mdesc_arc)));
- mp->name = names + ep->name_offset;
- mp->node = node;
- mp->unique_id = unique_id++;
- mp->num_arcs = num_arcs;
+ status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
+ if (status != HV_EOK || real_len > len) {
+ printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
+ status);
+ atomic_dec(&hp->refcnt);
+ mdesc_free(hp);
+ goto out;
+ }
+
+ spin_lock_irqsave(&mdesc_lock, flags);
+ orig_hp = cur_mdesc;
+ cur_mdesc = hp;
+ spin_unlock_irqrestore(&mdesc_lock, flags);
+
+ mdesc_notify_clients(orig_hp, hp);
+
+ spin_lock_irqsave(&mdesc_lock, flags);
+ if (atomic_dec_and_test(&orig_hp->refcnt))
+ mdesc_free(orig_hp);
+ else
+ list_add(&orig_hp->list, &mdesc_zombie_list);
+ spin_unlock_irqrestore(&mdesc_lock, flags);
- hash_node(mp);
+out:
+ mutex_unlock(&mdesc_mutex);
}
-static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
+static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
return (struct mdesc_elem *) (mdesc + 1);
}
-static inline void *name_block(struct mdesc_hdr *mdesc)
+static void *name_block(struct mdesc_hdr *mdesc)
{
return ((void *) node_block(mdesc)) + mdesc->node_sz;
}
-static inline void *data_block(struct mdesc_hdr *mdesc)
+static void *data_block(struct mdesc_hdr *mdesc)
{
return ((void *) name_block(mdesc)) + mdesc->name_sz;
}
-/* In order to avoid recursion (the graph can be very deep) we use a
- * two pass algorithm. First we allocate all the nodes and hash them.
- * Then we iterate over each node, filling in the arcs and properties.
- */
-static void __init build_all_nodes(struct mdesc_hdr *mdesc)
+u64 mdesc_node_by_name(struct mdesc_handle *hp,
+ u64 from_node, const char *name)
{
- struct mdesc_elem *start, *ep;
- struct mdesc_node *mp;
- const char *names;
- void *data;
- u64 last_node;
+ struct mdesc_elem *ep = node_block(&hp->mdesc);
+ const char *names = name_block(&hp->mdesc);
+ u64 last_node = hp->mdesc.node_sz / 16;
+ u64 ret;
+
+ if (from_node == MDESC_NODE_NULL) {
+ ret = from_node = 0;
+ } else if (from_node >= last_node) {
+ return MDESC_NODE_NULL;
+ } else {
+ ret = ep[from_node].d.val;
+ }
- start = ep = node_block(mdesc);
- last_node = mdesc->node_sz / 16;
+ while (ret < last_node) {
+ if (ep[ret].tag != MD_NODE)
+ return MDESC_NODE_NULL;
+ if (!strcmp(names + ep[ret].name_offset, name))
+ break;
+ ret = ep[ret].d.val;
+ }
+ if (ret >= last_node)
+ ret = MDESC_NODE_NULL;
+ return ret;
+}
+EXPORT_SYMBOL(mdesc_node_by_name);
- names = name_block(mdesc);
+const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
+ const char *name, int *lenp)
+{
+ const char *names = name_block(&hp->mdesc);
+ u64 last_node = hp->mdesc.node_sz / 16;
+ void *data = data_block(&hp->mdesc);
+ struct mdesc_elem *ep;
- while (1) {
- u64 node = ep - start;
+ if (node == MDESC_NODE_NULL || node >= last_node)
+ return NULL;
- if (ep->tag == MD_LIST_END)
+ ep = node_block(&hp->mdesc) + node;
+ ep++;
+ for (; ep->tag != MD_NODE_END; ep++) {
+ void *val = NULL;
+ int len = 0;
+
+ switch (ep->tag) {
+ case MD_PROP_VAL:
+ val = &ep->d.val;
+ len = 8;
break;
- if (ep->tag != MD_NODE) {
- prom_printf("MDESC: Inconsistent element list.\n");
- prom_halt();
- }
-
- mdesc_node_alloc(node, ep, names);
+ case MD_PROP_STR:
+ case MD_PROP_DATA:
+ val = data + ep->d.data.data_offset;
+ len = ep->d.data.data_len;
+ break;
- if (ep->d.val >= last_node) {
- printk("MDESC: Warning, early break out of node scan.\n");
- printk("MDESC: Next node [%lu] last_node [%lu].\n",
- node, last_node);
+ default:
break;
}
+ if (!val)
+ continue;
- ep = start + ep->d.val;
+ if (!strcmp(names + ep->name_offset, name)) {
+ if (lenp)
+ *lenp = len;
+ return val;
+ }
}
- data = data_block(mdesc);
- for (mp = allnodes; mp; mp = mp->allnodes_next) {
- struct mdesc_elem *ep = start + mp->node;
- struct property **link = &mp->properties;
- unsigned int this_arc = 0;
-
- ep++;
- while (ep->tag != MD_NODE_END) {
- switch (ep->tag) {
- case MD_PROP_ARC: {
- struct mdesc_node *target;
-
- if (this_arc >= mp->num_arcs) {
- prom_printf("MDESC: ARC overrun [%u:%u]\n",
- this_arc, mp->num_arcs);
- prom_halt();
- }
- target = find_node(ep->d.val);
- if (!target) {
- printk("MDESC: Warning, arc points to "
- "missing node, ignoring.\n");
- break;
- }
- mp->arcs[this_arc].name =
- (names + ep->name_offset);
- mp->arcs[this_arc].arc = target;
- this_arc++;
- break;
- }
+ return NULL;
+}
+EXPORT_SYMBOL(mdesc_get_property);
- case MD_PROP_VAL:
- case MD_PROP_STR:
- case MD_PROP_DATA: {
- struct property *p = mdesc_early_alloc(sizeof(*p));
-
- p->unique_id = unique_id++;
- p->name = (char *) names + ep->name_offset;
- if (ep->tag == MD_PROP_VAL) {
- p->value = &ep->d.val;
- p->length = 8;
- } else {
- p->value = data + ep->d.data.data_offset;
- p->length = ep->d.data.data_len;
- }
- *link = p;
- link = &p->next;
- break;
- }
+u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
+{
+ struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
+ const char *names = name_block(&hp->mdesc);
+ u64 last_node = hp->mdesc.node_sz / 16;
- case MD_NOOP:
- break;
+ if (from == MDESC_NODE_NULL || from >= last_node)
+ return MDESC_NODE_NULL;
- default:
- printk("MDESC: Warning, ignoring unknown tag type %02x\n",
- ep->tag);
- }
- ep++;
- }
+ ep = base + from;
+
+ ep++;
+ for (; ep->tag != MD_NODE_END; ep++) {
+ if (ep->tag != MD_PROP_ARC)
+ continue;
+
+ if (strcmp(names + ep->name_offset, arc_type))
+ continue;
+
+ return ep - base;
}
+
+ return MDESC_NODE_NULL;
}
+EXPORT_SYMBOL(mdesc_next_arc);
-static unsigned int __init count_nodes(struct mdesc_hdr *mdesc)
+u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
- struct mdesc_elem *ep = node_block(mdesc);
- struct mdesc_elem *end;
- unsigned int cnt = 0;
+ struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
- end = ((void *)ep) + mdesc->node_sz;
- while (ep < end) {
- if (ep->tag == MD_NODE)
- cnt++;
- ep++;
- }
- return cnt;
+ ep = base + arc;
+
+ return ep->d.val;
+}
+EXPORT_SYMBOL(mdesc_arc_target);
+
+const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
+{
+ struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
+ const char *names = name_block(&hp->mdesc);
+ u64 last_node = hp->mdesc.node_sz / 16;
+
+ if (node == MDESC_NODE_NULL || node >= last_node)
+ return NULL;
+
+ ep = base + node;
+ if (ep->tag != MD_NODE)
+ return NULL;
+
+ return names + ep->name_offset;
}
+EXPORT_SYMBOL(mdesc_node_name);
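
[Note: the accessors exported above form the public MD interface after this patch: a consumer grabs a reference-counted handle, walks nodes and properties, and releases the handle. A rough in-kernel sketch, assuming the asm/mdesc.h header this patch introduces; the function and macro names are taken from the hunks above:

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mdesc.h>

/* Log the "id" property of every "cpu" node in the current MD.
 * Illustrative only; mirrors the iteration pattern used in this file. */
static void __init log_cpu_ids(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 node;

	if (!hp)
		return;

	mdesc_for_each_node_by_name(hp, node, "cpu") {
		const u64 *id = mdesc_get_property(hp, node, "id", NULL);

		if (id)
			printk(KERN_INFO "cpu node %lu has id %lu\n",
			       (unsigned long) node, (unsigned long) *id);
	}

	mdesc_release(hp);
}
]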
static void __init report_platform_properties(void)
{
- struct mdesc_node *pn = md_find_node_by_name(NULL, "platform");
+ struct mdesc_handle *hp = mdesc_grab();
+ u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
const char *s;
const u64 *v;
- if (!pn) {
+ if (pn == MDESC_NODE_NULL) {
prom_printf("No platform node in machine-description.\n");
prom_halt();
}
- s = md_get_property(pn, "banner-name", NULL);
+ s = mdesc_get_property(hp, pn, "banner-name", NULL);
printk("PLATFORM: banner-name [%s]\n", s);
- s = md_get_property(pn, "name", NULL);
+ s = mdesc_get_property(hp, pn, "name", NULL);
printk("PLATFORM: name [%s]\n", s);
- v = md_get_property(pn, "hostid", NULL);
+ v = mdesc_get_property(hp, pn, "hostid", NULL);
if (v)
printk("PLATFORM: hostid [%08lx]\n", *v);
- v = md_get_property(pn, "serial#", NULL);
+ v = mdesc_get_property(hp, pn, "serial#", NULL);
if (v)
printk("PLATFORM: serial# [%08lx]\n", *v);
- v = md_get_property(pn, "stick-frequency", NULL);
+ v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
printk("PLATFORM: stick-frequency [%08lx]\n", *v);
- v = md_get_property(pn, "mac-address", NULL);
+ v = mdesc_get_property(hp, pn, "mac-address", NULL);
if (v)
printk("PLATFORM: mac-address [%lx]\n", *v);
- v = md_get_property(pn, "watchdog-resolution", NULL);
+ v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
if (v)
printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v);
- v = md_get_property(pn, "watchdog-max-timeout", NULL);
+ v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
if (v)
printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v);
- v = md_get_property(pn, "max-cpus", NULL);
+ v = mdesc_get_property(hp, pn, "max-cpus", NULL);
if (v)
printk("PLATFORM: max-cpus [%lu]\n", *v);
-}
-static int inline find_in_proplist(const char *list, const char *match, int len)
-{
- while (len > 0) {
- int l;
+#ifdef CONFIG_SMP
+ {
+ int max_cpu, i;
- if (!strcmp(list, match))
- return 1;
- l = strlen(list) + 1;
- list += l;
- len -= l;
+ if (v) {
+ max_cpu = *v;
+ if (max_cpu > NR_CPUS)
+ max_cpu = NR_CPUS;
+ } else {
+ max_cpu = NR_CPUS;
+ }
+ for (i = 0; i < max_cpu; i++)
+ cpu_set(i, cpu_possible_map);
}
- return 0;
+#endif
+
+ mdesc_release(hp);
}
-static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp)
+static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
+ struct mdesc_handle *hp,
+ u64 mp)
{
- const u64 *level = md_get_property(mp, "level", NULL);
- const u64 *size = md_get_property(mp, "size", NULL);
- const u64 *line_size = md_get_property(mp, "line-size", NULL);
+ const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
+ const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
+ const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
const char *type;
int type_len;
- type = md_get_property(mp, "type", &type_len);
+ type = mdesc_get_property(hp, mp, "type", &type_len);
switch (*level) {
case 1:
- if (find_in_proplist(type, "instn", type_len)) {
+ if (of_find_in_proplist(type, "instn", type_len)) {
c->icache_size = *size;
c->icache_line_size = *line_size;
- } else if (find_in_proplist(type, "data", type_len)) {
+ } else if (of_find_in_proplist(type, "data", type_len)) {
c->dcache_size = *size;
c->dcache_line_size = *line_size;
}
@@ -400,48 +602,45 @@ static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp)
}
if (*level == 1) {
- unsigned int i;
-
- for (i = 0; i < mp->num_arcs; i++) {
- struct mdesc_node *t = mp->arcs[i].arc;
+ u64 a;
- if (strcmp(mp->arcs[i].name, "fwd"))
- continue;
+ mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+ u64 target = mdesc_arc_target(hp, a);
+ const char *name = mdesc_node_name(hp, target);
- if (!strcmp(t->name, "cache"))
- fill_in_one_cache(c, t);
+ if (!strcmp(name, "cache"))
+ fill_in_one_cache(c, hp, target);
}
}
}
-static void __init mark_core_ids(struct mdesc_node *mp, int core_id)
+static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
+ int core_id)
{
- unsigned int i;
+ u64 a;
- for (i = 0; i < mp->num_arcs; i++) {
- struct mdesc_node *t = mp->arcs[i].arc;
+ mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
+ u64 t = mdesc_arc_target(hp, a);
+ const char *name;
const u64 *id;
- if (strcmp(mp->arcs[i].name, "back"))
- continue;
-
- if (!strcmp(t->name, "cpu")) {
- id = md_get_property(t, "id", NULL);
+ name = mdesc_node_name(hp, t);
+ if (!strcmp(name, "cpu")) {
+ id = mdesc_get_property(hp, t, "id", NULL);
if (*id < NR_CPUS)
cpu_data(*id).core_id = core_id;
} else {
- unsigned int j;
+ u64 j;
- for (j = 0; j < t->num_arcs; j++) {
- struct mdesc_node *n = t->arcs[j].arc;
+ mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
+ u64 n = mdesc_arc_target(hp, j);
+ const char *n_name;
- if (strcmp(t->arcs[j].name, "back"))
+ n_name = mdesc_node_name(hp, n);
+ if (strcmp(n_name, "cpu"))
continue;
- if (strcmp(n->name, "cpu"))
- continue;
-
- id = md_get_property(n, "id", NULL);
+ id = mdesc_get_property(hp, n, "id", NULL);
if (*id < NR_CPUS)
cpu_data(*id).core_id = core_id;
}
@@ -449,78 +648,81 @@ static void __init mark_core_ids(struct mdesc_node *mp, int core_id)
}
}
-static void __init set_core_ids(void)
+static void __devinit set_core_ids(struct mdesc_handle *hp)
{
- struct mdesc_node *mp;
int idx;
+ u64 mp;
idx = 1;
- md_for_each_node_by_name(mp, "cache") {
- const u64 *level = md_get_property(mp, "level", NULL);
+ mdesc_for_each_node_by_name(hp, mp, "cache") {
+ const u64 *level;
const char *type;
int len;
+ level = mdesc_get_property(hp, mp, "level", NULL);
if (*level != 1)
continue;
- type = md_get_property(mp, "type", &len);
- if (!find_in_proplist(type, "instn", len))
+ type = mdesc_get_property(hp, mp, "type", &len);
+ if (!of_find_in_proplist(type, "instn", len))
continue;
- mark_core_ids(mp, idx);
+ mark_core_ids(hp, mp, idx);
idx++;
}
}
-static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
+ int proc_id)
{
- int i;
+ u64 a;
- for (i = 0; i < mp->num_arcs; i++) {
- struct mdesc_node *t = mp->arcs[i].arc;
+ mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
+ u64 t = mdesc_arc_target(hp, a);
+ const char *name;
const u64 *id;
- if (strcmp(mp->arcs[i].name, "back"))
+ name = mdesc_node_name(hp, t);
+ if (strcmp(name, "cpu"))
continue;
- if (strcmp(t->name, "cpu"))
- continue;
-
- id = md_get_property(t, "id", NULL);
+ id = mdesc_get_property(hp, t, "id", NULL);
if (*id < NR_CPUS)
cpu_data(*id).proc_id = proc_id;
}
}
-static void __init __set_proc_ids(const char *exec_unit_name)
+static void __devinit __set_proc_ids(struct mdesc_handle *hp,
+ const char *exec_unit_name)
{
- struct mdesc_node *mp;
int idx;
+ u64 mp;
idx = 0;
- md_for_each_node_by_name(mp, exec_unit_name) {
+ mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
const char *type;
int len;
- type = md_get_property(mp, "type", &len);
- if (!find_in_proplist(type, "int", len) &&
- !find_in_proplist(type, "integer", len))
+ type = mdesc_get_property(hp, mp, "type", &len);
+ if (!of_find_in_proplist(type, "int", len) &&
+ !of_find_in_proplist(type, "integer", len))
continue;
- mark_proc_ids(mp, idx);
+ mark_proc_ids(hp, mp, idx);
idx++;
}
}
-static void __init set_proc_ids(void)
+static void __devinit set_proc_ids(struct mdesc_handle *hp)
{
- __set_proc_ids("exec_unit");
- __set_proc_ids("exec-unit");
+ __set_proc_ids(hp, "exec_unit");
+ __set_proc_ids(hp, "exec-unit");
}
-static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
+static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
+ unsigned char def)
{
u64 val;
@@ -538,42 +740,50 @@ use_default:
*mask = ((1U << def) * 64U) - 1U;
}
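
[Note: a quick worked example of the default case above, assuming each mondo queue entry is 64 bytes: with def = 7 the queue holds 2^7 = 128 entries, so

	*mask = (1 << 7) * 64 - 1 = 8192 - 1 = 0x1fff

i.e. the mask wraps byte offsets within an 8 KB queue.]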
-static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb)
+static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
+ struct trap_per_cpu *tb)
{
const u64 *val;
- val = md_get_property(mp, "q-cpu-mondo-#bits", NULL);
+ val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);
- val = md_get_property(mp, "q-dev-mondo-#bits", NULL);
+ val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);
- val = md_get_property(mp, "q-resumable-#bits", NULL);
+ val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
get_one_mondo_bits(val, &tb->resum_qmask, 6);
- val = md_get_property(mp, "q-nonresumable-#bits", NULL);
+ val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}
-static void __init mdesc_fill_in_cpu_data(void)
+void __devinit mdesc_fill_in_cpu_data(cpumask_t mask)
{
- struct mdesc_node *mp;
+ struct mdesc_handle *hp = mdesc_grab();
+ u64 mp;
ncpus_probed = 0;
- md_for_each_node_by_name(mp, "cpu") {
- const u64 *id = md_get_property(mp, "id", NULL);
- const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL);
+ mdesc_for_each_node_by_name(hp, mp, "cpu") {
+ const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
+ const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
struct trap_per_cpu *tb;
cpuinfo_sparc *c;
- unsigned int i;
int cpuid;
+ u64 a;
ncpus_probed++;
cpuid = *id;
#ifdef CONFIG_SMP
- if (cpuid >= NR_CPUS)
+ if (cpuid >= NR_CPUS) {
+ printk(KERN_WARNING "Ignoring CPU %d which is "
+ ">= NR_CPUS (%d)\n",
+ cpuid, NR_CPUS);
+ continue;
+ }
+ if (!cpu_isset(cpuid, mask))
continue;
#else
/* On uniprocessor we only want the values for the
@@ -589,35 +799,30 @@ static void __init mdesc_fill_in_cpu_data(void)
c->clock_tick = *cfreq;
tb = &trap_block[cpuid];
- get_mondo_data(mp, tb);
+ get_mondo_data(hp, mp, tb);
- for (i = 0; i < mp->num_arcs; i++) {
- struct mdesc_node *t = mp->arcs[i].arc;
- unsigned int j;
-
- if (strcmp(mp->arcs[i].name, "fwd"))
- continue;
+ mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+ u64 j, t = mdesc_arc_target(hp, a);
+ const char *t_name;
- if (!strcmp(t->name, "cache")) {
- fill_in_one_cache(c, t);
+ t_name = mdesc_node_name(hp, t);
+ if (!strcmp(t_name, "cache")) {
+ fill_in_one_cache(c, hp, t);
continue;
}
- for (j = 0; j < t->num_arcs; j++) {
- struct mdesc_node *n;
+ mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
+ u64 n = mdesc_arc_target(hp, j);
+ const char *n_name;
- n = t->arcs[j].arc;
- if (strcmp(t->arcs[j].name, "fwd"))
- continue;
-
- if (!strcmp(n->name, "cache"))
- fill_in_one_cache(c, n);
+ n_name = mdesc_node_name(hp, n);
+ if (!strcmp(n_name, "cache"))
+ fill_in_one_cache(c, hp, n);
}
}
#ifdef CONFIG_SMP
cpu_set(cpuid, cpu_present_map);
- cpu_set(cpuid, phys_cpu_present_map);
#endif
c->core_id = 0;
@@ -628,45 +833,80 @@ static void __init mdesc_fill_in_cpu_data(void)
sparc64_multi_core = 1;
#endif
- set_core_ids();
- set_proc_ids();
+ set_core_ids(hp);
+ set_proc_ids(hp);
smp_fill_in_sib_core_maps();
+
+ mdesc_release(hp);
}
+static ssize_t mdesc_read(struct file *file, char __user *buf,
+ size_t len, loff_t *offp)
+{
+ struct mdesc_handle *hp = mdesc_grab();
+ int err;
+
+ if (!hp)
+ return -ENODEV;
+
+ err = hp->handle_size;
+ if (len < hp->handle_size)
+ err = -EMSGSIZE;
+ else if (copy_to_user(buf, &hp->mdesc, hp->handle_size))
+ err = -EFAULT;
+ mdesc_release(hp);
+
+ return err;
+}
+
+static const struct file_operations mdesc_fops = {
+ .read = mdesc_read,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice mdesc_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mdesc",
+ .fops = &mdesc_fops,
+};
+
+static int __init mdesc_misc_init(void)
+{
+ return misc_register(&mdesc_misc);
+}
+
+__initcall(mdesc_misc_init);
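
[Note: the misc device above exports the raw MD to user space. A small user-space sketch of reading it, assuming a /dev/mdesc node exists for the dynamically assigned minor and that a 1 MB buffer is large enough (the read handler above returns -EMSGSIZE for short buffers):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	size_t bufsz = 1 << 20;
	char *buf = malloc(bufsz);
	int fd = open("/dev/mdesc", O_RDONLY);
	ssize_t n;

	if (fd < 0 || !buf) {
		perror("setup");
		return 1;
	}

	n = read(fd, buf, bufsz);
	if (n < 0)
		perror("read");
	else
		printf("machine description: %zd bytes\n", n);

	close(fd);
	free(buf);
	return 0;
}
]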
+
void __init sun4v_mdesc_init(void)
{
+ struct mdesc_handle *hp;
unsigned long len, real_len, status;
+ cpumask_t mask;
(void) sun4v_mach_desc(0UL, 0UL, &len);
printk("MDESC: Size is %lu bytes.\n", len);
- main_mdesc = mdesc_early_alloc(len);
+ hp = mdesc_alloc(len, &bootmem_mdesc_ops);
+ if (hp == NULL) {
+ prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
+ prom_halt();
+ }
- status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len);
+ status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
if (status != HV_EOK || real_len > len) {
prom_printf("sun4v_mach_desc fails, err(%lu), "
"len(%lu), real_len(%lu)\n",
status, len, real_len);
+ mdesc_free(hp);
prom_halt();
}
- len = count_nodes(main_mdesc);
- printk("MDESC: %lu nodes.\n", len);
-
- len = roundup_pow_of_two(len);
-
- mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *));
- mdesc_hash_size = len;
-
- printk("MDESC: Hash size %lu entries.\n", len);
-
- build_all_nodes(main_mdesc);
-
- printk("MDESC: Built graph with %u bytes of memory.\n",
- mdesc_early_allocated);
+ cur_mdesc = hp;
report_platform_properties();
- mdesc_fill_in_cpu_data();
+
+ cpus_setall(mask);
+ mdesc_fill_in_cpu_data(mask);
}
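
[Note: sun4v_mach_desc() is used above in the usual query-then-fetch way: a first call with a zero-length buffer reports the required size, a second call fills a buffer of that size, and real_len > len is treated as an error. A stand-alone sketch of the same pattern with a hypothetical get_blob() API, not the hypervisor interface:

#include <stdlib.h>
#include <string.h>

static const char blob[] = "machine-description";

/* Returns the required size; copies up to 'len' bytes when buf != NULL. */
static size_t get_blob(void *buf, size_t len)
{
	if (buf)
		memcpy(buf, blob, len < sizeof(blob) ? len : sizeof(blob));
	return sizeof(blob);
}

static void *fetch_blob(size_t *sizep)
{
	size_t len = get_blob(NULL, 0);		/* 1st call: size only   */
	void *buf = malloc(len);

	if (buf)
		get_blob(buf, len);		/* 2nd call: actual data */
	*sizep = len;
	return buf;
}
]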
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 6676b93219dc..4cc77485f536 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -1,132 +1,13 @@
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-
-#include <asm/errno.h>
-#include <asm/of_device.h>
-
-/**
- * of_match_device - Tell if an of_device structure has a matching
- * of_match structure
- * @ids: array of of device match structures to search in
- * @dev: the of device structure to match against
- *
- * Used by a driver to check whether an of_device present in the
- * system is in its list of supported devices.
- */
-const struct of_device_id *of_match_device(const struct of_device_id *matches,
- const struct of_device *dev)
-{
- if (!dev->node)
- return NULL;
- while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
- int match = 1;
- if (matches->name[0])
- match &= dev->node->name
- && !strcmp(matches->name, dev->node->name);
- if (matches->type[0])
- match &= dev->node->type
- && !strcmp(matches->type, dev->node->type);
- if (matches->compatible[0])
- match &= of_device_is_compatible(dev->node,
- matches->compatible);
- if (match)
- return matches;
- matches++;
- }
- return NULL;
-}
-
-static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * of_drv = to_of_platform_driver(drv);
- const struct of_device_id * matches = of_drv->match_table;
-
- if (!matches)
- return 0;
-
- return of_match_device(matches, of_dev) != NULL;
-}
-
-struct of_device *of_dev_get(struct of_device *dev)
-{
- struct device *tmp;
-
- if (!dev)
- return NULL;
- tmp = get_device(&dev->dev);
- if (tmp)
- return to_of_device(tmp);
- else
- return NULL;
-}
-
-void of_dev_put(struct of_device *dev)
-{
- if (dev)
- put_device(&dev->dev);
-}
-
-
-static int of_device_probe(struct device *dev)
-{
- int error = -ENODEV;
- struct of_platform_driver *drv;
- struct of_device *of_dev;
- const struct of_device_id *match;
-
- drv = to_of_platform_driver(dev->driver);
- of_dev = to_of_device(dev);
-
- if (!drv->probe)
- return error;
-
- of_dev_get(of_dev);
-
- match = of_match_device(drv->match_table, of_dev);
- if (match)
- error = drv->probe(of_dev, match);
- if (error)
- of_dev_put(of_dev);
-
- return error;
-}
-
-static int of_device_remove(struct device *dev)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-
- if (dev->driver && drv->remove)
- drv->remove(of_dev);
- return 0;
-}
-
-static int of_device_suspend(struct device *dev, pm_message_t state)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
- int error = 0;
-
- if (dev->driver && drv->suspend)
- error = drv->suspend(of_dev, state);
- return error;
-}
-
-static int of_device_resume(struct device * dev)
-{
- struct of_device * of_dev = to_of_device(dev);
- struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
- int error = 0;
-
- if (dev->driver && drv->resume)
- error = drv->resume(of_dev);
- return error;
-}
+#include <linux/errno.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
{
@@ -163,7 +44,7 @@ static int node_match(struct device *dev, void *data)
struct of_device *of_find_device_by_node(struct device_node *dp)
{
- struct device *dev = bus_find_device(&of_bus_type, NULL,
+ struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
dp, node_match);
if (dev)
@@ -174,48 +55,20 @@ struct of_device *of_find_device_by_node(struct device_node *dp)
EXPORT_SYMBOL(of_find_device_by_node);
#ifdef CONFIG_PCI
-struct bus_type isa_bus_type = {
- .name = "isa",
- .match = of_platform_bus_match,
- .probe = of_device_probe,
- .remove = of_device_remove,
- .suspend = of_device_suspend,
- .resume = of_device_resume,
-};
+struct bus_type isa_bus_type;
EXPORT_SYMBOL(isa_bus_type);
-struct bus_type ebus_bus_type = {
- .name = "ebus",
- .match = of_platform_bus_match,
- .probe = of_device_probe,
- .remove = of_device_remove,
- .suspend = of_device_suspend,
- .resume = of_device_resume,
-};
+struct bus_type ebus_bus_type;
EXPORT_SYMBOL(ebus_bus_type);
#endif
#ifdef CONFIG_SBUS
-struct bus_type sbus_bus_type = {
- .name = "sbus",
- .match = of_platform_bus_match,
- .probe = of_device_probe,
- .remove = of_device_remove,
- .suspend = of_device_suspend,
- .resume = of_device_resume,
-};
+struct bus_type sbus_bus_type;
EXPORT_SYMBOL(sbus_bus_type);
#endif
-struct bus_type of_bus_type = {
- .name = "of",
- .match = of_platform_bus_match,
- .probe = of_device_probe,
- .remove = of_device_remove,
- .suspend = of_device_suspend,
- .resume = of_device_resume,
-};
-EXPORT_SYMBOL(of_bus_type);
+struct bus_type of_platform_bus_type;
+EXPORT_SYMBOL(of_platform_bus_type);
static inline u64 of_read_addr(const u32 *cell, int size)
{
@@ -899,11 +752,16 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
{
struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
const unsigned int *irq;
+ struct dev_archdata *sd;
int len, i;
if (!op)
return NULL;
+ sd = &op->dev.archdata;
+ sd->prom_node = dp;
+ sd->op = op;
+
op->node = dp;
op->clock_freq = of_getintprop_default(dp, "clock-frequency",
@@ -933,7 +791,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]);
op->dev.parent = parent;
- op->dev.bus = &of_bus_type;
+ op->dev.bus = &of_platform_bus_type;
if (!parent)
strcpy(op->dev.bus_id, "root");
else
@@ -977,16 +835,16 @@ static int __init of_bus_driver_init(void)
{
int err;
- err = bus_register(&of_bus_type);
+ err = of_bus_type_init(&of_platform_bus_type, "of");
#ifdef CONFIG_PCI
if (!err)
- err = bus_register(&isa_bus_type);
+ err = of_bus_type_init(&isa_bus_type, "isa");
if (!err)
- err = bus_register(&ebus_bus_type);
+ err = of_bus_type_init(&ebus_bus_type, "ebus");
#endif
#ifdef CONFIG_SBUS
if (!err)
- err = bus_register(&sbus_bus_type);
+ err = of_bus_type_init(&sbus_bus_type, "sbus");
#endif
if (!err)
@@ -1020,61 +878,13 @@ int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
/* register with core */
return driver_register(&drv->driver);
}
+EXPORT_SYMBOL(of_register_driver);
void of_unregister_driver(struct of_platform_driver *drv)
{
driver_unregister(&drv->driver);
}
-
-
-static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct of_device *ofdev;
-
- ofdev = to_of_device(dev);
- return sprintf(buf, "%s", ofdev->node->full_name);
-}
-
-static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
-
-/**
- * of_release_dev - free an of device structure when all users of it are finished.
- * @dev: device that's been disconnected
- *
- * Will be called only by the device core when all users of this of device are
- * done.
- */
-void of_release_dev(struct device *dev)
-{
- struct of_device *ofdev;
-
- ofdev = to_of_device(dev);
-
- kfree(ofdev);
-}
-
-int of_device_register(struct of_device *ofdev)
-{
- int rc;
-
- BUG_ON(ofdev->node == NULL);
-
- rc = device_register(&ofdev->dev);
- if (rc)
- return rc;
-
- rc = device_create_file(&ofdev->dev, &dev_attr_devspec);
- if (rc)
- device_unregister(&ofdev->dev);
-
- return rc;
-}
-
-void of_device_unregister(struct of_device *ofdev)
-{
- device_remove_file(&ofdev->dev, &dev_attr_devspec);
- device_unregister(&ofdev->dev);
-}
+EXPORT_SYMBOL(of_unregister_driver);
struct of_device* of_platform_device_create(struct device_node *np,
const char *bus_id,
@@ -1100,13 +910,4 @@ struct of_device* of_platform_device_create(struct device_node *np,
return dev;
}
-
-EXPORT_SYMBOL(of_match_device);
-EXPORT_SYMBOL(of_register_driver);
-EXPORT_SYMBOL(of_unregister_driver);
-EXPORT_SYMBOL(of_device_register);
-EXPORT_SYMBOL(of_device_unregister);
-EXPORT_SYMBOL(of_dev_get);
-EXPORT_SYMBOL(of_dev_put);
EXPORT_SYMBOL(of_platform_device_create);
-EXPORT_SYMBOL(of_release_dev);
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 3bc136a50404..e8dac81d8a0d 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -283,12 +283,6 @@ int __init pcic_present(void)
return pci_controller_scan(pci_is_controller);
}
-const struct pci_iommu_ops *pci_iommu_ops;
-EXPORT_SYMBOL(pci_iommu_ops);
-
-extern const struct pci_iommu_ops pci_sun4u_iommu_ops,
- pci_sun4v_iommu_ops;
-
/* Find each controller in the system, attach and initialize
* software state structure for each and link into the
* pci_pbm_root. Setup the controller enough such
@@ -296,11 +290,6 @@ extern const struct pci_iommu_ops pci_sun4u_iommu_ops,
*/
static void __init pci_controller_probe(void)
{
- if (tlb_type == hypervisor)
- pci_iommu_ops = &pci_sun4v_iommu_ops;
- else
- pci_iommu_ops = &pci_sun4u_iommu_ops;
-
printk("PCI: Probing for controllers.\n");
pci_controller_scan(pci_controller_init);
@@ -404,7 +393,10 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
sd->host_controller = pbm;
sd->prom_node = node;
sd->op = of_find_device_by_node(node);
- sd->msi_num = 0xffffffff;
+
+ sd = &sd->op->dev.archdata;
+ sd->iommu = pbm->iommu;
+ sd->stc = &pbm->stc;
type = of_get_property(node, "device_type", NULL);
if (type == NULL)
@@ -453,6 +445,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
*/
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
dev->class = class >> 8;
+ dev->revision = class & 0xff;
sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -751,7 +744,7 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
{
struct device_node *child;
const u32 *reg;
- int reglen, devfn;
+ int reglen, devfn, prev_devfn;
struct pci_dev *dev;
if (ofpci_verbose)
@@ -759,14 +752,25 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
node->full_name, bus->number);
child = NULL;
+ prev_devfn = -1;
while ((child = of_get_next_child(node, child)) != NULL) {
if (ofpci_verbose)
printk(" * %s\n", child->full_name);
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
+
devfn = (reg[0] >> 8) & 0xff;
+ /* This is a workaround for some device trees
+ * which list PCI devices twice. On the V100,
+ * for example, device number 3 appears twice:
+ * once as "pm" and once as "lomp".
+ */
+ if (devfn == prev_devfn)
+ continue;
+ prev_devfn = devfn;
+
/* create a new pci_dev for this device */
dev = of_create_pci_dev(pbm, child, bus, devfn, 0);
if (!dev)
@@ -1225,4 +1229,51 @@ struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
}
EXPORT_SYMBOL(pci_device_to_OF_node);
+static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
+{
+ struct pci_dev *ali_isa_bridge;
+ u8 val;
+
+ /* ALI sound chips generate 31-bits of DMA, a special register
+ * determines what bit 31 is emitted as.
+ */
+ ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
+ PCI_DEVICE_ID_AL_M1533,
+ NULL);
+
+ pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
+ if (set_bit)
+ val |= 0x01;
+ else
+ val &= ~0x01;
+ pci_write_config_byte(ali_isa_bridge, 0x7e, val);
+ pci_dev_put(ali_isa_bridge);
+}
+
+int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
+{
+ u64 dma_addr_mask;
+
+ if (pdev == NULL) {
+ dma_addr_mask = 0xffffffff;
+ } else {
+ struct iommu *iommu = pdev->dev.archdata.iommu;
+
+ dma_addr_mask = iommu->dma_addr_mask;
+
+ if (pdev->vendor == PCI_VENDOR_ID_AL &&
+ pdev->device == PCI_DEVICE_ID_AL_M5451 &&
+ device_mask == 0x7fffffff) {
+ ali_sound_dma_hack(pdev,
+ (dma_addr_mask & 0x80000000) != 0);
+ return 1;
+ }
+ }
+
+ if (device_mask >= (1UL << 32UL))
+ return 0;
+
+ return (device_mask & dma_addr_mask) == dma_addr_mask;
+}
+
#endif /* !(CONFIG_PCI) */
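
[Note: concretely, the M5451 special case above exists because that chip only emits 31 bits of DMA address. With device_mask == 0x7fffffff and a 32-bit sun4u IOMMU dma_addr_mask of 0xffffffff, the generic test at the end would fail, since 0x7fffffff & 0xffffffff != 0xffffffff; the helper instead programs how bit 31 is emitted via the M1533 ISA bridge register 0x7e and reports the mask as supported.]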
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
index 2f61c4b12596..c76bfbb7da08 100644
--- a/arch/sparc64/kernel/pci_common.c
+++ b/arch/sparc64/kernel/pci_common.c
@@ -264,7 +264,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned int func = PCI_FUNC(devfn);
unsigned long ret;
- if (bus_dev == pbm->pci_bus && devfn == 0x00)
+ if (!bus && devfn == 0x00)
return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
size, value);
if (config_out_of_range(pbm, bus, devfn, where)) {
@@ -300,7 +300,7 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned int func = PCI_FUNC(devfn);
unsigned long ret;
- if (bus_dev == pbm->pci_bus && devfn == 0x00)
+ if (!bus && devfn == 0x00)
return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
size, value);
if (config_out_of_range(pbm, bus, devfn, where)) {
diff --git a/arch/sparc64/kernel/pci_fire.c b/arch/sparc64/kernel/pci_fire.c
index 7f5d473901c4..14d67fe21ab2 100644
--- a/arch/sparc64/kernel/pci_fire.c
+++ b/arch/sparc64/kernel/pci_fire.c
@@ -39,12 +39,12 @@ static void pci_fire_scan_bus(struct pci_pbm_info *pbm)
#define FIRE_IOMMU_FLUSH 0x40100UL
#define FIRE_IOMMU_FLUSHINV 0x40108UL
-static void pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
+static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
struct iommu *iommu = pbm->iommu;
u32 vdma[2], dma_mask;
u64 control;
- int tsbsize;
+ int tsbsize, err;
/* No virtual-dma property on these guys, use largest size. */
vdma[0] = 0xc0000000; /* base */
@@ -68,7 +68,9 @@ static void pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
*/
fire_write(iommu->iommu_flushinv, ~(u64)0);
- pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
+ err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
+ if (err)
+ return err;
fire_write(iommu->iommu_tsbbase, __pa(iommu->page_table) | 0x7UL);
@@ -78,6 +80,8 @@ static void pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
0x00000002 /* Bypass enable */ |
0x00000001 /* Translation enable */);
fire_write(iommu->iommu_control, control);
+
+ return 0;
}
/* Based at pbm->controller_regs */
@@ -167,8 +171,8 @@ static void pci_fire_hw_init(struct pci_pbm_info *pbm)
fire_write(pbm->pbm_regs + FIRE_PEC_IENAB, ~(u64)0);
}
-static void pci_fire_pbm_init(struct pci_controller_info *p,
- struct device_node *dp, u32 portid)
+static int pci_fire_pbm_init(struct pci_controller_info *p,
+ struct device_node *dp, u32 portid)
{
const struct linux_prom64_registers *regs;
struct pci_pbm_info *pbm;
@@ -203,7 +207,8 @@ static void pci_fire_pbm_init(struct pci_controller_info *p,
pci_get_pbm_props(pbm);
pci_fire_hw_init(pbm);
- pci_fire_pbm_iommu_init(pbm);
+
+ return pci_fire_pbm_iommu_init(pbm);
}
static inline int portid_compare(u32 x, u32 y)
@@ -222,7 +227,8 @@ void fire_pci_init(struct device_node *dp, const char *model_name)
for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
if (portid_compare(pbm->portid, portid)) {
- pci_fire_pbm_init(pbm->parent, dp, portid);
+ if (pci_fire_pbm_init(pbm->parent, dp, portid))
+ goto fatal_memory_error;
return;
}
}
@@ -250,7 +256,9 @@ void fire_pci_init(struct device_node *dp, const char *model_name)
*/
pci_memspace_mask = 0x7fffffffUL;
- pci_fire_pbm_init(p, dp, portid);
+ if (pci_fire_pbm_init(p, dp, portid))
+ goto fatal_memory_error;
+
return;
fatal_memory_error:
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
deleted file mode 100644
index 70d2364fdfe0..000000000000
--- a/arch/sparc64/kernel/pci_iommu.c
+++ /dev/null
@@ -1,823 +0,0 @@
-/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
- *
- * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
-#include <asm/oplib.h>
-
-#include "iommu_common.h"
-#include "pci_impl.h"
-
-#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
- ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
-
-/* Accessing IOMMU and Streaming Buffer registers.
- * REG parameter is a physical address. All registers
- * are 64-bits in size.
- */
-#define pci_iommu_read(__reg) \
-({ u64 __ret; \
- __asm__ __volatile__("ldxa [%1] %2, %0" \
- : "=r" (__ret) \
- : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
- : "memory"); \
- __ret; \
-})
-#define pci_iommu_write(__reg, __val) \
- __asm__ __volatile__("stxa %0, [%1] %2" \
- : /* no outputs */ \
- : "r" (__val), "r" (__reg), \
- "i" (ASI_PHYS_BYPASS_EC_E))
-
-/* Must be invoked under the IOMMU lock. */
-static void __iommu_flushall(struct iommu *iommu)
-{
- if (iommu->iommu_flushinv) {
- pci_iommu_write(iommu->iommu_flushinv, ~(u64)0);
- } else {
- unsigned long tag;
- int entry;
-
- tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
- for (entry = 0; entry < 16; entry++) {
- pci_iommu_write(tag, 0);
- tag += 8;
- }
-
- /* Ensure completion of previous PIO writes. */
- (void) pci_iommu_read(iommu->write_complete_reg);
- }
-}
-
-#define IOPTE_CONSISTENT(CTX) \
- (IOPTE_VALID | IOPTE_CACHE | \
- (((CTX) << 47) & IOPTE_CONTEXT))
-
-#define IOPTE_STREAMING(CTX) \
- (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
-
-/* Existing mappings are never marked invalid, instead they
- * are pointed to a dummy page.
- */
-#define IOPTE_IS_DUMMY(iommu, iopte) \
- ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
-
-static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
-{
- unsigned long val = iopte_val(*iopte);
-
- val &= ~IOPTE_PAGE;
- val |= iommu->dummy_page_pa;
-
- iopte_val(*iopte) = val;
-}
-
-/* Based largely upon the ppc64 iommu allocator. */
-static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
-{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long n, i, start, end, limit;
- int pass;
-
- limit = arena->limit;
- start = arena->hint;
- pass = 0;
-
-again:
- n = find_next_zero_bit(arena->map, limit, start);
- end = n + npages;
- if (unlikely(end >= limit)) {
- if (likely(pass < 1)) {
- limit = start;
- start = 0;
- __iommu_flushall(iommu);
- pass++;
- goto again;
- } else {
- /* Scanned the whole thing, give up. */
- return -1;
- }
- }
-
- for (i = n; i < end; i++) {
- if (test_bit(i, arena->map)) {
- start = i + 1;
- goto again;
- }
- }
-
- for (i = n; i < end; i++)
- __set_bit(i, arena->map);
-
- arena->hint = end;
-
- return n;
-}
-
-static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
-{
- unsigned long i;
-
- for (i = base; i < (base + npages); i++)
- __clear_bit(i, arena->map);
-}
-
-void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
-{
- unsigned long i, tsbbase, order, sz, num_tsb_entries;
-
- num_tsb_entries = tsbsize / sizeof(iopte_t);
-
- /* Setup initial software IOMMU state. */
- spin_lock_init(&iommu->lock);
- iommu->ctx_lowest_free = 1;
- iommu->page_table_map_base = dma_offset;
- iommu->dma_addr_mask = dma_addr_mask;
-
- /* Allocate and initialize the free area map. */
- sz = num_tsb_entries / 8;
- sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->arena.map) {
- prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
- prom_halt();
- }
- iommu->arena.limit = num_tsb_entries;
-
- /* Allocate and initialize the dummy page which we
- * set inactive IO PTEs to point to.
- */
- iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
- if (!iommu->dummy_page) {
- prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
- prom_halt();
- }
- memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
- iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
- /* Now allocate and setup the IOMMU page table itself. */
- order = get_order(tsbsize);
- tsbbase = __get_free_pages(GFP_KERNEL, order);
- if (!tsbbase) {
- prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
- prom_halt();
- }
- iommu->page_table = (iopte_t *)tsbbase;
-
- for (i = 0; i < num_tsb_entries; i++)
- iopte_make_dummy(iommu, &iommu->page_table[i]);
-}
-
-static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
-{
- long entry;
-
- entry = pci_arena_alloc(iommu, npages);
- if (unlikely(entry < 0))
- return NULL;
-
- return iommu->page_table + entry;
-}
-
-static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
-{
- pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
-}
-
-static int iommu_alloc_ctx(struct iommu *iommu)
-{
- int lowest = iommu->ctx_lowest_free;
- int sz = IOMMU_NUM_CTXS - lowest;
- int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
-
- if (unlikely(n == sz)) {
- n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
- if (unlikely(n == lowest)) {
- printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
- n = 0;
- }
- }
- if (n)
- __set_bit(n, iommu->ctx_bitmap);
-
- return n;
-}
-
-static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
-{
- if (likely(ctx)) {
- __clear_bit(ctx, iommu->ctx_bitmap);
- if (ctx < iommu->ctx_lowest_free)
- iommu->ctx_lowest_free = ctx;
- }
-}
-
-/* Allocate and map kernel buffer of size SIZE using consistent mode
- * DMA for PCI device PDEV. Return non-NULL cpu-side address if
- * successful and set *DMA_ADDRP to the PCI side dma address.
- */
-static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
-{
- struct iommu *iommu;
- iopte_t *iopte;
- unsigned long flags, order, first_page;
- void *ret;
- int npages;
-
- size = IO_PAGE_ALIGN(size);
- order = get_order(size);
- if (order >= 10)
- return NULL;
-
- first_page = __get_free_pages(gfp, order);
- if (first_page == 0UL)
- return NULL;
- memset((char *)first_page, 0, PAGE_SIZE << order);
-
- iommu = pdev->dev.archdata.iommu;
-
- spin_lock_irqsave(&iommu->lock, flags);
- iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- if (unlikely(iopte == NULL)) {
- free_pages(first_page, order);
- return NULL;
- }
-
- *dma_addrp = (iommu->page_table_map_base +
- ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
- ret = (void *) first_page;
- npages = size >> IO_PAGE_SHIFT;
- first_page = __pa(first_page);
- while (npages--) {
- iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
- IOPTE_WRITE |
- (first_page & IOPTE_PAGE));
- iopte++;
- first_page += IO_PAGE_SIZE;
- }
-
- return ret;
-}
-
-/* Free and unmap a consistent DMA translation. */
-static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
-{
- struct iommu *iommu;
- iopte_t *iopte;
- unsigned long flags, order, npages;
-
- npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
- iommu = pdev->dev.archdata.iommu;
- iopte = iommu->page_table +
- ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- free_npages(iommu, dvma - iommu->page_table_map_base, npages);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- order = get_order(size);
- if (order < 10)
- free_pages((unsigned long)cpu, order);
-}
-
-/* Map a single buffer at PTR of SZ bytes for PCI DMA
- * in streaming mode.
- */
-static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
-{
- struct iommu *iommu;
- struct strbuf *strbuf;
- iopte_t *base;
- unsigned long flags, npages, oaddr;
- unsigned long i, base_paddr, ctx;
- u32 bus_addr, ret;
- unsigned long iopte_protection;
-
- iommu = pdev->dev.archdata.iommu;
- strbuf = pdev->dev.archdata.stc;
-
- if (unlikely(direction == PCI_DMA_NONE))
- goto bad_no_ctx;
-
- oaddr = (unsigned long)ptr;
- npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
- npages >>= IO_PAGE_SHIFT;
-
- spin_lock_irqsave(&iommu->lock, flags);
- base = alloc_npages(iommu, npages);
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = iommu_alloc_ctx(iommu);
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- if (unlikely(!base))
- goto bad;
-
- bus_addr = (iommu->page_table_map_base +
- ((base - iommu->page_table) << IO_PAGE_SHIFT));
- ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
- base_paddr = __pa(oaddr & IO_PAGE_MASK);
- if (strbuf->strbuf_enabled)
- iopte_protection = IOPTE_STREAMING(ctx);
- else
- iopte_protection = IOPTE_CONSISTENT(ctx);
- if (direction != PCI_DMA_TODEVICE)
- iopte_protection |= IOPTE_WRITE;
-
- for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
- iopte_val(*base) = iopte_protection | base_paddr;
-
- return ret;
-
-bad:
- iommu_free_ctx(iommu, ctx);
-bad_no_ctx:
- if (printk_ratelimit())
- WARN_ON(1);
- return PCI_DMA_ERROR_CODE;
-}
-
-static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
-{
- int limit;
-
- if (strbuf->strbuf_ctxflush &&
- iommu->iommu_ctxflush) {
- unsigned long matchreg, flushreg;
- u64 val;
-
- flushreg = strbuf->strbuf_ctxflush;
- matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-
- pci_iommu_write(flushreg, ctx);
- val = pci_iommu_read(matchreg);
- val &= 0xffff;
- if (!val)
- goto do_flush_sync;
-
- while (val) {
- if (val & 0x1)
- pci_iommu_write(flushreg, ctx);
- val >>= 1;
- }
- val = pci_iommu_read(matchreg);
- if (unlikely(val)) {
- printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
- "timeout matchreg[%lx] ctx[%lx]\n",
- val, ctx);
- goto do_page_flush;
- }
- } else {
- unsigned long i;
-
- do_page_flush:
- for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
- pci_iommu_write(strbuf->strbuf_pflush, vaddr);
- }
-
-do_flush_sync:
- /* If the device could not have possibly put dirty data into
- * the streaming cache, no flush-flag synchronization needs
- * to be performed.
- */
- if (direction == PCI_DMA_TODEVICE)
- return;
-
- PCI_STC_FLUSHFLAG_INIT(strbuf);
- pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
- (void) pci_iommu_read(iommu->write_complete_reg);
-
- limit = 100000;
- while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
- limit--;
- if (!limit)
- break;
- udelay(1);
- rmb();
- }
- if (!limit)
- printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
- "vaddr[%08x] ctx[%lx] npages[%ld]\n",
- vaddr, ctx, npages);
-}
-
-/* Unmap a single streaming mode DMA translation. */
-static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
-{
- struct iommu *iommu;
- struct strbuf *strbuf;
- iopte_t *base;
- unsigned long flags, npages, ctx, i;
-
- if (unlikely(direction == PCI_DMA_NONE)) {
- if (printk_ratelimit())
- WARN_ON(1);
- return;
- }
-
- iommu = pdev->dev.archdata.iommu;
- strbuf = pdev->dev.archdata.stc;
-
- npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
- npages >>= IO_PAGE_SHIFT;
- base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-#ifdef DEBUG_PCI_IOMMU
- if (IOPTE_IS_DUMMY(iommu, base))
- printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
- bus_addr, sz, __builtin_return_address(0));
-#endif
- bus_addr &= IO_PAGE_MASK;
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- /* Record the context, if any. */
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
-
- /* Step 1: Kick data out of streaming buffers if necessary. */
- if (strbuf->strbuf_enabled)
- pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
- npages, direction);
-
- /* Step 2: Clear out TSB entries. */
- for (i = 0; i < npages; i++)
- iopte_make_dummy(iommu, base + i);
-
- free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
-
- iommu_free_ctx(iommu, ctx);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-#define SG_ENT_PHYS_ADDRESS(SG) \
- (__pa(page_address((SG)->page)) + (SG)->offset)
-
-static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
- int nused, int nelems, unsigned long iopte_protection)
-{
- struct scatterlist *dma_sg = sg;
- struct scatterlist *sg_end = sg + nelems;
- int i;
-
- for (i = 0; i < nused; i++) {
- unsigned long pteval = ~0UL;
- u32 dma_npages;
-
- dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
- dma_sg->dma_length +
- ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
- do {
- unsigned long offset;
- signed int len;
-
- /* If we are here, we know we have at least one
- * more page to map. So walk forward until we
- * hit a page crossing, and begin creating new
- * mappings from that spot.
- */
- for (;;) {
- unsigned long tmp;
-
- tmp = SG_ENT_PHYS_ADDRESS(sg);
- len = sg->length;
- if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = tmp & IO_PAGE_MASK;
- offset = tmp & (IO_PAGE_SIZE - 1UL);
- break;
- }
- if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
- offset = 0UL;
- len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
- break;
- }
- sg++;
- }
-
- pteval = iopte_protection | (pteval & IOPTE_PAGE);
- while (len > 0) {
- *iopte++ = __iopte(pteval);
- pteval += IO_PAGE_SIZE;
- len -= (IO_PAGE_SIZE - offset);
- offset = 0;
- dma_npages--;
- }
-
- pteval = (pteval & IOPTE_PAGE) + len;
- sg++;
-
- /* Skip over any tail mappings we've fully mapped,
- * adjusting pteval along the way. Stop when we
- * detect a page crossing event.
- */
- while (sg < sg_end &&
- (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
- (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
- ((pteval ^
- (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
- pteval += sg->length;
- sg++;
- }
- if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
- pteval = ~0UL;
- } while (dma_npages != 0);
- dma_sg++;
- }
-}
-
-/* Map a set of buffers described by SGLIST with NELEMS array
- * elements in streaming mode for PCI DMA.
- * When making changes here, inspect the assembly output. I was having
- * hard time to keep this routine out of using stack slots for holding variables.
- */
-static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
-{
- struct iommu *iommu;
- struct strbuf *strbuf;
- unsigned long flags, ctx, npages, iopte_protection;
- iopte_t *base;
- u32 dma_base;
- struct scatterlist *sgtmp;
- int used;
-
- /* Fast path single entry scatterlists. */
- if (nelems == 1) {
- sglist->dma_address =
- pci_4u_map_single(pdev,
- (page_address(sglist->page) + sglist->offset),
- sglist->length, direction);
- if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
- return 0;
- sglist->dma_length = sglist->length;
- return 1;
- }
-
- iommu = pdev->dev.archdata.iommu;
- strbuf = pdev->dev.archdata.stc;
-
- if (unlikely(direction == PCI_DMA_NONE))
- goto bad_no_ctx;
-
- /* Step 1: Prepare scatter list. */
-
- npages = prepare_sg(sglist, nelems);
-
- /* Step 2: Allocate a cluster and context, if necessary. */
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- base = alloc_npages(iommu, npages);
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = iommu_alloc_ctx(iommu);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- if (base == NULL)
- goto bad;
-
- dma_base = iommu->page_table_map_base +
- ((base - iommu->page_table) << IO_PAGE_SHIFT);
-
- /* Step 3: Normalize DMA addresses. */
- used = nelems;
-
- sgtmp = sglist;
- while (used && sgtmp->dma_length) {
- sgtmp->dma_address += dma_base;
- sgtmp++;
- used--;
- }
- used = nelems - used;
-
- /* Step 4: Create the mappings. */
- if (strbuf->strbuf_enabled)
- iopte_protection = IOPTE_STREAMING(ctx);
- else
- iopte_protection = IOPTE_CONSISTENT(ctx);
- if (direction != PCI_DMA_TODEVICE)
- iopte_protection |= IOPTE_WRITE;
-
- fill_sg(base, sglist, used, nelems, iopte_protection);
-
-#ifdef VERIFY_SG
- verify_sglist(sglist, nelems, base, npages);
-#endif
-
- return used;
-
-bad:
- iommu_free_ctx(iommu, ctx);
-bad_no_ctx:
- if (printk_ratelimit())
- WARN_ON(1);
- return 0;
-}
-
-/* Unmap a set of streaming mode DMA translations. */
-static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
-{
- struct iommu *iommu;
- struct strbuf *strbuf;
- iopte_t *base;
- unsigned long flags, ctx, i, npages;
- u32 bus_addr;
-
- if (unlikely(direction == PCI_DMA_NONE)) {
- if (printk_ratelimit())
- WARN_ON(1);
- }
-
- iommu = pdev->dev.archdata.iommu;
- strbuf = pdev->dev.archdata.stc;
-
- bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
- for (i = 1; i < nelems; i++)
- if (sglist[i].dma_length == 0)
- break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
- bus_addr) >> IO_PAGE_SHIFT;
-
- base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
-#ifdef DEBUG_PCI_IOMMU
- if (IOPTE_IS_DUMMY(iommu, base))
- printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
-#endif
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- /* Record the context, if any. */
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
-
- /* Step 1: Kick data out of streaming buffers if necessary. */
- if (strbuf->strbuf_enabled)
- pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
-
- /* Step 2: Clear out the TSB entries. */
- for (i = 0; i < npages; i++)
- iopte_make_dummy(iommu, base + i);
-
- free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
-
- iommu_free_ctx(iommu, ctx);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- */
-static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
-{
- struct iommu *iommu;
- struct strbuf *strbuf;
- unsigned long flags, ctx, npages;
-
- iommu = pdev->dev.archdata.iommu;
- strbuf = pdev->dev.archdata.stc;
-
- if (!strbuf->strbuf_enabled)
- return;
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
- npages >>= IO_PAGE_SHIFT;
- bus_addr &= IO_PAGE_MASK;
-
- /* Step 1: Record the context, if any. */
- ctx = 0;
- if (iommu->iommu_ctxflush &&
- strbuf->strbuf_ctxflush) {
- iopte_t *iopte;
-
- iopte = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
- ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
- }
-
- /* Step 2: Kick data out of streaming buffers. */
- pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- */
-static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
-{
- struct iommu *iommu;
- struct strbuf *strbuf;
- unsigned long flags, ctx, npages, i;
- u32 bus_addr;
-
- iommu = pdev->dev.archdata.iommu;
- strbuf = pdev->dev.archdata.stc;
-
- if (!strbuf->strbuf_enabled)
- return;
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- /* Step 1: Record the context, if any. */
- ctx = 0;
- if (iommu->iommu_ctxflush &&
- strbuf->strbuf_ctxflush) {
- iopte_t *iopte;
-
- iopte = iommu->page_table +
- ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
- ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
- }
-
- /* Step 2: Kick data out of streaming buffers. */
- bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
- for(i = 1; i < nelems; i++)
- if (!sglist[i].dma_length)
- break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
- - bus_addr) >> IO_PAGE_SHIFT;
- pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-const struct pci_iommu_ops pci_sun4u_iommu_ops = {
- .alloc_consistent = pci_4u_alloc_consistent,
- .free_consistent = pci_4u_free_consistent,
- .map_single = pci_4u_map_single,
- .unmap_single = pci_4u_unmap_single,
- .map_sg = pci_4u_map_sg,
- .unmap_sg = pci_4u_unmap_sg,
- .dma_sync_single_for_cpu = pci_4u_dma_sync_single_for_cpu,
- .dma_sync_sg_for_cpu = pci_4u_dma_sync_sg_for_cpu,
-};
-
-static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
-{
- struct pci_dev *ali_isa_bridge;
- u8 val;
-
- /* ALI sound chips generate 31-bits of DMA, a special register
- * determines what bit 31 is emitted as.
- */
- ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
- PCI_DEVICE_ID_AL_M1533,
- NULL);
-
- pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
- if (set_bit)
- val |= 0x01;
- else
- val &= ~0x01;
- pci_write_config_byte(ali_isa_bridge, 0x7e, val);
- pci_dev_put(ali_isa_bridge);
-}
-
-int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
-{
- u64 dma_addr_mask;
-
- if (pdev == NULL) {
- dma_addr_mask = 0xffffffff;
- } else {
- struct iommu *iommu = pdev->dev.archdata.iommu;
-
- dma_addr_mask = iommu->dma_addr_mask;
-
- if (pdev->vendor == PCI_VENDOR_ID_AL &&
- pdev->device == PCI_DEVICE_ID_AL_M5451 &&
- device_mask == 0x7fffffff) {
- ali_sound_dma_hack(pdev,
- (dma_addr_mask & 0x80000000) != 0);
- return 1;
- }
- }
-
- if (device_mask >= (1UL << 32UL))
- return 0;
-
- return (device_mask & dma_addr_mask) == dma_addr_mask;
-}
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 598393a2df16..b6b4cfea5b5f 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -813,16 +813,19 @@ static void psycho_scan_bus(struct pci_pbm_info *pbm)
psycho_register_error_handlers(pbm);
}
-static void psycho_iommu_init(struct pci_pbm_info *pbm)
+static int psycho_iommu_init(struct pci_pbm_info *pbm)
{
struct iommu *iommu = pbm->iommu;
unsigned long i;
u64 control;
+ int err;
/* Register addresses. */
iommu->iommu_control = pbm->controller_regs + PSYCHO_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->controller_regs + PSYCHO_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->controller_regs + PSYCHO_IOMMU_FLUSH;
+ iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
+
/* PSYCHO's IOMMU lacks ctx flushing. */
iommu->iommu_ctxflush = 0;
@@ -845,7 +848,9 @@ static void psycho_iommu_init(struct pci_pbm_info *pbm)
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
- pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
+ err = iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
+ if (err)
+ return err;
psycho_write(pbm->controller_regs + PSYCHO_IOMMU_TSBBASE,
__pa(iommu->page_table));
@@ -858,6 +863,8 @@ static void psycho_iommu_init(struct pci_pbm_info *pbm)
/* If necessary, hook us up for starfire IRQ translations. */
if (this_is_starfire)
starfire_hookup(pbm->portid);
+
+ return 0;
}
#define PSYCHO_IRQ_RETRY 0x1a00UL
@@ -1031,15 +1038,12 @@ void psycho_init(struct device_node *dp, char *model_name)
}
p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
- if (!p) {
- prom_printf("PSYCHO: Fatal memory allocation error.\n");
- prom_halt();
- }
+ if (!p)
+ goto fatal_memory_error;
iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
- if (!iommu) {
- prom_printf("PSYCHO: Fatal memory allocation error.\n");
- prom_halt();
- }
+ if (!iommu)
+ goto fatal_memory_error;
+
p->pbm_A.iommu = p->pbm_B.iommu = iommu;
p->pbm_A.portid = upa_portid;
@@ -1062,8 +1066,14 @@ void psycho_init(struct device_node *dp, char *model_name)
psycho_controller_hwinit(&p->pbm_A);
- psycho_iommu_init(&p->pbm_A);
+ if (psycho_iommu_init(&p->pbm_A))
+ goto fatal_memory_error;
is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
psycho_pbm_init(p, dp, is_pbm_a);
+ return;
+
+fatal_memory_error:
+ prom_printf("PSYCHO: Fatal memory allocation error.\n");
+ prom_halt();
}
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 22e1be5c7489..fba67c3d8809 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -672,18 +672,20 @@ static void sabre_scan_bus(struct pci_pbm_info *pbm)
sabre_register_error_handlers(pbm);
}
-static void sabre_iommu_init(struct pci_pbm_info *pbm,
- int tsbsize, unsigned long dvma_offset,
- u32 dma_mask)
+static int sabre_iommu_init(struct pci_pbm_info *pbm,
+ int tsbsize, unsigned long dvma_offset,
+ u32 dma_mask)
{
struct iommu *iommu = pbm->iommu;
unsigned long i;
u64 control;
+ int err;
/* Register addresses. */
iommu->iommu_control = pbm->controller_regs + SABRE_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->controller_regs + SABRE_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->controller_regs + SABRE_IOMMU_FLUSH;
+ iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
iommu->write_complete_reg = pbm->controller_regs + SABRE_WRSYNC;
/* Sabre's IOMMU lacks ctx flushing. */
iommu->iommu_ctxflush = 0;
@@ -701,7 +703,10 @@ static void sabre_iommu_init(struct pci_pbm_info *pbm,
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
- pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);
+ err = iommu_table_init(iommu, tsbsize * 1024 * 8,
+ dvma_offset, dma_mask);
+ if (err)
+ return err;
sabre_write(pbm->controller_regs + SABRE_IOMMU_TSBBASE,
__pa(iommu->page_table));
@@ -722,6 +727,8 @@ static void sabre_iommu_init(struct pci_pbm_info *pbm,
break;
}
sabre_write(pbm->controller_regs + SABRE_IOMMU_CONTROL, control);
+
+ return 0;
}
static void sabre_pbm_init(struct pci_controller_info *p, struct pci_pbm_info *pbm, struct device_node *dp)
@@ -775,16 +782,12 @@ void sabre_init(struct device_node *dp, char *model_name)
}
p = kzalloc(sizeof(*p), GFP_ATOMIC);
- if (!p) {
- prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n");
- prom_halt();
- }
+ if (!p)
+ goto fatal_memory_error;
iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
- if (!iommu) {
- prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
- prom_halt();
- }
+ if (!iommu)
+ goto fatal_memory_error;
pbm = &p->pbm_A;
pbm->iommu = iommu;
@@ -847,10 +850,16 @@ void sabre_init(struct device_node *dp, char *model_name)
prom_halt();
}
- sabre_iommu_init(pbm, tsbsize, vdma[0], dma_mask);
+ if (sabre_iommu_init(pbm, tsbsize, vdma[0], dma_mask))
+ goto fatal_memory_error;
/*
* Look for APB underneath.
*/
sabre_pbm_init(p, pbm, dp);
+ return;
+
+fatal_memory_error:
+ prom_printf("SABRE: Fatal memory allocation error.\n");
+ prom_halt();
}
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index ae76898bbe2b..3c30bfa1f3a3 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1148,14 +1148,14 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
#define SCHIZO_IOMMU_FLUSH (0x00210UL)
#define SCHIZO_IOMMU_CTXFLUSH (0x00218UL)
-static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
+static int schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
{
struct iommu *iommu = pbm->iommu;
unsigned long i, tagbase, database;
struct property *prop;
u32 vdma[2], dma_mask;
+ int tsbsize, err;
u64 control;
- int tsbsize;
prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
if (prop) {
@@ -1195,6 +1195,7 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
+ iommu->iommu_tags = iommu->iommu_flush + (0xa580UL - 0x0210UL);
iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;
/* We use the main control/status register of SCHIZO as the write
@@ -1219,7 +1220,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
- pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
+ err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
+ if (err)
+ return err;
schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));
@@ -1236,6 +1239,8 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
control |= SCHIZO_IOMMU_CTRL_ENAB;
schizo_write(iommu->iommu_control, control);
+
+ return 0;
}
#define SCHIZO_PCI_IRQ_RETRY (0x1a00UL)
@@ -1328,14 +1333,14 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
}
}
-static void schizo_pbm_init(struct pci_controller_info *p,
- struct device_node *dp, u32 portid,
- int chip_type)
+static int schizo_pbm_init(struct pci_controller_info *p,
+ struct device_node *dp, u32 portid,
+ int chip_type)
{
const struct linux_prom64_registers *regs;
struct pci_pbm_info *pbm;
const char *chipset_name;
- int is_pbm_a;
+ int is_pbm_a, err;
switch (chip_type) {
case PBM_CHIP_TYPE_TOMATILLO:
@@ -1406,8 +1411,13 @@ static void schizo_pbm_init(struct pci_controller_info *p,
pci_get_pbm_props(pbm);
- schizo_pbm_iommu_init(pbm);
+ err = schizo_pbm_iommu_init(pbm);
+ if (err)
+ return err;
+
schizo_pbm_strbuf_init(pbm);
+
+ return 0;
}
static inline int portid_compare(u32 x, u32 y, int chip_type)
@@ -1431,34 +1441,38 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
if (portid_compare(pbm->portid, portid, chip_type)) {
- schizo_pbm_init(pbm->parent, dp, portid, chip_type);
+ if (schizo_pbm_init(pbm->parent, dp,
+ portid, chip_type))
+ goto fatal_memory_error;
return;
}
}
p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
if (!p)
- goto memfail;
+ goto fatal_memory_error;
iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
if (!iommu)
- goto memfail;
+ goto fatal_memory_error;
p->pbm_A.iommu = iommu;
iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
if (!iommu)
- goto memfail;
+ goto fatal_memory_error;
p->pbm_B.iommu = iommu;
/* Like PSYCHO we have a 2GB aligned area for memory space. */
pci_memspace_mask = 0x7fffffffUL;
- schizo_pbm_init(p, dp, portid, chip_type);
+ if (schizo_pbm_init(p, dp, portid, chip_type))
+ goto fatal_memory_error;
+
return;
-memfail:
+fatal_memory_error:
prom_printf("SCHIZO: Fatal memory allocation error.\n");
prom_halt();
}
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 6b3fe2c1d65e..da724b13e89e 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -33,30 +33,30 @@ static unsigned long vpci_minor = 1;
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
struct iommu_batch {
- struct pci_dev *pdev; /* Device mapping is for. */
+ struct device *dev; /* Device mapping is for. */
unsigned long prot; /* IOMMU page protections */
unsigned long entry; /* Index into IOTSB. */
u64 *pglist; /* List of physical pages */
unsigned long npages; /* Number of pages in list. */
};
-static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);
+static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
/* Interrupts must be disabled. */
-static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
+static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
- struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+ struct iommu_batch *p = &__get_cpu_var(iommu_batch);
- p->pdev = pdev;
+ p->dev = dev;
p->prot = prot;
p->entry = entry;
p->npages = 0;
}
/* Interrupts must be disabled. */
-static long pci_iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p)
{
- struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
+ struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
unsigned long devhandle = pbm->devhandle;
unsigned long prot = p->prot;
unsigned long entry = p->entry;
@@ -70,7 +70,7 @@ static long pci_iommu_batch_flush(struct iommu_batch *p)
npages, prot, __pa(pglist));
if (unlikely(num < 0)) {
if (printk_ratelimit())
- printk("pci_iommu_batch_flush: IOMMU map of "
+ printk("iommu_batch_flush: IOMMU map of "
"[%08lx:%08lx:%lx:%lx:%lx] failed with "
"status %ld\n",
devhandle, HV_PCI_TSBID(0, entry),
@@ -90,30 +90,30 @@ static long pci_iommu_batch_flush(struct iommu_batch *p)
}
/* Interrupts must be disabled. */
-static inline long pci_iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page)
{
- struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+ struct iommu_batch *p = &__get_cpu_var(iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
p->pglist[p->npages++] = phys_page;
if (p->npages == PGLIST_NENTS)
- return pci_iommu_batch_flush(p);
+ return iommu_batch_flush(p);
return 0;
}
/* Interrupts must be disabled. */
-static inline long pci_iommu_batch_end(void)
+static inline long iommu_batch_end(void)
{
- struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+ struct iommu_batch *p = &__get_cpu_var(iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
- return pci_iommu_batch_flush(p);
+ return iommu_batch_flush(p);
}
-static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages)
+static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
unsigned long n, i, start, end, limit;
int pass;
@@ -152,7 +152,8 @@ again:
return n;
}
-static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
+static void arena_free(struct iommu_arena *arena, unsigned long base,
+ unsigned long npages)
{
unsigned long i;
@@ -160,7 +161,8 @@ static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsign
__clear_bit(i, arena->map);
}
-static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
+static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addrp, gfp_t gfp)
{
struct iommu *iommu;
unsigned long flags, order, first_page, npages, n;
@@ -180,10 +182,10 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
memset((char *)first_page, 0, PAGE_SIZE << order);
- iommu = pdev->dev.archdata.iommu;
+ iommu = dev->archdata.iommu;
spin_lock_irqsave(&iommu->lock, flags);
- entry = pci_arena_alloc(&iommu->arena, npages);
+ entry = arena_alloc(&iommu->arena, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(entry < 0L))
@@ -196,18 +198,18 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
local_irq_save(flags);
- pci_iommu_batch_start(pdev,
- (HV_PCI_MAP_ATTR_READ |
- HV_PCI_MAP_ATTR_WRITE),
- entry);
+ iommu_batch_start(dev,
+ (HV_PCI_MAP_ATTR_READ |
+ HV_PCI_MAP_ATTR_WRITE),
+ entry);
for (n = 0; n < npages; n++) {
- long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
+ long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(pci_iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end() < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
@@ -217,7 +219,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
iommu_map_fail:
/* Interrupts are disabled. */
spin_lock(&iommu->lock);
- pci_arena_free(&iommu->arena, entry, npages);
+ arena_free(&iommu->arena, entry, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
arena_alloc_fail:
@@ -225,7 +227,8 @@ arena_alloc_fail:
return NULL;
}
-static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
+static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
+ dma_addr_t dvma)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -233,14 +236,14 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
- iommu = pdev->dev.archdata.iommu;
- pbm = pdev->dev.archdata.host_controller;
+ iommu = dev->archdata.iommu;
+ pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
- pci_arena_free(&iommu->arena, entry, npages);
+ arena_free(&iommu->arena, entry, npages);
do {
unsigned long num;
@@ -258,7 +261,8 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu,
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
+static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
+ enum dma_data_direction direction)
{
struct iommu *iommu;
unsigned long flags, npages, oaddr;
@@ -267,9 +271,9 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
unsigned long prot;
long entry;
- iommu = pdev->dev.archdata.iommu;
+ iommu = dev->archdata.iommu;
- if (unlikely(direction == PCI_DMA_NONE))
+ if (unlikely(direction == DMA_NONE))
goto bad;
oaddr = (unsigned long)ptr;
@@ -277,7 +281,7 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
npages >>= IO_PAGE_SHIFT;
spin_lock_irqsave(&iommu->lock, flags);
- entry = pci_arena_alloc(&iommu->arena, npages);
+ entry = arena_alloc(&iommu->arena, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(entry < 0L))
@@ -288,19 +292,19 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
- if (direction != PCI_DMA_TODEVICE)
+ if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
local_irq_save(flags);
- pci_iommu_batch_start(pdev, prot, entry);
+ iommu_batch_start(dev, prot, entry);
for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
- long err = pci_iommu_batch_add(base_paddr);
+ long err = iommu_batch_add(base_paddr);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(pci_iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end() < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
@@ -310,18 +314,19 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
bad:
if (printk_ratelimit())
WARN_ON(1);
- return PCI_DMA_ERROR_CODE;
+ return DMA_ERROR_CODE;
iommu_map_fail:
/* Interrupts are disabled. */
spin_lock(&iommu->lock);
- pci_arena_free(&iommu->arena, entry, npages);
+ arena_free(&iommu->arena, entry, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
- return PCI_DMA_ERROR_CODE;
+ return DMA_ERROR_CODE;
}
-static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
+ size_t sz, enum dma_data_direction direction)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -329,14 +334,14 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
long entry;
u32 devhandle;
- if (unlikely(direction == PCI_DMA_NONE)) {
+ if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
WARN_ON(1);
return;
}
- iommu = pdev->dev.archdata.iommu;
- pbm = pdev->dev.archdata.host_controller;
+ iommu = dev->archdata.iommu;
+ pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
@@ -346,7 +351,7 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
spin_lock_irqsave(&iommu->lock, flags);
entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- pci_arena_free(&iommu->arena, entry, npages);
+ arena_free(&iommu->arena, entry, npages);
do {
unsigned long num;
@@ -363,7 +368,7 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
#define SG_ENT_PHYS_ADDRESS(SG) \
(__pa(page_address((SG)->page)) + (SG)->offset)
-static inline long fill_sg(long entry, struct pci_dev *pdev,
+static inline long fill_sg(long entry, struct device *dev,
struct scatterlist *sg,
int nused, int nelems, unsigned long prot)
{
@@ -374,7 +379,7 @@ static inline long fill_sg(long entry, struct pci_dev *pdev,
local_irq_save(flags);
- pci_iommu_batch_start(pdev, prot, entry);
+ iommu_batch_start(dev, prot, entry);
for (i = 0; i < nused; i++) {
unsigned long pteval = ~0UL;
@@ -415,7 +420,7 @@ static inline long fill_sg(long entry, struct pci_dev *pdev,
while (len > 0) {
long err;
- err = pci_iommu_batch_add(pteval);
+ err = iommu_batch_add(pteval);
if (unlikely(err < 0L))
goto iommu_map_failed;
@@ -446,7 +451,7 @@ static inline long fill_sg(long entry, struct pci_dev *pdev,
dma_sg++;
}
- if (unlikely(pci_iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end() < 0L))
goto iommu_map_failed;
local_irq_restore(flags);
@@ -457,7 +462,8 @@ iommu_map_failed:
return -1L;
}
-static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction)
{
struct iommu *iommu;
unsigned long flags, npages, prot;
@@ -469,18 +475,19 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
/* Fast path single entry scatterlists. */
if (nelems == 1) {
sglist->dma_address =
- pci_4v_map_single(pdev,
- (page_address(sglist->page) + sglist->offset),
+ dma_4v_map_single(dev,
+ (page_address(sglist->page) +
+ sglist->offset),
sglist->length, direction);
- if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
+ if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
return 0;
sglist->dma_length = sglist->length;
return 1;
}
- iommu = pdev->dev.archdata.iommu;
+ iommu = dev->archdata.iommu;
- if (unlikely(direction == PCI_DMA_NONE))
+ if (unlikely(direction == DMA_NONE))
goto bad;
/* Step 1: Prepare scatter list. */
@@ -488,7 +495,7 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
/* Step 2: Allocate a cluster and context, if necessary. */
spin_lock_irqsave(&iommu->lock, flags);
- entry = pci_arena_alloc(&iommu->arena, npages);
+ entry = arena_alloc(&iommu->arena, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(entry < 0L))
@@ -510,10 +517,10 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n
/* Step 4: Create the mappings. */
prot = HV_PCI_MAP_ATTR_READ;
- if (direction != PCI_DMA_TODEVICE)
+ if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
- err = fill_sg(entry, pdev, sglist, used, nelems, prot);
+ err = fill_sg(entry, dev, sglist, used, nelems, prot);
if (unlikely(err < 0L))
goto iommu_map_failed;
@@ -526,13 +533,14 @@ bad:
iommu_map_failed:
spin_lock_irqsave(&iommu->lock, flags);
- pci_arena_free(&iommu->arena, entry, npages);
+ arena_free(&iommu->arena, entry, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
-static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -540,13 +548,13 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
long entry;
u32 devhandle, bus_addr;
- if (unlikely(direction == PCI_DMA_NONE)) {
+ if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
WARN_ON(1);
}
- iommu = pdev->dev.archdata.iommu;
- pbm = pdev->dev.archdata.host_controller;
+ iommu = dev->archdata.iommu;
+ pbm = dev->archdata.host_controller;
devhandle = pbm->devhandle;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
@@ -562,7 +570,7 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
spin_lock_irqsave(&iommu->lock, flags);
- pci_arena_free(&iommu->arena, entry, npages);
+ arena_free(&iommu->arena, entry, npages);
do {
unsigned long num;
@@ -576,25 +584,29 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+static void dma_4v_sync_single_for_cpu(struct device *dev,
+ dma_addr_t bus_addr, size_t sz,
+ enum dma_data_direction direction)
{
/* Nothing to do... */
}
-static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+static void dma_4v_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
{
/* Nothing to do... */
}
-const struct pci_iommu_ops pci_sun4v_iommu_ops = {
- .alloc_consistent = pci_4v_alloc_consistent,
- .free_consistent = pci_4v_free_consistent,
- .map_single = pci_4v_map_single,
- .unmap_single = pci_4v_unmap_single,
- .map_sg = pci_4v_map_sg,
- .unmap_sg = pci_4v_unmap_sg,
- .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
- .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
+const struct dma_ops sun4v_dma_ops = {
+ .alloc_coherent = dma_4v_alloc_coherent,
+ .free_coherent = dma_4v_free_coherent,
+ .map_single = dma_4v_map_single,
+ .unmap_single = dma_4v_unmap_single,
+ .map_sg = dma_4v_map_sg,
+ .unmap_sg = dma_4v_unmap_sg,
+ .sync_single_for_cpu = dma_4v_sync_single_for_cpu,
+ .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
};
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
@@ -928,13 +940,13 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
if (msi_num < 0)
return msi_num;
- devino = sun4v_build_msi(pbm->devhandle, virt_irq_p,
- pbm->msiq_first_devino,
- (pbm->msiq_first_devino +
- pbm->msiq_num));
- err = -ENOMEM;
- if (!devino)
+ err = sun4v_build_msi(pbm->devhandle, virt_irq_p,
+ pbm->msiq_first_devino,
+ (pbm->msiq_first_devino +
+ pbm->msiq_num));
+ if (err < 0)
goto out_err;
+ devino = err;
msiqid = ((devino - pbm->msiq_first_devino) +
pbm->msiq_first);
@@ -959,7 +971,7 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
goto out_err;
- pdev->dev.archdata.msi_num = msi_num;
+ sparc64_set_msi(*virt_irq_p, msi_num);
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
@@ -981,8 +993,6 @@ static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
out_err:
free_msi(pbm, msi_num);
- sun4v_destroy_msi(*virt_irq_p);
- *virt_irq_p = 0;
return err;
}
@@ -994,7 +1004,7 @@ static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
unsigned long msiqid, err;
unsigned int msi_num;
- msi_num = pdev->dev.archdata.msi_num;
+ msi_num = sparc64_get_msi(virt_irq);
err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
if (err) {
printk(KERN_ERR "%s: getmsiq gives error %lu\n",
@@ -1129,7 +1139,7 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
}
#endif /* !(CONFIG_PCI_MSI) */
-static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
+static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
struct pci_pbm_info *pbm;
@@ -1163,7 +1173,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
pci_sun4v_msi_init(pbm);
}
-void sun4v_pci_init(struct device_node *dp, char *model_name)
+void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
static int hvapi_negotiated = 0;
struct pci_controller_info *p;
@@ -1186,6 +1196,8 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
}
printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
vpci_major, vpci_minor);
+
+ dma_ops = &sun4v_dma_ops;
}
prop = of_find_property(dp, "reg", NULL);
@@ -1206,7 +1218,7 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
if (!page)
goto fatal_memory_error;
- per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
+ per_cpu(iommu_batch, i).pglist = (u64 *) page;
}
p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
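
Aside: with sun4v_pci_init() installing sun4v_dma_ops as dma_ops, drivers reach these routines through the generic DMA API on a struct device rather than the old pci_* wrappers. A minimal sketch, assuming the single-argument dma_mapping_error() of this kernel generation and using made-up names (my_tx, buf, len):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical transmit path, routed through dma_4v_map_single() and
 * dma_4v_unmap_single() via the dma_ops installed above. */
static int my_tx(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t mapping;

	mapping = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(mapping))
		return -ENOMEM;

	/* ... point the device at "mapping" and wait for completion ... */

	dma_unmap_single(&pdev->dev, mapping, len, DMA_TO_DEVICE);
	return 0;
}

The direction argument is now the generic enum dma_data_direction, which is why the conversion swaps PCI_DMA_TODEVICE for DMA_TO_DEVICE and PCI_DMA_ERROR_CODE for DMA_ERROR_CODE throughout.
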
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 5d6adea3967f..881a09ee4c4c 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -1,7 +1,6 @@
-/* $Id: power.c,v 1.10 2001/12/11 01:57:16 davem Exp $
- * power.c: Power management driver.
+/* power.c: Power management driver.
*
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
@@ -13,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/syscalls.h>
+#include <linux/reboot.h>
#include <asm/system.h>
#include <asm/auxio.h>
@@ -29,24 +29,15 @@
*/
int scons_pwroff = 1;
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
static void __iomem *power_reg;
-static DECLARE_WAIT_QUEUE_HEAD(powerd_wait);
-static int button_pressed;
-
static irqreturn_t power_handler(int irq, void *dev_id)
{
- if (button_pressed == 0) {
- button_pressed = 1;
- wake_up(&powerd_wait);
- }
+ orderly_poweroff(true);
/* FIXME: Check registers for status... */
return IRQ_HANDLED;
}
-#endif /* CONFIG_PCI */
extern void machine_halt(void);
extern void machine_alt_power_off(void);
@@ -55,20 +46,19 @@ static void (*poweroff_method)(void) = machine_alt_power_off;
void machine_power_off(void)
{
sstate_poweroff();
- if (!serial_console || scons_pwroff) {
-#ifdef CONFIG_PCI
+ if (strcmp(of_console_device->type, "serial") || scons_pwroff) {
if (power_reg) {
/* Both register bits seem to have the
* same effect, so until I figure out
* what the difference is...
*/
writel(AUXIO_PCIO_CPWR_OFF | AUXIO_PCIO_SPWR_OFF, power_reg);
- } else
-#endif /* CONFIG_PCI */
+ } else {
if (poweroff_method != NULL) {
poweroff_method();
/* not reached */
}
+ }
}
machine_halt();
}
@@ -76,40 +66,9 @@ void machine_power_off(void)
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
-#ifdef CONFIG_PCI
-static int powerd(void *__unused)
-{
- static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
- char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
- DECLARE_WAITQUEUE(wait, current);
-
- daemonize("powerd");
-
- add_wait_queue(&powerd_wait, &wait);
-again:
- for (;;) {
- set_task_state(current, TASK_INTERRUPTIBLE);
- if (button_pressed)
- break;
- flush_signals(current);
- schedule();
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&powerd_wait, &wait);
-
- /* Ok, down we go... */
- button_pressed = 0;
- if (kernel_execve("/sbin/shutdown", argv, envp) < 0) {
- printk("powerd: shutdown execution failed\n");
- add_wait_queue(&powerd_wait, &wait);
- goto again;
- }
- return 0;
-}
-
static int __init has_button_interrupt(unsigned int irq, struct device_node *dp)
{
- if (irq == PCI_IRQ_NONE)
+ if (irq == 0xffffffff)
return 0;
if (!of_find_property(dp, "button", NULL))
return 0;
@@ -124,23 +83,15 @@ static int __devinit power_probe(struct of_device *op, const struct of_device_id
power_reg = of_ioremap(res, 0, 0x4, "power");
- printk("%s: Control reg at %lx ... ",
+ printk(KERN_INFO "%s: Control reg at %lx\n",
op->node->name, res->start);
poweroff_method = machine_halt; /* able to use the standard halt */
if (has_button_interrupt(irq, op->node)) {
- if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
- printk("Failed to start power daemon.\n");
- return 0;
- }
- printk("powerd running.\n");
-
if (request_irq(irq,
power_handler, 0, "power", NULL) < 0)
- printk("power: Error, cannot register IRQ handler.\n");
- } else {
- printk("not using powerd.\n");
+ printk(KERN_ERR "power: Cannot setup IRQ handler.\n");
}
return 0;
@@ -161,7 +112,6 @@ static struct of_platform_driver power_driver = {
void __init power_init(void)
{
- of_register_driver(&power_driver, &of_bus_type);
+ of_register_driver(&power_driver, &of_platform_bus_type);
return;
}
-#endif /* CONFIG_PCI */
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index f5f97e2c669c..ca7cdfd55f72 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
@@ -29,6 +30,7 @@
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
+#include <linux/cpu.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
@@ -49,7 +51,7 @@
/* #define VERBOSE_SHOWREGS */
-static void sparc64_yield(void)
+static void sparc64_yield(int cpu)
{
if (tlb_type != hypervisor)
return;
@@ -57,7 +59,7 @@ static void sparc64_yield(void)
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
- while (!need_resched()) {
+ while (!need_resched() && !cpu_is_offline(cpu)) {
unsigned long pstate;
/* Disable interrupts. */
@@ -68,7 +70,7 @@ static void sparc64_yield(void)
: "=&r" (pstate)
: "i" (PSTATE_IE));
- if (!need_resched())
+ if (!need_resched() && !cpu_is_offline(cpu))
sun4v_cpu_yield();
/* Re-enable interrupts. */
@@ -86,15 +88,25 @@ static void sparc64_yield(void)
/* The idle loop on sparc64. */
void cpu_idle(void)
{
+ int cpu = smp_processor_id();
+
set_thread_flag(TIF_POLLING_NRFLAG);
while(1) {
tick_nohz_stop_sched_tick();
- while (!need_resched())
- sparc64_yield();
+
+ while (!need_resched() && !cpu_is_offline(cpu))
+ sparc64_yield(cpu);
+
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpu_is_offline(cpu))
+ cpu_play_dead();
+#endif
+
schedule();
preempt_disable();
}
@@ -108,7 +120,7 @@ extern void (*prom_keyboard)(void);
void machine_halt(void)
{
sstate_halt();
- if (!serial_console && prom_palette)
+ if (prom_palette)
prom_palette (1);
if (prom_keyboard)
prom_keyboard();
@@ -119,7 +131,7 @@ void machine_halt(void)
void machine_alt_power_off(void)
{
sstate_poweroff();
- if (!serial_console && prom_palette)
+ if (prom_palette)
prom_palette(1);
if (prom_keyboard)
prom_keyboard();
@@ -134,7 +146,7 @@ void machine_restart(char * cmd)
sstate_reboot();
p = strchr (reboot_command, '\n');
if (p) *p = 0;
- if (!serial_console && prom_palette)
+ if (prom_palette)
prom_palette (1);
if (prom_keyboard)
prom_keyboard();
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 61036b346664..a246e962e5a7 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -30,73 +30,9 @@
#include <asm/upa.h>
#include <asm/smp.h>
-static struct device_node *allnodes;
+extern struct device_node *allnodes; /* temporary while merging */
-/* use when traversing tree through the allnext, child, sibling,
- * or parent members of struct device_node.
- */
-static DEFINE_RWLOCK(devtree_lock);
-
-int of_device_is_compatible(const struct device_node *device,
- const char *compat)
-{
- const char* cp;
- int cplen, l;
-
- cp = of_get_property(device, "compatible", &cplen);
- if (cp == NULL)
- return 0;
- while (cplen > 0) {
- if (strncmp(cp, compat, strlen(compat)) == 0)
- return 1;
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(of_device_is_compatible);
-
-struct device_node *of_get_parent(const struct device_node *node)
-{
- struct device_node *np;
-
- if (!node)
- return NULL;
-
- np = node->parent;
-
- return np;
-}
-EXPORT_SYMBOL(of_get_parent);
-
-struct device_node *of_get_next_child(const struct device_node *node,
- struct device_node *prev)
-{
- struct device_node *next;
-
- next = prev ? prev->sibling : node->child;
- for (; next != 0; next = next->sibling) {
- break;
- }
-
- return next;
-}
-EXPORT_SYMBOL(of_get_next_child);
-
-struct device_node *of_find_node_by_path(const char *path)
-{
- struct device_node *np = allnodes;
-
- for (; np != 0; np = np->allnext) {
- if (np->full_name != 0 && strcmp(np->full_name, path) == 0)
- break;
- }
-
- return np;
-}
-EXPORT_SYMBOL(of_find_node_by_path);
+extern rwlock_t devtree_lock; /* temporary while merging */
struct device_node *of_find_node_by_phandle(phandle handle)
{
@@ -110,81 +46,6 @@ struct device_node *of_find_node_by_phandle(phandle handle)
}
EXPORT_SYMBOL(of_find_node_by_phandle);
-struct device_node *of_find_node_by_name(struct device_node *from,
- const char *name)
-{
- struct device_node *np;
-
- np = from ? from->allnext : allnodes;
- for (; np != NULL; np = np->allnext)
- if (np->name != NULL && strcmp(np->name, name) == 0)
- break;
-
- return np;
-}
-EXPORT_SYMBOL(of_find_node_by_name);
-
-struct device_node *of_find_node_by_type(struct device_node *from,
- const char *type)
-{
- struct device_node *np;
-
- np = from ? from->allnext : allnodes;
- for (; np != 0; np = np->allnext)
- if (np->type != 0 && strcmp(np->type, type) == 0)
- break;
-
- return np;
-}
-EXPORT_SYMBOL(of_find_node_by_type);
-
-struct device_node *of_find_compatible_node(struct device_node *from,
- const char *type, const char *compatible)
-{
- struct device_node *np;
-
- np = from ? from->allnext : allnodes;
- for (; np != 0; np = np->allnext) {
- if (type != NULL
- && !(np->type != 0 && strcmp(np->type, type) == 0))
- continue;
- if (of_device_is_compatible(np, compatible))
- break;
- }
-
- return np;
-}
-EXPORT_SYMBOL(of_find_compatible_node);
-
-struct property *of_find_property(const struct device_node *np,
- const char *name,
- int *lenp)
-{
- struct property *pp;
-
- for (pp = np->properties; pp != 0; pp = pp->next) {
- if (strcasecmp(pp->name, name) == 0) {
- if (lenp != 0)
- *lenp = pp->length;
- break;
- }
- }
- return pp;
-}
-EXPORT_SYMBOL(of_find_property);
-
-/*
- * Find a property with a given name for a given node
- * and return the value.
- */
-const void *of_get_property(const struct device_node *np, const char *name,
- int *lenp)
-{
- struct property *pp = of_find_property(np,name,lenp);
- return pp ? pp->value : NULL;
-}
-EXPORT_SYMBOL(of_get_property);
-
int of_getintprop_default(struct device_node *np, const char *name, int def)
{
struct property *prop;
@@ -198,36 +59,6 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
}
EXPORT_SYMBOL(of_getintprop_default);
-int of_n_addr_cells(struct device_node *np)
-{
- const int* ip;
- do {
- if (np->parent)
- np = np->parent;
- ip = of_get_property(np, "#address-cells", NULL);
- if (ip != NULL)
- return *ip;
- } while (np->parent);
- /* No #address-cells property for the root node, default to 2 */
- return 2;
-}
-EXPORT_SYMBOL(of_n_addr_cells);
-
-int of_n_size_cells(struct device_node *np)
-{
- const int* ip;
- do {
- if (np->parent)
- np = np->parent;
- ip = of_get_property(np, "#size-cells", NULL);
- if (ip != NULL)
- return *ip;
- } while (np->parent);
- /* No #size-cells property for the root node, default to 1 */
- return 1;
-}
-EXPORT_SYMBOL(of_n_size_cells);
-
int of_set_property(struct device_node *dp, const char *name, void *val, int len)
{
struct property **prevp;
@@ -276,6 +107,21 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
}
EXPORT_SYMBOL(of_set_property);
+int of_find_in_proplist(const char *list, const char *match, int len)
+{
+ while (len > 0) {
+ int l;
+
+ if (!strcmp(list, match))
+ return 1;
+ l = strlen(list) + 1;
+ list += l;
+ len -= l;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(of_find_in_proplist);
+
static unsigned int prom_early_allocated;
static void * __init prom_early_alloc(unsigned long size)
@@ -1200,7 +1046,8 @@ static void __init irq_trans_init(struct device_node *dp)
if (!strcmp(dp->name, "fhc") &&
!strcmp(dp->parent->name, "central"))
return central_irq_trans_init(dp);
- if (!strcmp(dp->name, "virtual-devices"))
+ if (!strcmp(dp->name, "virtual-devices") ||
+ !strcmp(dp->name, "niu"))
return sun4v_vdev_irq_trans_init(dp);
}
@@ -1737,8 +1584,12 @@ static void __init of_fill_in_cpu_data(void)
ncpus_probed++;
#ifdef CONFIG_SMP
- if (cpuid >= NR_CPUS)
+ if (cpuid >= NR_CPUS) {
+ printk(KERN_WARNING "Ignoring CPU %d which is "
+ ">= NR_CPUS (%d)\n",
+ cpuid, NR_CPUS);
continue;
+ }
#else
/* On uniprocessor we only want the values for the
* real physical cpu the kernel booted onto, however
@@ -1808,13 +1659,67 @@ static void __init of_fill_in_cpu_data(void)
#ifdef CONFIG_SMP
cpu_set(cpuid, cpu_present_map);
- cpu_set(cpuid, phys_cpu_present_map);
+ cpu_set(cpuid, cpu_possible_map);
#endif
}
smp_fill_in_sib_core_maps();
}
+struct device_node *of_console_device;
+EXPORT_SYMBOL(of_console_device);
+
+char *of_console_path;
+EXPORT_SYMBOL(of_console_path);
+
+char *of_console_options;
+EXPORT_SYMBOL(of_console_options);
+
+static void __init of_console_init(void)
+{
+ char *msg = "OF stdout device is: %s\n";
+ struct device_node *dp;
+ const char *type;
+ phandle node;
+
+ of_console_path = prom_early_alloc(256);
+ if (prom_ihandle2path(prom_stdout, of_console_path, 256) < 0) {
+ prom_printf("Cannot obtain path of stdout.\n");
+ prom_halt();
+ }
+ of_console_options = strrchr(of_console_path, ':');
+ if (of_console_options) {
+ of_console_options++;
+ if (*of_console_options == '\0')
+ of_console_options = NULL;
+ }
+
+ node = prom_inst2pkg(prom_stdout);
+ if (!node) {
+ prom_printf("Cannot resolve stdout node from "
+ "instance %08x.\n", prom_stdout);
+ prom_halt();
+ }
+
+ dp = of_find_node_by_phandle(node);
+ type = of_get_property(dp, "device_type", NULL);
+ if (!type) {
+ prom_printf("Console stdout lacks device_type property.\n");
+ prom_halt();
+ }
+
+ if (strcmp(type, "display") && strcmp(type, "serial")) {
+ prom_printf("Console device_type is neither display "
+ "nor serial.\n");
+ prom_halt();
+ }
+
+ of_console_device = dp;
+
+ prom_printf(msg, of_console_path);
+ printk(msg, of_console_path);
+}
+
void __init prom_build_devicetree(void)
{
struct device_node **nextp;
@@ -1827,6 +1732,8 @@ void __init prom_build_devicetree(void)
allnodes->child = build_tree(allnodes,
prom_getchild(allnodes->node),
&nextp);
+ of_console_init();
+
printk("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
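
Aside: the newly exported of_find_in_proplist() scans the NUL-separated string lists that OBP and the machine description use for certain properties. A small usage sketch — the helper and the property name passed to it are illustrative, not taken from this patch:

#include <asm/prom.h>

/* Hypothetical helper: test whether a string-list property of a node
 * contains a given token, e.g. a capability name. */
static int node_has_token(struct device_node *dp, const char *prop,
			  const char *token)
{
	int len;
	const char *list = of_get_property(dp, prop, &len);

	return list && of_find_in_proplist(list, token, len);
}
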
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index a1fd9bcc0b87..d1fb13ba02b5 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -26,11 +26,6 @@
#define MAP_BASE ((u32)0xc0000000)
-struct sbus_info {
- struct iommu iommu;
- struct strbuf strbuf;
-};
-
/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE 0x2400UL
#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
@@ -44,19 +39,6 @@ struct sbus_info {
#define IOMMU_DRAM_VALID (1UL << 30UL)
-static void __iommu_flushall(struct iommu *iommu)
-{
- unsigned long tag;
- int entry;
-
- tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
- for (entry = 0; entry < 16; entry++) {
- upa_writeq(0, tag);
- tag += 8UL;
- }
- upa_readq(iommu->write_complete_reg);
-}
-
/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE 0x2800UL
#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
@@ -69,511 +51,10 @@ static void __iommu_flushall(struct iommu *iommu)
#define STRBUF_TAG_VALID 0x02UL
-static void sbus_strbuf_flush(struct iommu *iommu, struct strbuf *strbuf, u32 base, unsigned long npages, int direction)
-{
- unsigned long n;
- int limit;
-
- n = npages;
- while (n--)
- upa_writeq(base + (n << IO_PAGE_SHIFT), strbuf->strbuf_pflush);
-
- /* If the device could not have possibly put dirty data into
- * the streaming cache, no flush-flag synchronization needs
- * to be performed.
- */
- if (direction == SBUS_DMA_TODEVICE)
- return;
-
- *(strbuf->strbuf_flushflag) = 0UL;
-
- /* Whoopee cushion! */
- upa_writeq(strbuf->strbuf_flushflag_pa, strbuf->strbuf_fsync);
- upa_readq(iommu->write_complete_reg);
-
- limit = 100000;
- while (*(strbuf->strbuf_flushflag) == 0UL) {
- limit--;
- if (!limit)
- break;
- udelay(1);
- rmb();
- }
- if (!limit)
- printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
- "vaddr[%08x] npages[%ld]\n",
- base, npages);
-}
-
-/* Based largely upon the ppc64 iommu allocator. */
-static long sbus_arena_alloc(struct iommu *iommu, unsigned long npages)
-{
- struct iommu_arena *arena = &iommu->arena;
- unsigned long n, i, start, end, limit;
- int pass;
-
- limit = arena->limit;
- start = arena->hint;
- pass = 0;
-
-again:
- n = find_next_zero_bit(arena->map, limit, start);
- end = n + npages;
- if (unlikely(end >= limit)) {
- if (likely(pass < 1)) {
- limit = start;
- start = 0;
- __iommu_flushall(iommu);
- pass++;
- goto again;
- } else {
- /* Scanned the whole thing, give up. */
- return -1;
- }
- }
-
- for (i = n; i < end; i++) {
- if (test_bit(i, arena->map)) {
- start = i + 1;
- goto again;
- }
- }
-
- for (i = n; i < end; i++)
- __set_bit(i, arena->map);
-
- arena->hint = end;
-
- return n;
-}
-
-static void sbus_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
-{
- unsigned long i;
-
- for (i = base; i < (base + npages); i++)
- __clear_bit(i, arena->map);
-}
-
-static void sbus_iommu_table_init(struct iommu *iommu, unsigned int tsbsize)
-{
- unsigned long tsbbase, order, sz, num_tsb_entries;
-
- num_tsb_entries = tsbsize / sizeof(iopte_t);
-
- /* Setup initial software IOMMU state. */
- spin_lock_init(&iommu->lock);
- iommu->page_table_map_base = MAP_BASE;
-
- /* Allocate and initialize the free area map. */
- sz = num_tsb_entries / 8;
- sz = (sz + 7UL) & ~7UL;
- iommu->arena.map = kzalloc(sz, GFP_KERNEL);
- if (!iommu->arena.map) {
- prom_printf("SBUS_IOMMU: Error, kmalloc(arena.map) failed.\n");
- prom_halt();
- }
- iommu->arena.limit = num_tsb_entries;
-
- /* Now allocate and setup the IOMMU page table itself. */
- order = get_order(tsbsize);
- tsbbase = __get_free_pages(GFP_KERNEL, order);
- if (!tsbbase) {
- prom_printf("IOMMU: Error, gfp(tsb) failed.\n");
- prom_halt();
- }
- iommu->page_table = (iopte_t *)tsbbase;
- memset(iommu->page_table, 0, tsbsize);
-}
-
-static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
-{
- long entry;
-
- entry = sbus_arena_alloc(iommu, npages);
- if (unlikely(entry < 0))
- return NULL;
-
- return iommu->page_table + entry;
-}
-
-static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
-{
- sbus_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
-}
-
-void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
-{
- struct sbus_info *info;
- struct iommu *iommu;
- iopte_t *iopte;
- unsigned long flags, order, first_page;
- void *ret;
- int npages;
-
- size = IO_PAGE_ALIGN(size);
- order = get_order(size);
- if (order >= 10)
- return NULL;
-
- first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
- if (first_page == 0UL)
- return NULL;
- memset((char *)first_page, 0, PAGE_SIZE << order);
-
- info = sdev->bus->iommu;
- iommu = &info->iommu;
-
- spin_lock_irqsave(&iommu->lock, flags);
- iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- if (unlikely(iopte == NULL)) {
- free_pages(first_page, order);
- return NULL;
- }
-
- *dvma_addr = (iommu->page_table_map_base +
- ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
- ret = (void *) first_page;
- npages = size >> IO_PAGE_SHIFT;
- first_page = __pa(first_page);
- while (npages--) {
- iopte_val(*iopte) = (IOPTE_VALID | IOPTE_CACHE |
- IOPTE_WRITE |
- (first_page & IOPTE_PAGE));
- iopte++;
- first_page += IO_PAGE_SIZE;
- }
-
- return ret;
-}
-
-void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
-{
- struct sbus_info *info;
- struct iommu *iommu;
- iopte_t *iopte;
- unsigned long flags, order, npages;
-
- npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
- info = sdev->bus->iommu;
- iommu = &info->iommu;
- iopte = iommu->page_table +
- ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
- free_npages(iommu, dvma - iommu->page_table_map_base, npages);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- order = get_order(size);
- if (order < 10)
- free_pages((unsigned long)cpu, order);
-}
-
-dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t sz, int direction)
-{
- struct sbus_info *info;
- struct iommu *iommu;
- iopte_t *base;
- unsigned long flags, npages, oaddr;
- unsigned long i, base_paddr;
- u32 bus_addr, ret;
- unsigned long iopte_protection;
-
- info = sdev->bus->iommu;
- iommu = &info->iommu;
-
- if (unlikely(direction == SBUS_DMA_NONE))
- BUG();
-
- oaddr = (unsigned long)ptr;
- npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
- npages >>= IO_PAGE_SHIFT;
-
- spin_lock_irqsave(&iommu->lock, flags);
- base = alloc_npages(iommu, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- if (unlikely(!base))
- BUG();
-
- bus_addr = (iommu->page_table_map_base +
- ((base - iommu->page_table) << IO_PAGE_SHIFT));
- ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
- base_paddr = __pa(oaddr & IO_PAGE_MASK);
-
- iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
- if (direction != SBUS_DMA_TODEVICE)
- iopte_protection |= IOPTE_WRITE;
-
- for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
- iopte_val(*base) = iopte_protection | base_paddr;
-
- return ret;
-}
-
-void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
-{
- struct sbus_info *info = sdev->bus->iommu;
- struct iommu *iommu = &info->iommu;
- struct strbuf *strbuf = &info->strbuf;
- iopte_t *base;
- unsigned long flags, npages, i;
-
- if (unlikely(direction == SBUS_DMA_NONE))
- BUG();
-
- npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
- npages >>= IO_PAGE_SHIFT;
- base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
- bus_addr &= IO_PAGE_MASK;
-
- spin_lock_irqsave(&iommu->lock, flags);
- sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
- for (i = 0; i < npages; i++)
- iopte_val(base[i]) = 0UL;
- free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-#define SG_ENT_PHYS_ADDRESS(SG) \
- (__pa(page_address((SG)->page)) + (SG)->offset)
-
-static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
- int nused, int nelems, unsigned long iopte_protection)
-{
- struct scatterlist *dma_sg = sg;
- struct scatterlist *sg_end = sg + nelems;
- int i;
-
- for (i = 0; i < nused; i++) {
- unsigned long pteval = ~0UL;
- u32 dma_npages;
-
- dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
- dma_sg->dma_length +
- ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
- do {
- unsigned long offset;
- signed int len;
-
- /* If we are here, we know we have at least one
- * more page to map. So walk forward until we
- * hit a page crossing, and begin creating new
- * mappings from that spot.
- */
- for (;;) {
- unsigned long tmp;
-
- tmp = SG_ENT_PHYS_ADDRESS(sg);
- len = sg->length;
- if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = tmp & IO_PAGE_MASK;
- offset = tmp & (IO_PAGE_SIZE - 1UL);
- break;
- }
- if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
- offset = 0UL;
- len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
- break;
- }
- sg++;
- }
-
- pteval = iopte_protection | (pteval & IOPTE_PAGE);
- while (len > 0) {
- *iopte++ = __iopte(pteval);
- pteval += IO_PAGE_SIZE;
- len -= (IO_PAGE_SIZE - offset);
- offset = 0;
- dma_npages--;
- }
-
- pteval = (pteval & IOPTE_PAGE) + len;
- sg++;
-
- /* Skip over any tail mappings we've fully mapped,
- * adjusting pteval along the way. Stop when we
- * detect a page crossing event.
- */
- while (sg < sg_end &&
- (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
- (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
- ((pteval ^
- (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
- pteval += sg->length;
- sg++;
- }
- if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
- pteval = ~0UL;
- } while (dma_npages != 0);
- dma_sg++;
- }
-}
-
-int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
-{
- struct sbus_info *info;
- struct iommu *iommu;
- unsigned long flags, npages, iopte_protection;
- iopte_t *base;
- u32 dma_base;
- struct scatterlist *sgtmp;
- int used;
-
- /* Fast path single entry scatterlists. */
- if (nelems == 1) {
- sglist->dma_address =
- sbus_map_single(sdev,
- (page_address(sglist->page) + sglist->offset),
- sglist->length, direction);
- sglist->dma_length = sglist->length;
- return 1;
- }
-
- info = sdev->bus->iommu;
- iommu = &info->iommu;
-
- if (unlikely(direction == SBUS_DMA_NONE))
- BUG();
-
- npages = prepare_sg(sglist, nelems);
-
- spin_lock_irqsave(&iommu->lock, flags);
- base = alloc_npages(iommu, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
-
- if (unlikely(base == NULL))
- BUG();
-
- dma_base = iommu->page_table_map_base +
- ((base - iommu->page_table) << IO_PAGE_SHIFT);
-
- /* Normalize DVMA addresses. */
- used = nelems;
-
- sgtmp = sglist;
- while (used && sgtmp->dma_length) {
- sgtmp->dma_address += dma_base;
- sgtmp++;
- used--;
- }
- used = nelems - used;
-
- iopte_protection = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
- if (direction != SBUS_DMA_TODEVICE)
- iopte_protection |= IOPTE_WRITE;
-
- fill_sg(base, sglist, used, nelems, iopte_protection);
-
-#ifdef VERIFY_SG
- verify_sglist(sglist, nelems, base, npages);
-#endif
-
- return used;
-}
-
-void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
-{
- struct sbus_info *info;
- struct iommu *iommu;
- struct strbuf *strbuf;
- iopte_t *base;
- unsigned long flags, i, npages;
- u32 bus_addr;
-
- if (unlikely(direction == SBUS_DMA_NONE))
- BUG();
-
- info = sdev->bus->iommu;
- iommu = &info->iommu;
- strbuf = &info->strbuf;
-
- bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
- for (i = 1; i < nelems; i++)
- if (sglist[i].dma_length == 0)
- break;
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
- bus_addr) >> IO_PAGE_SHIFT;
-
- base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
- spin_lock_irqsave(&iommu->lock, flags);
- sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
- for (i = 0; i < npages; i++)
- iopte_val(base[i]) = 0UL;
- free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t bus_addr, size_t sz, int direction)
-{
- struct sbus_info *info;
- struct iommu *iommu;
- struct strbuf *strbuf;
- unsigned long flags, npages;
-
- info = sdev->bus->iommu;
- iommu = &info->iommu;
- strbuf = &info->strbuf;
-
- npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
- npages >>= IO_PAGE_SHIFT;
- bus_addr &= IO_PAGE_MASK;
-
- spin_lock_irqsave(&iommu->lock, flags);
- sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
-{
-}
-
-void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sglist, int nelems, int direction)
-{
- struct sbus_info *info;
- struct iommu *iommu;
- struct strbuf *strbuf;
- unsigned long flags, npages, i;
- u32 bus_addr;
-
- info = sdev->bus->iommu;
- iommu = &info->iommu;
- strbuf = &info->strbuf;
-
- bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
- for (i = 0; i < nelems; i++) {
- if (!sglist[i].dma_length)
- break;
- }
- i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
- - bus_addr) >> IO_PAGE_SHIFT;
-
- spin_lock_irqsave(&iommu->lock, flags);
- sbus_strbuf_flush(iommu, strbuf, bus_addr, npages, direction);
- spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
-{
-}
-
/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
{
- struct sbus_info *info = sdev->bus->iommu;
- struct iommu *iommu = &info->iommu;
+ struct iommu *iommu = sdev->ofdev.dev.archdata.iommu;
int slot = sdev->slot;
unsigned long cfg_reg;
u64 val;
@@ -713,8 +194,7 @@ static unsigned long sysio_imap_to_iclr(unsigned long imap)
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
- struct sbus_info *info = sbus->iommu;
- struct iommu *iommu = &info->iommu;
+ struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long imap, iclr;
int sbus_level = 0;
@@ -776,8 +256,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
struct sbus_bus *sbus = dev_id;
- struct sbus_info *info = sbus->iommu;
- struct iommu *iommu = &info->iommu;
+ struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
@@ -849,8 +328,7 @@ static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
struct sbus_bus *sbus = dev_id;
- struct sbus_info *info = sbus->iommu;
- struct iommu *iommu = &info->iommu;
+ struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
@@ -927,8 +405,7 @@ static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
struct sbus_bus *sbus = dev_id;
- struct sbus_info *info = sbus->iommu;
- struct iommu *iommu = &info->iommu;
+ struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
unsigned long afsr_reg, afar_reg, reg_base;
unsigned long afsr, afar, error_bits;
int reported;
@@ -995,8 +472,7 @@ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
- struct sbus_info *info = sbus->iommu;
- struct iommu *iommu = &info->iommu;
+ struct iommu *iommu = sbus->ofdev.dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned int irq;
u64 control;
@@ -1041,7 +517,6 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
{
const struct linux_prom64_registers *pr;
struct device_node *dp;
- struct sbus_info *info;
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long regs, reg_base;
@@ -1054,25 +529,28 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
pr = of_get_property(dp, "reg", NULL);
if (!pr) {
- prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
+ prom_printf("sbus_iommu_init: Cannot map SYSIO "
+ "control registers.\n");
prom_halt();
}
regs = pr->phys_addr;
- info = kzalloc(sizeof(*info), GFP_ATOMIC);
- if (info == NULL) {
- prom_printf("sbus_iommu_init: Fatal error, "
- "kmalloc(info) failed\n");
- prom_halt();
- }
+ iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
+ if (!iommu)
+ goto fatal_memory_error;
+ strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);
+ if (!strbuf)
+ goto fatal_memory_error;
- iommu = &info->iommu;
- strbuf = &info->strbuf;
+ sbus->ofdev.dev.archdata.iommu = iommu;
+ sbus->ofdev.dev.archdata.stc = strbuf;
reg_base = regs + SYSIO_IOMMUREG_BASE;
iommu->iommu_control = reg_base + IOMMU_CONTROL;
iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
iommu->iommu_flush = reg_base + IOMMU_FLUSH;
+ iommu->iommu_tags = iommu->iommu_control +
+ (IOMMU_TAGDIAG - IOMMU_CONTROL);
reg_base = regs + SYSIO_STRBUFREG_BASE;
strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
@@ -1093,14 +571,12 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
*/
iommu->write_complete_reg = regs + 0x2000UL;
- /* Link into SYSIO software state. */
- sbus->iommu = info;
-
printk("SYSIO: UPA portID %x, at %016lx\n",
sbus->portid, regs);
/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
- sbus_iommu_table_init(iommu, IO_TSB_SIZE);
+ if (iommu_table_init(iommu, IO_TSB_SIZE, MAP_BASE, 0xffffffff))
+ goto fatal_memory_error;
control = upa_readq(iommu->iommu_control);
control = ((7UL << 16UL) |
@@ -1157,6 +633,10 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
starfire_hookup(sbus->portid);
sysio_register_error_handlers(sbus);
+ return;
+
+fatal_memory_error:
+ prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");
}
void sbus_fill_device_irq(struct sbus_dev *sdev)
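
The sbus.c hunks above drop the bus-private struct sbus_info and instead hang the IOMMU and streaming-buffer state off the generic device archdata, with a single fatal_memory_error label covering both kzalloc() failures. A minimal user-space sketch of that per-device archdata pattern follows; the structure layouts and names here are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel structures. */
struct iommu   { unsigned long iommu_control; };
struct strbuf  { unsigned long strbuf_control; };

struct dev_archdata {
	struct iommu  *iommu;
	struct strbuf *stc;
};

struct device {
	struct dev_archdata archdata;
};

static int demo_iommu_init(struct device *dev)
{
	struct iommu *iommu;
	struct strbuf *strbuf;

	iommu = calloc(1, sizeof(*iommu));    /* kzalloc(..., GFP_ATOMIC) in the kernel */
	if (!iommu)
		goto fatal_memory_error;
	strbuf = calloc(1, sizeof(*strbuf));
	if (!strbuf)
		goto fatal_memory_error;

	/* Generic code can now reach both through the device itself,
	 * rather than through a bus-private container struct. */
	dev->archdata.iommu = iommu;
	dev->archdata.stc = strbuf;
	return 0;

fatal_memory_error:
	free(iommu);    /* free(NULL) is a no-op, so this is safe on either path */
	fprintf(stderr, "demo_iommu_init: Fatal memory allocation error.\n");
	return -1;
}

int main(void)
{
	struct device dev = { { NULL, NULL } };

	if (demo_iommu_init(&dev) == 0)
		printf("archdata: iommu=%p stc=%p\n",
		       (void *) dev.archdata.iommu, (void *) dev.archdata.stc);
	return 0;
}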
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 7490cc670a53..0f5be828ee92 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -133,33 +133,6 @@ static void __init process_switch(char c)
}
}
-static void __init process_console(char *commands)
-{
- serial_console = 0;
- commands += 8;
- /* Linux-style serial */
- if (!strncmp(commands, "ttyS", 4))
- serial_console = simple_strtoul(commands + 4, NULL, 10) + 1;
- else if (!strncmp(commands, "tty", 3)) {
- char c = *(commands + 3);
- /* Solaris-style serial */
- if (c == 'a' || c == 'b') {
- serial_console = c - 'a' + 1;
- prom_printf ("Using /dev/tty%c as console.\n", c);
- }
- /* else Linux-style fbcon, not serial */
- }
-#if defined(CONFIG_PROM_CONSOLE)
- if (!strncmp(commands, "prom", 4)) {
- char *p;
-
- for (p = commands - 8; *p && *p != ' '; p++)
- *p = ' ';
- conswitchp = &prom_con;
- }
-#endif
-}
-
static void __init boot_flags_init(char *commands)
{
while (*commands) {
@@ -176,9 +149,7 @@ static void __init boot_flags_init(char *commands)
process_switch(*commands++);
continue;
}
- if (!strncmp(commands, "console=", 8)) {
- process_console(commands);
- } else if (!strncmp(commands, "mem=", 4)) {
+ if (!strncmp(commands, "mem=", 4)) {
/*
* "mem=XXX[kKmM]" overrides the PROM-reported
* memory size.
@@ -378,44 +349,6 @@ void __init setup_arch(char **cmdline_p)
paging_init();
}
-static int __init set_preferred_console(void)
-{
- int idev, odev;
-
- /* The user has requested a console so this is already set up. */
- if (serial_console >= 0)
- return -EBUSY;
-
- idev = prom_query_input_device();
- odev = prom_query_output_device();
- if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
- serial_console = 0;
- } else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
- serial_console = 1;
- } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
- serial_console = 2;
- } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) {
- serial_console = 3;
- } else if (idev == PROMDEV_IVCONS && odev == PROMDEV_OVCONS) {
- /* sunhv_console_init() doesn't check the serial_console
- * value anyways...
- */
- serial_console = 4;
- return add_preferred_console("ttyHV", 0, NULL);
- } else {
- prom_printf("Inconsistent console: "
- "input %d, output %d\n",
- idev, odev);
- prom_halt();
- }
-
- if (serial_console)
- return add_preferred_console("ttyS", serial_console - 1, NULL);
-
- return -ENODEV;
-}
-console_initcall(set_preferred_console);
-
/* BUFFER is PAGE_SIZE bytes long. */
extern char *sparc_cpu_type;
@@ -442,7 +375,6 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
"D$ parity tl1\t: %u\n"
"I$ parity tl1\t: %u\n"
#ifndef CONFIG_SMP
- "Cpu0Bogo\t: %lu.%02lu\n"
"Cpu0ClkTck\t: %016lx\n"
#endif
,
@@ -457,9 +389,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
dcache_parity_tl1_occurred,
icache_parity_tl1_occurred
#ifndef CONFIG_SMP
- , cpu_data(0).udelay_val/(500000/HZ),
- (cpu_data(0).udelay_val/(5000/HZ)) % 100,
- cpu_data(0).clock_tick
+ , cpu_data(0).clock_tick
#endif
);
#ifdef CONFIG_SMP
@@ -511,5 +441,4 @@ void sun_do_break(void)
prom_cmdline();
}
-int serial_console = -1;
int stop_a_enabled = 1;
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index 203e87301005..fb13775b3682 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -289,9 +289,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
struct rt_signal_frame __user *sf;
unsigned long tpc, tnpc, tstate;
__siginfo_fpu_t __user *fpu_save;
- mm_segment_t old_fs;
sigset_t set;
- stack_t st;
int err;
/* Always make any pending restarted system calls return -EINTR */
@@ -327,20 +325,13 @@ void do_rt_sigreturn(struct pt_regs *regs)
err |= restore_fpu_state(regs, &sf->fpu_state);
err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
- err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
-
+ err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
+
if (err)
goto segv;
-
+
regs->tpc = tpc;
regs->tnpc = tnpc;
-
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
- set_fs(old_fs);
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 4dcd7d0b60f2..c73b7a48b036 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1,6 +1,6 @@
/* smp.c: Sparc64 SMP support.
*
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
@@ -28,6 +28,8 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
+#include <asm/hvtramp.h>
+#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
@@ -41,22 +43,26 @@
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
+#include <asm/ldc.h>
+#include <asm/hypervisor.h>
extern void calibrate_delay(void);
int sparc64_multi_core __read_mostly;
-/* Please don't make this stuff initdata!!! --DaveM */
-unsigned char boot_cpu_id;
-
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
+EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_SYMBOL(cpu_core_map);
+
static cpumask_t smp_commenced_mask;
-static cpumask_t cpu_callout_map;
void smp_info(struct seq_file *m)
{
@@ -73,18 +79,17 @@ void smp_bogo(struct seq_file *m)
for_each_online_cpu(i)
seq_printf(m,
- "Cpu%dBogo\t: %lu.%02lu\n"
"Cpu%dClkTck\t: %016lx\n",
- i, cpu_data(i).udelay_val / (500000/HZ),
- (cpu_data(i).udelay_val / (5000/HZ)) % 100,
i, cpu_data(i).clock_tick);
}
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
+
extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0;
-void __init smp_callin(void)
+void __devinit smp_callin(void)
{
int cpuid = hard_smp_processor_id();
@@ -102,8 +107,6 @@ void __init smp_callin(void)
local_irq_enable();
- calibrate_delay();
- cpu_data(cpuid).udelay_val = loops_per_jiffy;
callin_flag = 1;
__asm__ __volatile__("membar #Sync\n\t"
"flush %%g6" : : : "memory");
@@ -120,7 +123,9 @@ void __init smp_callin(void)
while (!cpu_isset(cpuid, smp_commenced_mask))
rmb();
+ spin_lock(&call_lock);
cpu_set(cpuid, cpu_online_map);
+ spin_unlock(&call_lock);
/* idle thread is expected to have preempt disabled */
preempt_disable();
@@ -268,7 +273,66 @@ static void smp_synchronize_one_tick(int cpu)
spin_unlock_irqrestore(&itc_sync_lock, flags);
}
-extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
+#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
+/* XXX Put this in some common place. XXX */
+static unsigned long kimage_addr_to_ra(void *p)
+{
+ unsigned long val = (unsigned long) p;
+
+ return kern_base + (val - KERNBASE);
+}
+
+static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+{
+ extern unsigned long sparc64_ttable_tl0;
+ extern unsigned long kern_locked_tte_data;
+ extern int bigkernel;
+ struct hvtramp_descr *hdesc;
+ unsigned long trampoline_ra;
+ struct trap_per_cpu *tb;
+ u64 tte_vaddr, tte_data;
+ unsigned long hv_err;
+
+ hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
+ if (!hdesc) {
+ printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
+ "hvtramp_descr.\n");
+ return;
+ }
+
+ hdesc->cpu = cpu;
+ hdesc->num_mappings = (bigkernel ? 2 : 1);
+
+ tb = &trap_block[cpu];
+ tb->hdesc = hdesc;
+
+ hdesc->fault_info_va = (unsigned long) &tb->fault_info;
+ hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
+
+ hdesc->thread_reg = thread_reg;
+
+ tte_vaddr = (unsigned long) KERNBASE;
+ tte_data = kern_locked_tte_data;
+
+ hdesc->maps[0].vaddr = tte_vaddr;
+ hdesc->maps[0].tte = tte_data;
+ if (bigkernel) {
+ tte_vaddr += 0x400000;
+ tte_data += 0x400000;
+ hdesc->maps[1].vaddr = tte_vaddr;
+ hdesc->maps[1].tte = tte_data;
+ }
+
+ trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
+
+ hv_err = sun4v_cpu_start(cpu, trampoline_ra,
+ kimage_addr_to_ra(&sparc64_ttable_tl0),
+ __pa(hdesc));
+ if (hv_err)
+ printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
+ "gives error %lu\n", hv_err);
+}
+#endif
extern unsigned long sparc64_cpu_startup;
@@ -280,6 +344,7 @@ static struct thread_info *cpu_new_thread = NULL;
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
+ struct trap_per_cpu *tb = &trap_block[cpu];
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
unsigned long cookie =
@@ -288,22 +353,26 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
int timeout, ret;
p = fork_idle(cpu);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
callin_flag = 0;
cpu_new_thread = task_thread_info(p);
- cpu_set(cpu, cpu_callout_map);
if (tlb_type == hypervisor) {
- /* Alloc the mondo queues, cpu will load them. */
- sun4v_init_mondo_queues(0, cpu, 1, 0);
-
- prom_startcpu_cpuid(cpu, entry, cookie);
+#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
+ if (ldom_domaining_enabled)
+ ldom_startcpu_cpuid(cpu,
+ (unsigned long) cpu_new_thread);
+ else
+#endif
+ prom_startcpu_cpuid(cpu, entry, cookie);
} else {
struct device_node *dp = of_find_node_by_cpuid(cpu);
prom_startcpu(dp->node, entry, cookie);
}
- for (timeout = 0; timeout < 5000000; timeout++) {
+ for (timeout = 0; timeout < 50000; timeout++) {
if (callin_flag)
break;
udelay(100);
@@ -313,11 +382,15 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
ret = 0;
} else {
printk("Processor %d is stuck.\n", cpu);
- cpu_clear(cpu, cpu_callout_map);
ret = -ENODEV;
}
cpu_new_thread = NULL;
+ if (tb->hdesc) {
+ kfree(tb->hdesc);
+ tb->hdesc = NULL;
+ }
+
return ret;
}
@@ -720,7 +793,6 @@ struct call_data_struct {
int wait;
};
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;
extern unsigned long xcall_call_function;
@@ -1152,61 +1224,14 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
preempt_enable();
}
-void __init smp_tick_init(void)
-{
- boot_cpu_id = hard_smp_processor_id();
-}
-
/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
-static void __init smp_tune_scheduling(void)
-{
- unsigned int smallest = ~0U;
- int i;
-
- for (i = 0; i < NR_CPUS; i++) {
- unsigned int val = cpu_data(i).ecache_size;
-
- if (val && val < smallest)
- smallest = val;
- }
-
- /* Any value less than 256K is nonsense. */
- if (smallest < (256U * 1024U))
- smallest = 256 * 1024;
-
- max_cache_size = smallest;
-
- if (smallest < 1U * 1024U * 1024U)
- printk(KERN_INFO "Using max_cache_size of %uKB\n",
- smallest / 1024U);
- else
- printk(KERN_INFO "Using max_cache_size of %uMB\n",
- smallest / 1024U / 1024U);
-}
-
-/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
-
- if (num_possible_cpus() > max_cpus) {
- for_each_possible_cpu(i) {
- if (i != boot_cpu_id) {
- cpu_clear(i, phys_cpu_present_map);
- cpu_clear(i, cpu_present_map);
- if (num_possible_cpus() <= max_cpus)
- break;
- }
- }
- }
-
- cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
- smp_tune_scheduling();
}
void __devinit smp_prepare_boot_cpu(void)
@@ -1217,30 +1242,32 @@ void __devinit smp_fill_in_sib_core_maps(void)
{
unsigned int i;
- for_each_possible_cpu(i) {
+ for_each_present_cpu(i) {
unsigned int j;
+ cpus_clear(cpu_core_map[i]);
if (cpu_data(i).core_id == 0) {
cpu_set(i, cpu_core_map[i]);
continue;
}
- for_each_possible_cpu(j) {
+ for_each_present_cpu(j) {
if (cpu_data(i).core_id ==
cpu_data(j).core_id)
cpu_set(j, cpu_core_map[i]);
}
}
- for_each_possible_cpu(i) {
+ for_each_present_cpu(i) {
unsigned int j;
+ cpus_clear(cpu_sibling_map[i]);
if (cpu_data(i).proc_id == -1) {
cpu_set(i, cpu_sibling_map[i]);
continue;
}
- for_each_possible_cpu(j) {
+ for_each_present_cpu(j) {
if (cpu_data(i).proc_id ==
cpu_data(j).proc_id)
cpu_set(j, cpu_sibling_map[i]);
@@ -1269,18 +1296,112 @@ int __cpuinit __cpu_up(unsigned int cpu)
return ret;
}
-void __init smp_cpus_done(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+void cpu_play_dead(void)
{
- unsigned long bogosum = 0;
+ int cpu = smp_processor_id();
+ unsigned long pstate;
+
+ idle_task_exit();
+
+ if (tlb_type == hypervisor) {
+ struct trap_per_cpu *tb = &trap_block[cpu];
+
+ sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
+ tb->cpu_mondo_pa, 0);
+ sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
+ tb->dev_mondo_pa, 0);
+ sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
+ tb->resum_mondo_pa, 0);
+ sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
+ tb->nonresum_mondo_pa, 0);
+ }
+
+ cpu_clear(cpu, smp_commenced_mask);
+ membar_safe("#Sync");
+
+ local_irq_disable();
+
+ __asm__ __volatile__(
+ "rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
+ : "=r" (pstate)
+ : "i" (PSTATE_IE));
+
+ while (1)
+ barrier();
+}
+
+int __cpu_disable(void)
+{
+ int cpu = smp_processor_id();
+ cpuinfo_sparc *c;
int i;
- for_each_online_cpu(i)
- bogosum += cpu_data(i).udelay_val;
- printk("Total of %ld processors activated "
- "(%lu.%02lu BogoMIPS).\n",
- (long) num_online_cpus(),
- bogosum/(500000/HZ),
- (bogosum/(5000/HZ))%100);
+ for_each_cpu_mask(i, cpu_core_map[cpu])
+ cpu_clear(cpu, cpu_core_map[i]);
+ cpus_clear(cpu_core_map[cpu]);
+
+ for_each_cpu_mask(i, cpu_sibling_map[cpu])
+ cpu_clear(cpu, cpu_sibling_map[i]);
+ cpus_clear(cpu_sibling_map[cpu]);
+
+ c = &cpu_data(cpu);
+
+ c->core_id = 0;
+ c->proc_id = -1;
+
+ spin_lock(&call_lock);
+ cpu_clear(cpu, cpu_online_map);
+ spin_unlock(&call_lock);
+
+ smp_wmb();
+
+ /* Make sure no interrupts point to this cpu. */
+ fixup_irqs();
+
+ local_irq_enable();
+ mdelay(1);
+ local_irq_disable();
+
+ return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ smp_rmb();
+ if (!cpu_isset(cpu, smp_commenced_mask))
+ break;
+ msleep(100);
+ }
+ if (cpu_isset(cpu, smp_commenced_mask)) {
+ printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+ } else {
+#if defined(CONFIG_SUN_LDOMS)
+ unsigned long hv_err;
+ int limit = 100;
+
+ do {
+ hv_err = sun4v_cpu_stop(cpu);
+ if (hv_err == HV_EOK) {
+ cpu_clear(cpu, cpu_present_map);
+ break;
+ }
+ } while (--limit > 0);
+ if (limit <= 0) {
+ printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
+ hv_err);
+ }
+#endif
+ }
+}
+#endif
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
}
void smp_send_reschedule(int cpu)
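
One small helper in the smp.c additions above is kimage_addr_to_ra(): it converts a kernel-image virtual address into the real (physical) address that the sun4v hypervisor calls expect, which is simply the image's load base plus the offset from KERNBASE. A tiny standalone sketch with made-up constants (the real kern_base is discovered at boot and KERNBASE is a link-time constant):

#include <stdio.h>

/* Illustrative values only. */
static const unsigned long KERNBASE_DEMO  = 0x400000UL;
static const unsigned long kern_base_demo = 0x2000000000UL;

static unsigned long kimage_addr_to_ra_demo(void *p)
{
	unsigned long val = (unsigned long) p;

	/* offset within the kernel image, rebased onto its physical load address */
	return kern_base_demo + (val - KERNBASE_DEMO);
}

int main(void)
{
	void *some_kernel_object = (void *) 0x412345UL;

	printf("ra = 0x%lx\n", kimage_addr_to_ra_demo(some_kernel_object));
	return 0;
}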
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 6fa761612899..23fad7ebdd0d 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -1,7 +1,6 @@
-/* $Id: sparc64_ksyms.c,v 1.121 2002/02/09 19:49:31 davem Exp $
- * arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
+/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
@@ -28,7 +27,6 @@
#include <net/compat.h>
#include <asm/oplib.h>
-#include <asm/delay.h>
#include <asm/system.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
@@ -124,10 +122,6 @@ EXPORT_SYMBOL(__write_lock);
EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
-/* CPU online map and active count. */
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(phys_cpu_present_map);
-
EXPORT_SYMBOL(smp_call_function);
#endif /* CONFIG_SMP */
@@ -174,6 +168,7 @@ EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(__flushw_user);
EXPORT_SYMBOL(tlb_type);
+EXPORT_SYMBOL(sun4v_chip_type);
EXPORT_SYMBOL(get_fb_unmapped_area);
EXPORT_SYMBOL(flush_icache_range);
@@ -286,6 +281,7 @@ EXPORT_SYMBOL(sys_getgid);
EXPORT_SYMBOL(svr4_getcontext);
EXPORT_SYMBOL(svr4_setcontext);
EXPORT_SYMBOL(compat_sys_ioctl);
+EXPORT_SYMBOL(sys_ioctl);
EXPORT_SYMBOL(sparc32_open);
#endif
@@ -330,19 +326,12 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(strncmp);
-/* Delay routines. */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__const_udelay);
-EXPORT_SYMBOL(__delay);
-
void VISenter(void);
/* RAID code needs this */
EXPORT_SYMBOL(VISenter);
/* for input/keybdev */
EXPORT_SYMBOL(sun_do_break);
-EXPORT_SYMBOL(serial_console);
EXPORT_SYMBOL(stop_a_enabled);
#ifdef CONFIG_DEBUG_BUGVERBOSE
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index abd83129b2e7..e8dce90d05d4 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1,8 +1,7 @@
-/* $Id: sys_sparc32.c,v 1.184 2002/02/09 19:49:31 davem Exp $
- * sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
+/* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment.
@@ -1028,3 +1027,10 @@ long compat_sync_file_range(int fd, unsigned long off_high, unsigned long off_lo
(nb_high << 32) | nb_low,
flags);
}
+
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+ u32 lenhi, u32 lenlo)
+{
+ return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
+ ((loff_t)lenhi << 32) | lenlo);
+}
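
The new compat_sys_fallocate() above is a pure argument-marshalling wrapper: the 32-bit ABI passes each 64-bit loff_t as two 32-bit values, so the wrapper glues the high and low halves back together before calling sys_fallocate(). A self-contained sketch of that reassembly, assuming the usual hi/lo split:

#include <stdio.h>
#include <stdint.h>

typedef int64_t loff_t_demo;

static loff_t_demo glue64(uint32_t hi, uint32_t lo)
{
	/* ((loff_t)hi << 32) | lo, exactly as in the wrapper above */
	return ((loff_t_demo) hi << 32) | lo;
}

int main(void)
{
	/* hi=0x1, lo=0x200 reassembles to 4 GiB + 512 bytes */
	printf("offset = %lld\n", (long long) glue64(0x1, 0x200));
	return 0;
}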
diff --git a/arch/sparc64/kernel/sysfs.c b/arch/sparc64/kernel/sysfs.c
index cdb1477af89f..52816c7be0b9 100644
--- a/arch/sparc64/kernel/sysfs.c
+++ b/arch/sparc64/kernel/sysfs.c
@@ -193,7 +193,6 @@ static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
}
SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
-SHOW_CPUDATA_ULONG_NAME(udelay_val, udelay_val);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
@@ -203,7 +202,6 @@ SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);
static struct sysdev_attribute cpu_core_attrs[] = {
_SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL),
- _SYSDEV_ATTR(udelay_val, 0444, show_udelay_val, NULL),
_SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL),
_SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
_SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL),
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 8765e32155a0..06d10907d8ce 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -1,8 +1,7 @@
-/* $Id: systbls.S,v 1.81 2002/02/08 03:57:14 davem Exp $
- * systbls.S: System call entry point tables for OS compatibility.
+/* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
- * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995, 1996, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*
* Based upon preliminary work which is:
@@ -81,7 +80,7 @@ sys_call_table32:
.word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
-/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, compat_sys_timerfd, sys_eventfd
+/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, compat_sys_timerfd, sys_eventfd, compat_sys_fallocate
#endif /* CONFIG_COMPAT */
@@ -153,7 +152,7 @@ sys_call_table:
.word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
-/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd, sys_eventfd
+/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd, sys_eventfd, sys_fallocate
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
defined(CONFIG_SOLARIS_EMUL_MODULE)
@@ -272,6 +271,6 @@ sunos_sys_table:
.word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys
/*310*/ .word sunos_nosys, sunos_nosys, sunos_nosys
- .word sunos_nosys
+ .word sunos_nosys, sunos_nosys
#endif
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index a31a0439244f..69cad1b653c1 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -403,58 +403,9 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = {
static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
-#define TICK_SIZE (tick_nsec / 1000)
-
-#define USEC_AFTER 500000
-#define USEC_BEFORE 500000
-
-static void sync_cmos_clock(unsigned long dummy);
-
-static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
-
-static void sync_cmos_clock(unsigned long dummy)
+int update_persistent_clock(struct timespec now)
{
- struct timeval now, next;
- int fail = 1;
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- * This code is run on a timer. If the clock is set, that timer
- * may not expire at the correct time. Thus, we adjust...
- */
- if (!ntp_synced())
- /*
- * Not synced, exit, do not restart a timer (if one is
- * running, let it run out).
- */
- return;
-
- do_gettimeofday(&now);
- if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
- now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
- fail = set_rtc_mmss(now.tv_sec);
-
- next.tv_usec = USEC_AFTER - now.tv_usec;
- if (next.tv_usec <= 0)
- next.tv_usec += USEC_PER_SEC;
-
- if (!fail)
- next.tv_sec = 659;
- else
- next.tv_sec = 0;
-
- if (next.tv_usec >= USEC_PER_SEC) {
- next.tv_sec++;
- next.tv_usec -= USEC_PER_SEC;
- }
- mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
-}
-
-void notify_arch_cmos_timer(void)
-{
- mod_timer(&sync_cmos_timer, jiffies + 1);
+ return set_rtc_mmss(now.tv_sec);
}
/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
@@ -835,7 +786,7 @@ static int __init clock_init(void)
return 0;
}
- return of_register_driver(&clock_driver, &of_bus_type);
+ return of_register_driver(&clock_driver, &of_platform_bus_type);
}
/* Must be after subsys_initcall() so that busses are probed. Must
@@ -849,9 +800,6 @@ static unsigned long sparc64_init_timers(void)
{
struct device_node *dp;
unsigned long clock;
-#ifdef CONFIG_SMP
- extern void smp_tick_init(void);
-#endif
dp = of_find_node_by_path("/");
if (tlb_type == spitfire) {
@@ -874,10 +822,6 @@ static unsigned long sparc64_init_timers(void)
clock = of_getintprop_default(dp, "stick-frequency", 0);
}
-#ifdef CONFIG_SMP
- smp_tick_init();
-#endif
-
return clock;
}
@@ -938,6 +882,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode,
{
switch (mode) {
case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_RESUME:
break;
case CLOCK_EVT_MODE_SHUTDOWN:
@@ -1038,10 +983,31 @@ static void __init setup_clockevent_multiplier(unsigned long hz)
sparc64_clockevent.mult = mult;
}
+static unsigned long tb_ticks_per_usec __read_mostly;
+
+void __delay(unsigned long loops)
+{
+ unsigned long bclock, now;
+
+ bclock = tick_ops->get_tick();
+ do {
+ now = tick_ops->get_tick();
+ } while ((now-bclock) < loops);
+}
+EXPORT_SYMBOL(__delay);
+
+void udelay(unsigned long usecs)
+{
+ __delay(tb_ticks_per_usec * usecs);
+}
+EXPORT_SYMBOL(udelay);
+
void __init time_init(void)
{
unsigned long clock = sparc64_init_timers();
+ tb_ticks_per_usec = clock / USEC_PER_SEC;
+
timer_ticks_per_nsec_quotient =
clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT);
@@ -1420,8 +1386,148 @@ static int bq4802_set_rtc_time(struct rtc_time *time)
return 0;
}
+
+static void cmos_get_rtc_time(struct rtc_time *rtc_tm)
+{
+ unsigned char ctrl;
+
+ rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
+ rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
+ rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
+ rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+ rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
+ rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
+ rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK);
+
+ ctrl = CMOS_READ(RTC_CONTROL);
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ BCD_TO_BIN(rtc_tm->tm_sec);
+ BCD_TO_BIN(rtc_tm->tm_min);
+ BCD_TO_BIN(rtc_tm->tm_hour);
+ BCD_TO_BIN(rtc_tm->tm_mday);
+ BCD_TO_BIN(rtc_tm->tm_mon);
+ BCD_TO_BIN(rtc_tm->tm_year);
+ BCD_TO_BIN(rtc_tm->tm_wday);
+ }
+
+ if (rtc_tm->tm_year <= 69)
+ rtc_tm->tm_year += 100;
+
+ rtc_tm->tm_mon--;
+}
+
+static int cmos_set_rtc_time(struct rtc_time *rtc_tm)
+{
+ unsigned char mon, day, hrs, min, sec;
+ unsigned char save_control, save_freq_select;
+ unsigned int yrs;
+
+ yrs = rtc_tm->tm_year;
+ mon = rtc_tm->tm_mon + 1;
+ day = rtc_tm->tm_mday;
+ hrs = rtc_tm->tm_hour;
+ min = rtc_tm->tm_min;
+ sec = rtc_tm->tm_sec;
+
+ if (yrs >= 100)
+ yrs -= 100;
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ BIN_TO_BCD(sec);
+ BIN_TO_BCD(min);
+ BIN_TO_BCD(hrs);
+ BIN_TO_BCD(day);
+ BIN_TO_BCD(mon);
+ BIN_TO_BCD(yrs);
+ }
+
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+ CMOS_WRITE(yrs, RTC_YEAR);
+ CMOS_WRITE(mon, RTC_MONTH);
+ CMOS_WRITE(day, RTC_DAY_OF_MONTH);
+ CMOS_WRITE(hrs, RTC_HOURS);
+ CMOS_WRITE(min, RTC_MINUTES);
+ CMOS_WRITE(sec, RTC_SECONDS);
+
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+ return 0;
+}
#endif /* CONFIG_PCI */
+static void mostek_get_rtc_time(struct rtc_time *rtc_tm)
+{
+ void __iomem *regs = mstk48t02_regs;
+ u8 tmp;
+
+ spin_lock_irq(&mostek_lock);
+
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp |= MSTK_CREG_READ;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+
+ rtc_tm->tm_sec = MSTK_REG_SEC(regs);
+ rtc_tm->tm_min = MSTK_REG_MIN(regs);
+ rtc_tm->tm_hour = MSTK_REG_HOUR(regs);
+ rtc_tm->tm_mday = MSTK_REG_DOM(regs);
+ rtc_tm->tm_mon = MSTK_REG_MONTH(regs);
+ rtc_tm->tm_year = MSTK_CVT_YEAR( MSTK_REG_YEAR(regs) );
+ rtc_tm->tm_wday = MSTK_REG_DOW(regs);
+
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp &= ~MSTK_CREG_READ;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+
+ spin_unlock_irq(&mostek_lock);
+
+ rtc_tm->tm_mon--;
+ rtc_tm->tm_wday--;
+ rtc_tm->tm_year -= 1900;
+}
+
+static int mostek_set_rtc_time(struct rtc_time *rtc_tm)
+{
+ unsigned char mon, day, hrs, min, sec, wday;
+ void __iomem *regs = mstk48t02_regs;
+ unsigned int yrs;
+ u8 tmp;
+
+ yrs = rtc_tm->tm_year + 1900;
+ mon = rtc_tm->tm_mon + 1;
+ day = rtc_tm->tm_mday;
+ wday = rtc_tm->tm_wday + 1;
+ hrs = rtc_tm->tm_hour;
+ min = rtc_tm->tm_min;
+ sec = rtc_tm->tm_sec;
+
+ spin_lock_irq(&mostek_lock);
+
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp |= MSTK_CREG_WRITE;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+
+ MSTK_SET_REG_SEC(regs, sec);
+ MSTK_SET_REG_MIN(regs, min);
+ MSTK_SET_REG_HOUR(regs, hrs);
+ MSTK_SET_REG_DOW(regs, wday);
+ MSTK_SET_REG_DOM(regs, day);
+ MSTK_SET_REG_MONTH(regs, mon);
+ MSTK_SET_REG_YEAR(regs, yrs - MSTK_YEAR_ZERO);
+
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp &= ~MSTK_CREG_WRITE;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+
+ spin_unlock_irq(&mostek_lock);
+
+ return 0;
+}
+
struct mini_rtc_ops {
void (*get_rtc_time)(struct rtc_time *);
int (*set_rtc_time)(struct rtc_time *);
@@ -1442,8 +1548,18 @@ static struct mini_rtc_ops bq4802_rtc_ops = {
.get_rtc_time = bq4802_get_rtc_time,
.set_rtc_time = bq4802_set_rtc_time,
};
+
+static struct mini_rtc_ops cmos_rtc_ops = {
+ .get_rtc_time = cmos_get_rtc_time,
+ .set_rtc_time = cmos_set_rtc_time,
+};
#endif /* CONFIG_PCI */
+static struct mini_rtc_ops mostek_rtc_ops = {
+ .get_rtc_time = mostek_get_rtc_time,
+ .set_rtc_time = mostek_set_rtc_time,
+};
+
static struct mini_rtc_ops *mini_rtc_ops;
static inline void mini_get_rtc_time(struct rtc_time *time)
@@ -1569,7 +1685,11 @@ static int __init rtc_mini_init(void)
#ifdef CONFIG_PCI
else if (bq4802_regs)
mini_rtc_ops = &bq4802_rtc_ops;
+ else if (ds1287_regs)
+ mini_rtc_ops = &cmos_rtc_ops;
#endif /* CONFIG_PCI */
+ else if (mstk48t02_regs)
+ mini_rtc_ops = &mostek_rtc_ops;
else
return -ENODEV;
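
The delay rework in this file (paired with the deletion of lib/delay.c further down) drops the BogoMIPS-calibrated loop entirely: __delay() now spins until the CPU tick register has advanced by the requested number of ticks, and udelay() scales microseconds by a ticks-per-microsecond factor computed from the probed clock in time_init(). A user-space sketch of the same idea, using a monotonic nanosecond counter as a stand-in for %tick:

#include <stdio.h>
#include <time.h>

/* Stand-in for tick_ops->get_tick(): a monotonic nanosecond counter. */
static unsigned long get_tick_demo(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long) ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

/* "Ticks" per microsecond for this demo counter (1 tick == 1 ns here).
 * In the kernel this is clock / USEC_PER_SEC, set up in time_init(). */
static const unsigned long tb_ticks_per_usec_demo = 1000;

static void __delay_demo(unsigned long loops)
{
	unsigned long bclock = get_tick_demo(), now;

	do {
		now = get_tick_demo();
	} while (now - bclock < loops);
}

static void udelay_demo(unsigned long usecs)
{
	__delay_demo(tb_ticks_per_usec_demo * usecs);
}

int main(void)
{
	printf("spinning for ~500 microseconds...\n");
	udelay_demo(500);
	printf("done\n");
	return 0;
}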
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index a4dc01a3d238..04e81dda13d0 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -95,14 +95,13 @@ spitfire_startup:
membar #Sync
startup_continue:
+ mov %o0, %l0
+ BRANCH_IF_SUN4V(g1, niagara_lock_tlb)
+
sethi %hi(0x80000000), %g2
sllx %g2, 32, %g2
wr %g2, 0, %tick_cmpr
- mov %o0, %l0
-
- BRANCH_IF_SUN4V(g1, niagara_lock_tlb)
-
/* Call OBP by hand to lock KERNBASE into i/d tlbs.
* We lock 2 consequetive entries if we are 'bigkernel'.
*/
@@ -346,7 +345,7 @@ after_lock_tlb:
sethi %hi(tramp_stack), %g1
or %g1, %lo(tramp_stack), %g1
add %g1, TRAMP_STACK_SIZE, %g1
- sub %g1, STACKFRAME_SZ + STACK_BIAS, %sp
+ sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
mov 0, %fp
/* Put garbage in these registers to trap any access to them. */
@@ -366,11 +365,8 @@ after_lock_tlb:
call hard_smp_processor_id
nop
- mov %o0, %o1
- mov 0, %o0
- mov 0, %o2
- call sun4v_init_mondo_queues
- mov 1, %o3
+ call sun4v_register_mondo_queues
+ nop
1: call init_cur_cpu_trap
ldx [%l0], %o0
@@ -415,15 +411,38 @@ after_lock_tlb:
sethi %hi(kern_base), %g3
ldx [%g3 + %lo(kern_base)], %g3
add %g2, %g3, %o1
+ sethi %hi(sparc64_ttable_tl0), %o0
- call prom_set_trap_table_sun4v
- sethi %hi(sparc64_ttable_tl0), %o0
+ set prom_set_trap_table_name, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x00]
+ mov 2, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x08]
+ mov 0, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x10]
+ stx %o0, [%sp + 2047 + 128 + 0x18]
+ stx %o1, [%sp + 2047 + 128 + 0x20]
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x08], %o1
+ call %o1
+ add %sp, (2047 + 128), %o0
ba,pt %xcc, 2f
nop
-1: call prom_set_trap_table
- sethi %hi(sparc64_ttable_tl0), %o0
+1: sethi %hi(sparc64_ttable_tl0), %o0
+ set prom_set_trap_table_name, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x00]
+ mov 1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x08]
+ mov 0, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x10]
+ stx %o0, [%sp + 2047 + 128 + 0x18]
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x08], %o1
+ call %o1
+ add %sp, (2047 + 128), %o0
2: ldx [%l0], %g6
ldx [%g6 + TI_TASK], %g4
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index a05b37f025c4..6ef42b8e53d8 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2229,6 +2229,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
__asm__ __volatile__("flushw");
__show_regs(regs);
+ add_taint(TAINT_DIE);
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 3ad10f3027e4..b982fa3dd748 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -45,6 +45,8 @@ SECTIONS
__ex_table : { *(__ex_table) }
__stop___ex_table = .;
+ NOTES
+
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init.text : {
@@ -90,10 +92,8 @@ SECTIONS
__initramfs_end = .;
#endif
- . = ALIGN(PAGE_SIZE);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(PAGE_SIZE)
+
. = ALIGN(PAGE_SIZE);
__init_end = .;
__bss_start = .;
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 4a725d8985f1..f095e13910bc 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.25 2000/12/14 22:57:25 davem Exp $
+#
# Makefile for Sparc64 library files..
#
@@ -13,7 +13,11 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \
NGpage.o NGbzero.o \
+ NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o NG2patch.o \
+ NG2page.o \
+ GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o GENpatch.o \
+ GENpage.o GENbzero.o \
copy_in_user.o user_fixup.o memmove.o \
- mcount.o ipcsum.o rwsem.o xor.o delay.o
+ mcount.o ipcsum.o rwsem.o xor.o
obj-y += iomap.o
diff --git a/arch/sparc64/lib/NGcopy_from_user.S b/arch/sparc64/lib/NGcopy_from_user.S
index 2d93456f76dd..e7f433f71b42 100644
--- a/arch/sparc64/lib/NGcopy_from_user.S
+++ b/arch/sparc64/lib/NGcopy_from_user.S
@@ -1,6 +1,6 @@
/* NGcopy_from_user.S: Niagara optimized copy from userspace.
*
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
#define EX_LD(x) \
@@ -8,8 +8,8 @@
.section .fixup; \
.align 4; \
99: wr %g0, ASI_AIUS, %asi;\
- retl; \
- mov 1, %o0; \
+ ret; \
+ restore %g0, 1, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
@@ -24,7 +24,7 @@
#define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest
#define LOAD_TWIN(addr_reg,dest0,dest1) \
ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0
-#define EX_RETVAL(x) 0
+#define EX_RETVAL(x) %g0
#ifdef __KERNEL__
#define PREAMBLE \
diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S
index 34112d5054ef..6ea01c5532a0 100644
--- a/arch/sparc64/lib/NGcopy_to_user.S
+++ b/arch/sparc64/lib/NGcopy_to_user.S
@@ -1,6 +1,6 @@
/* NGcopy_to_user.S: Niagara optimized copy to userspace.
*
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
#define EX_ST(x) \
@@ -8,8 +8,8 @@
.section .fixup; \
.align 4; \
99: wr %g0, ASI_AIUS, %asi;\
- retl; \
- mov 1, %o0; \
+ ret; \
+ restore %g0, 1, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
@@ -23,7 +23,7 @@
#define FUNC_NAME NGcopy_to_user
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS
-#define EX_RETVAL(x) 0
+#define EX_RETVAL(x) %g0
#ifdef __KERNEL__
/* Writing to %asi is _expensive_ so we hardcode it.
diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S
index 66063a9a66b8..96a14caf6966 100644
--- a/arch/sparc64/lib/NGmemcpy.S
+++ b/arch/sparc64/lib/NGmemcpy.S
@@ -1,6 +1,6 @@
/* NGmemcpy.S: Niagara optimized memcpy.
*
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
*/
#ifdef __KERNEL__
@@ -16,6 +16,12 @@
wr %g0, ASI_PNF, %asi
#endif
+#ifdef __sparc_v9__
+#define SAVE_AMOUNT 128
+#else
+#define SAVE_AMOUNT 64
+#endif
+
#ifndef STORE_ASI
#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P
#endif
@@ -50,7 +56,11 @@
#endif
#ifndef STORE_INIT
+#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
#define STORE_INIT(src,addr) stxa src, [addr] %asi
+#else
+#define STORE_INIT(src,addr) stx src, [addr + 0x00]
+#endif
#endif
#ifndef FUNC_NAME
@@ -73,18 +83,19 @@
.globl FUNC_NAME
.type FUNC_NAME,#function
-FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
- srlx %o2, 31, %g2
+FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
+ PREAMBLE
+ save %sp, -SAVE_AMOUNT, %sp
+ srlx %i2, 31, %g2
cmp %g2, 0
tne %xcc, 5
- PREAMBLE
- mov %o0, GLOBAL_SPARE
- cmp %o2, 0
+ mov %i0, %o0
+ cmp %i2, 0
be,pn %XCC, 85f
- or %o0, %o1, %o3
- cmp %o2, 16
+ or %o0, %i1, %i3
+ cmp %i2, 16
blu,a,pn %XCC, 80f
- or %o3, %o2, %o3
+ or %i3, %i2, %i3
/* 2 blocks (128 bytes) is the minimum we can do the block
* copy with. We need to ensure that we'll iterate at least
@@ -93,31 +104,31 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
* to (64 - 1) bytes from the length before we perform the
* block copy loop.
*/
- cmp %o2, (2 * 64)
+ cmp %i2, (2 * 64)
blu,pt %XCC, 70f
- andcc %o3, 0x7, %g0
+ andcc %i3, 0x7, %g0
/* %o0: dst
- * %o1: src
- * %o2: len (known to be >= 128)
+ * %i1: src
+ * %i2: len (known to be >= 128)
*
- * The block copy loops will use %o4/%o5,%g2/%g3 as
+ * The block copy loops will use %i4/%i5,%g2/%g3 as
* temporaries while copying the data.
*/
- LOAD(prefetch, %o1, #one_read)
+ LOAD(prefetch, %i1, #one_read)
wr %g0, STORE_ASI, %asi
/* Align destination on 64-byte boundary. */
- andcc %o0, (64 - 1), %o4
+ andcc %o0, (64 - 1), %i4
be,pt %XCC, 2f
- sub %o4, 64, %o4
- sub %g0, %o4, %o4 ! bytes to align dst
- sub %o2, %o4, %o2
-1: subcc %o4, 1, %o4
- EX_LD(LOAD(ldub, %o1, %g1))
+ sub %i4, 64, %i4
+ sub %g0, %i4, %i4 ! bytes to align dst
+ sub %i2, %i4, %i2
+1: subcc %i4, 1, %i4
+ EX_LD(LOAD(ldub, %i1, %g1))
EX_ST(STORE(stb, %g1, %o0))
- add %o1, 1, %o1
+ add %i1, 1, %i1
bne,pt %XCC, 1b
add %o0, 1, %o0
@@ -136,111 +147,155 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
* aligned store data at a time, this is easy to ensure.
*/
2:
- andcc %o1, (16 - 1), %o4
- andn %o2, (64 - 1), %g1 ! block copy loop iterator
- sub %o2, %g1, %o2 ! final sub-block copy bytes
+ andcc %i1, (16 - 1), %i4
+ andn %i2, (64 - 1), %g1 ! block copy loop iterator
be,pt %XCC, 50f
- cmp %o4, 8
- be,a,pt %XCC, 10f
- sub %o1, 0x8, %o1
+ sub %i2, %g1, %i2 ! final sub-block copy bytes
+
+ cmp %i4, 8
+ be,pt %XCC, 10f
+ sub %i1, %i4, %i1
/* Neither 8-byte nor 16-byte aligned, shift and mask. */
- mov %g1, %o4
- and %o1, 0x7, %g1
- sll %g1, 3, %g1
- mov 64, %o3
- andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
- sub %o3, %g1, %o3
- sllx %g2, %g1, %g2
+ and %i4, 0x7, GLOBAL_SPARE
+ sll GLOBAL_SPARE, 3, GLOBAL_SPARE
+ mov 64, %i5
+ EX_LD(LOAD_TWIN(%i1, %g2, %g3))
+ sub %i5, GLOBAL_SPARE, %i5
+ mov 16, %o4
+ mov 32, %o5
+ mov 48, %o7
+ mov 64, %i3
+
+ bg,pn %XCC, 9f
+ nop
-#define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\
- EX_LD(LOAD(ldx, SRC, TMP1)); \
- srlx TMP1, PRE_SHIFT, TMP2; \
- or TMP2, PRE_VAL, TMP2; \
- EX_ST(STORE_INIT(TMP2, DST)); \
- sllx TMP1, POST_SHIFT, PRE_VAL;
-
-1: add %o1, 0x8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00)
- add %o1, 0x8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08)
- add %o1, 0x8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10)
- add %o1, 0x8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18)
- add %o1, 32, %o1
- LOAD(prefetch, %o1, #one_read)
- sub %o1, 32 - 8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20)
- add %o1, 8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28)
- add %o1, 8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30)
- add %o1, 8, %o1
- SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38)
- subcc %o4, 64, %o4
- bne,pt %XCC, 1b
+#define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \
+ sllx WORD1, POST_SHIFT, WORD1; \
+ srlx WORD2, PRE_SHIFT, TMP; \
+ sllx WORD2, POST_SHIFT, WORD2; \
+ or WORD1, TMP, WORD1; \
+ srlx WORD3, PRE_SHIFT, TMP; \
+ or WORD2, TMP, WORD2;
+
+8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+ MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+ LOAD(prefetch, %i1 + %i3, #one_read)
+
+ EX_ST(STORE_INIT(%g2, %o0 + 0x00))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x08))
+
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%g2, %o0 + 0x20))
+ EX_ST(STORE_INIT(%g3, %o0 + 0x28))
+
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ add %i1, 64, %i1
+ MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+
+ subcc %g1, 64, %g1
+ bne,pt %XCC, 8b
add %o0, 64, %o0
-#undef SWIVEL_ONE_DWORD
+ ba,pt %XCC, 60f
+ add %i1, %i4, %i1
+
+9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+ MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+ LOAD(prefetch, %i1 + %i3, #one_read)
+
+ EX_ST(STORE_INIT(%g3, %o0 + 0x00))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+
+ EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+ MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x18))
+
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%g3, %o0 + 0x20))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+
+ EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+ add %i1, 64, %i1
+ MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30))
+ EX_ST(STORE_INIT(%g2, %o0 + 0x38))
+
+ subcc %g1, 64, %g1
+ bne,pt %XCC, 9b
+ add %o0, 64, %o0
- srl %g1, 3, %g1
ba,pt %XCC, 60f
- add %o1, %g1, %o1
+ add %i1, %i4, %i1
10: /* Destination is 64-byte aligned, source was only 8-byte
* aligned but it has been subtracted by 8 and we perform
* one twin load ahead, then add 8 back into source when
* we finish the loop.
*/
- EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-1: add %o1, 16, %o1
- EX_LD(LOAD_TWIN(%o1, %g2, %g3))
- add %o1, 16 + 32, %o1
- LOAD(prefetch, %o1, #one_read)
- sub %o1, 32, %o1
+ EX_LD(LOAD_TWIN(%i1, %o4, %o5))
+ mov 16, %o7
+ mov 32, %g2
+ mov 48, %g3
+ mov 64, %o1
+1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ LOAD(prefetch, %i1 + %o1, #one_read)
EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line
- EX_ST(STORE_INIT(%g2, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%o1, %o4, %o5))
- add %o1, 16, %o1
- EX_ST(STORE_INIT(%g3, %o0 + 0x10))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x10))
EX_ST(STORE_INIT(%o4, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%o1, %g2, %g3))
- add %o1, 16, %o1
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
EX_ST(STORE_INIT(%o5, %o0 + 0x20))
- EX_ST(STORE_INIT(%g2, %o0 + 0x28))
- EX_LD(LOAD_TWIN(%o1, %o4, %o5))
- EX_ST(STORE_INIT(%g3, %o0 + 0x30))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+ EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
+ add %i1, 64, %i1
+ EX_ST(STORE_INIT(%o3, %o0 + 0x30))
EX_ST(STORE_INIT(%o4, %o0 + 0x38))
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
ba,pt %XCC, 60f
- add %o1, 0x8, %o1
+ add %i1, 0x8, %i1
50: /* Destination is 64-byte aligned, and source is 16-byte
* aligned.
*/
-1: EX_LD(LOAD_TWIN(%o1, %o4, %o5))
- add %o1, 16, %o1
- EX_LD(LOAD_TWIN(%o1, %g2, %g3))
- add %o1, 16 + 32, %o1
- LOAD(prefetch, %o1, #one_read)
- sub %o1, 32, %o1
+ mov 16, %o7
+ mov 32, %g2
+ mov 48, %g3
+ mov 64, %o1
+1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
+ EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+ LOAD(prefetch, %i1 + %o1, #one_read)
EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line
EX_ST(STORE_INIT(%o5, %o0 + 0x08))
- EX_LD(LOAD_TWIN(%o1, %o4, %o5))
- add %o1, 16, %o1
- EX_ST(STORE_INIT(%g2, %o0 + 0x10))
- EX_ST(STORE_INIT(%g3, %o0 + 0x18))
- EX_LD(LOAD_TWIN(%o1, %g2, %g3))
- add %o1, 16, %o1
+ EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+ EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+ add %i1, 64, %i1
EX_ST(STORE_INIT(%o4, %o0 + 0x20))
EX_ST(STORE_INIT(%o5, %o0 + 0x28))
- EX_ST(STORE_INIT(%g2, %o0 + 0x30))
- EX_ST(STORE_INIT(%g3, %o0 + 0x38))
+ EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+ EX_ST(STORE_INIT(%o3, %o0 + 0x38))
subcc %g1, 64, %g1
bne,pt %XCC, 1b
add %o0, 64, %o0
@@ -249,47 +304,47 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
60:
membar #Sync
- /* %o2 contains any final bytes still needed to be copied
+ /* %i2 contains any final bytes still needed to be copied
* over. If anything is left, we copy it one byte at a time.
*/
- RESTORE_ASI(%o3)
- brz,pt %o2, 85f
- sub %o0, %o1, %o3
+ RESTORE_ASI(%i3)
+ brz,pt %i2, 85f
+ sub %o0, %i1, %i3
ba,a,pt %XCC, 90f
.align 64
70: /* 16 < len <= 64 */
bne,pn %XCC, 75f
- sub %o0, %o1, %o3
+ sub %o0, %i1, %i3
72:
- andn %o2, 0xf, %o4
- and %o2, 0xf, %o2
-1: subcc %o4, 0x10, %o4
- EX_LD(LOAD(ldx, %o1, %o5))
- add %o1, 0x08, %o1
- EX_LD(LOAD(ldx, %o1, %g1))
- sub %o1, 0x08, %o1
- EX_ST(STORE(stx, %o5, %o1 + %o3))
- add %o1, 0x8, %o1
- EX_ST(STORE(stx, %g1, %o1 + %o3))
+ andn %i2, 0xf, %i4
+ and %i2, 0xf, %i2
+1: subcc %i4, 0x10, %i4
+ EX_LD(LOAD(ldx, %i1, %o4))
+ add %i1, 0x08, %i1
+ EX_LD(LOAD(ldx, %i1, %g1))
+ sub %i1, 0x08, %i1
+ EX_ST(STORE(stx, %o4, %i1 + %i3))
+ add %i1, 0x8, %i1
+ EX_ST(STORE(stx, %g1, %i1 + %i3))
bgu,pt %XCC, 1b
- add %o1, 0x8, %o1
-73: andcc %o2, 0x8, %g0
+ add %i1, 0x8, %i1
+73: andcc %i2, 0x8, %g0
be,pt %XCC, 1f
nop
- sub %o2, 0x8, %o2
- EX_LD(LOAD(ldx, %o1, %o5))
- EX_ST(STORE(stx, %o5, %o1 + %o3))
- add %o1, 0x8, %o1
-1: andcc %o2, 0x4, %g0
+ sub %i2, 0x8, %i2
+ EX_LD(LOAD(ldx, %i1, %o4))
+ EX_ST(STORE(stx, %o4, %i1 + %i3))
+ add %i1, 0x8, %i1
+1: andcc %i2, 0x4, %g0
be,pt %XCC, 1f
nop
- sub %o2, 0x4, %o2
- EX_LD(LOAD(lduw, %o1, %o5))
- EX_ST(STORE(stw, %o5, %o1 + %o3))
- add %o1, 0x4, %o1
-1: cmp %o2, 0
+ sub %i2, 0x4, %i2
+ EX_LD(LOAD(lduw, %i1, %i5))
+ EX_ST(STORE(stw, %i5, %i1 + %i3))
+ add %i1, 0x4, %i1
+1: cmp %i2, 0
be,pt %XCC, 85f
nop
ba,pt %xcc, 90f
@@ -300,71 +355,71 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
sub %g1, 0x8, %g1
be,pn %icc, 2f
sub %g0, %g1, %g1
- sub %o2, %g1, %o2
+ sub %i2, %g1, %i2
1: subcc %g1, 1, %g1
- EX_LD(LOAD(ldub, %o1, %o5))
- EX_ST(STORE(stb, %o5, %o1 + %o3))
+ EX_LD(LOAD(ldub, %i1, %i5))
+ EX_ST(STORE(stb, %i5, %i1 + %i3))
bgu,pt %icc, 1b
- add %o1, 1, %o1
+ add %i1, 1, %i1
-2: add %o1, %o3, %o0
- andcc %o1, 0x7, %g1
+2: add %i1, %i3, %o0
+ andcc %i1, 0x7, %g1
bne,pt %icc, 8f
sll %g1, 3, %g1
- cmp %o2, 16
+ cmp %i2, 16
bgeu,pt %icc, 72b
nop
ba,a,pt %xcc, 73b
-8: mov 64, %o3
- andn %o1, 0x7, %o1
- EX_LD(LOAD(ldx, %o1, %g2))
- sub %o3, %g1, %o3
- andn %o2, 0x7, %o4
+8: mov 64, %i3
+ andn %i1, 0x7, %i1
+ EX_LD(LOAD(ldx, %i1, %g2))
+ sub %i3, %g1, %i3
+ andn %i2, 0x7, %i4
sllx %g2, %g1, %g2
-1: add %o1, 0x8, %o1
- EX_LD(LOAD(ldx, %o1, %g3))
- subcc %o4, 0x8, %o4
- srlx %g3, %o3, %o5
- or %o5, %g2, %o5
- EX_ST(STORE(stx, %o5, %o0))
+1: add %i1, 0x8, %i1
+ EX_LD(LOAD(ldx, %i1, %g3))
+ subcc %i4, 0x8, %i4
+ srlx %g3, %i3, %i5
+ or %i5, %g2, %i5
+ EX_ST(STORE(stx, %i5, %o0))
add %o0, 0x8, %o0
bgu,pt %icc, 1b
sllx %g3, %g1, %g2
srl %g1, 3, %g1
- andcc %o2, 0x7, %o2
+ andcc %i2, 0x7, %i2
be,pn %icc, 85f
- add %o1, %g1, %o1
+ add %i1, %g1, %i1
ba,pt %xcc, 90f
- sub %o0, %o1, %o3
+ sub %o0, %i1, %i3
.align 64
80: /* 0 < len <= 16 */
- andcc %o3, 0x3, %g0
+ andcc %i3, 0x3, %g0
bne,pn %XCC, 90f
- sub %o0, %o1, %o3
+ sub %o0, %i1, %i3
1:
- subcc %o2, 4, %o2
- EX_LD(LOAD(lduw, %o1, %g1))
- EX_ST(STORE(stw, %g1, %o1 + %o3))
+ subcc %i2, 4, %i2
+ EX_LD(LOAD(lduw, %i1, %g1))
+ EX_ST(STORE(stw, %g1, %i1 + %i3))
bgu,pt %XCC, 1b
- add %o1, 4, %o1
+ add %i1, 4, %i1
-85: retl
- mov EX_RETVAL(GLOBAL_SPARE), %o0
+85: ret
+ restore EX_RETVAL(%i0), %g0, %o0
.align 32
90:
- subcc %o2, 1, %o2
- EX_LD(LOAD(ldub, %o1, %g1))
- EX_ST(STORE(stb, %g1, %o1 + %o3))
+ subcc %i2, 1, %i2
+ EX_LD(LOAD(ldub, %i1, %g1))
+ EX_ST(STORE(stb, %g1, %i1 + %i3))
bgu,pt %XCC, 90b
- add %o1, 1, %o1
- retl
- mov EX_RETVAL(GLOBAL_SPARE), %o0
+ add %i1, 1, %i1
+ ret
+ restore EX_RETVAL(%i0), %g0, %o0
.size FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/NGpage.S b/arch/sparc64/lib/NGpage.S
index 8ce3a0c9c537..428920de05ba 100644
--- a/arch/sparc64/lib/NGpage.S
+++ b/arch/sparc64/lib/NGpage.S
@@ -45,6 +45,7 @@ NGcopy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
retl
nop
+ .globl NGclear_page, NGclear_user_page
NGclear_page: /* %o0=dest */
NGclear_user_page: /* %o0=dest, %o1=vaddr */
mov 8, %g1
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c
deleted file mode 100644
index fb27e54a03ee..000000000000
--- a/arch/sparc64/lib/delay.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/* delay.c: Delay loops for sparc64
- *
- * Copyright (C) 2004, 2006 David S. Miller <davem@davemloft.net>
- *
- * Based heavily upon x86 variant which is:
- * Copyright (C) 1993 Linus Torvalds
- * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
- */
-
-#include <linux/delay.h>
-#include <asm/timer.h>
-
-void __delay(unsigned long loops)
-{
- unsigned long bclock, now;
-
- bclock = tick_ops->get_tick();
- do {
- now = tick_ops->get_tick();
- } while ((now-bclock) < loops);
-}
-
-/* We used to multiply by HZ after shifting down by 32 bits
- * but that runs into problems for higher values of HZ and
- * slow cpus.
- */
-void __const_udelay(unsigned long n)
-{
- n *= 4;
-
- n *= (cpu_data(raw_smp_processor_id()).udelay_val * (HZ/4));
- n >>= 32;
-
- __delay(n + 1);
-}
-
-void __udelay(unsigned long n)
-{
- __const_udelay(n * 0x10c7UL);
-}
-
-
-void __ndelay(unsigned long n)
-{
- __const_udelay(n * 0x5UL);
-}
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index e2cb9911d445..9f7740eee8d2 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -275,7 +275,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned int insn = 0;
- int si_code, fault_code;
+ int si_code, fault_code, fault;
unsigned long address, mm_rss;
fault_code = get_thread_fault_code();
@@ -412,20 +412,18 @@ good_area:
goto bad_area;
}
- switch (handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE))) {
- case VM_FAULT_MINOR:
- current->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- current->maj_flt++;
- break;
- case VM_FAULT_SIGBUS:
- goto do_sigbus;
- case VM_FAULT_OOM:
- goto out_of_memory;
- default:
+ fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
BUG();
}
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
up_read(&mm->mmap_sem);
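
The fault.c hunk above follows the then-new handle_mm_fault() convention: the return value is a bitmask of VM_FAULT_* flags rather than an enumerated MINOR/MAJOR/OOM code, so errors are detected by testing VM_FAULT_ERROR and the minor/major accounting keys off VM_FAULT_MAJOR. A small sketch of decoding such a flags word; the flag values here are illustrative, not the kernel's actual definitions from <linux/mm.h>:

#include <stdio.h>

/* Illustrative flag values. */
#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)

static void decode_fault(unsigned int fault)
{
	if (fault & VM_FAULT_ERROR) {
		if (fault & VM_FAULT_OOM)
			printf("fault %#x: out-of-memory path\n", fault);
		else if (fault & VM_FAULT_SIGBUS)
			printf("fault %#x: SIGBUS path\n", fault);
		return;
	}
	if (fault & VM_FAULT_MAJOR)
		printf("fault %#x: major fault, maj_flt++\n", fault);
	else
		printf("fault %#x: minor fault, min_flt++\n", fault);
}

int main(void)
{
	decode_fault(0);
	decode_fault(VM_FAULT_MAJOR);
	decode_fault(VM_FAULT_SIGBUS);
	return 0;
}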
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index eaba9b70b184..6cfab2e4d340 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -175,7 +175,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return -ENOMEM;
if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(addr, len, pgoff))
+ if (prepare_hugepage_range(addr, len))
return -EINVAL;
return addr;
}
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 8eb8a7c76ec9..7ff0a02f5813 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -262,8 +262,7 @@ void __init pgtable_cache_init(void)
tsb_caches[i] = kmem_cache_create(name,
size, size,
- 0,
- NULL, NULL);
+ 0, NULL);
if (!tsb_caches[i]) {
prom_printf("Could not create %s cache\n", name);
prom_halt();
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
index 7c25c54cefdc..3fafa9a8b50b 100644
--- a/arch/sparc64/prom/console.c
+++ b/arch/sparc64/prom/console.c
@@ -73,88 +73,3 @@ prom_puts(const char *s, int len)
P1275_INOUT(3,1),
prom_stdout, s, P1275_SIZE(len));
}
-
-/* Query for input device type */
-enum prom_input_device
-prom_query_input_device(void)
-{
- int st_p;
- char propb[64];
-
- st_p = prom_inst2pkg(prom_stdin);
- if(prom_node_has_property(st_p, "keyboard"))
- return PROMDEV_IKBD;
- prom_getproperty(st_p, "device_type", propb, sizeof(propb));
- if(strncmp(propb, "serial", 6))
- return PROMDEV_I_UNK;
- /* FIXME: Is there any better way how to find out? */
- memset(propb, 0, sizeof(propb));
- st_p = prom_finddevice ("/options");
- prom_getproperty(st_p, "input-device", propb, sizeof(propb));
-
- /*
- * If we get here with propb == 'keyboard', we are on ttya, as
- * the PROM defaulted to this due to 'no input device'.
- */
- if (!strncmp(propb, "keyboard", 8))
- return PROMDEV_ITTYA;
-
- if (!strncmp (propb, "rsc", 3))
- return PROMDEV_IRSC;
-
- if (!strncmp (propb, "virtual-console", 3))
- return PROMDEV_IVCONS;
-
- if (strncmp (propb, "tty", 3) || !propb[3])
- return PROMDEV_I_UNK;
-
- switch (propb[3]) {
- case 'a': return PROMDEV_ITTYA;
- case 'b': return PROMDEV_ITTYB;
- default: return PROMDEV_I_UNK;
- }
-}
-
-/* Query for output device type */
-
-enum prom_output_device
-prom_query_output_device(void)
-{
- int st_p;
- char propb[64];
- int propl;
-
- st_p = prom_inst2pkg(prom_stdout);
- propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
- if (propl >= 0 && propl == sizeof("display") &&
- strncmp("display", propb, sizeof("display")) == 0)
- return PROMDEV_OSCREEN;
- if(strncmp("serial", propb, 6))
- return PROMDEV_O_UNK;
- /* FIXME: Is there any better way how to find out? */
- memset(propb, 0, sizeof(propb));
- st_p = prom_finddevice ("/options");
- prom_getproperty(st_p, "output-device", propb, sizeof(propb));
-
- /*
- * If we get here with propb == 'screen', we are on ttya, as
- * the PROM defaulted to this due to 'no input device'.
- */
- if (!strncmp(propb, "screen", 6))
- return PROMDEV_OTTYA;
-
- if (!strncmp (propb, "rsc", 3))
- return PROMDEV_ORSC;
-
- if (!strncmp (propb, "virtual-console", 3))
- return PROMDEV_OVCONS;
-
- if (strncmp (propb, "tty", 3) || !propb[3])
- return PROMDEV_O_UNK;
-
- switch (propb[3]) {
- case 'a': return PROMDEV_OTTYA;
- case 'b': return PROMDEV_OTTYB;
- default: return PROMDEV_O_UNK;
- }
-}
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index f3e0c14e9eef..bbec7522826c 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -14,6 +14,7 @@
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/system.h>
+#include <asm/ldc.h>
int prom_service_exists(const char *service_name)
{
@@ -37,6 +38,10 @@ void prom_sun4v_guest_soft_state(void)
/* Reset and reboot the machine with the command 'bcommand'. */
void prom_reboot(const char *bcommand)
{
+#ifdef CONFIG_SUN_LDOMS
+ if (ldom_domaining_enabled)
+ ldom_reboot(bcommand);
+#endif
p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
P1275_INOUT(1, 0), bcommand);
}
@@ -67,7 +72,7 @@ void prom_cmdline(void)
local_irq_save(flags);
- if (!serial_console && prom_palette)
+ if (prom_palette)
prom_palette(1);
#ifdef CONFIG_SMP
@@ -80,7 +85,7 @@ void prom_cmdline(void)
smp_release();
#endif
- if (!serial_console && prom_palette)
+ if (prom_palette)
prom_palette(0);
local_irq_restore(flags);
@@ -91,6 +96,10 @@ void prom_cmdline(void)
*/
void prom_halt(void)
{
+#ifdef CONFIG_SUN_LDOMS
+ if (ldom_domaining_enabled)
+ ldom_power_off();
+#endif
again:
p1275_cmd("exit", P1275_INOUT(0, 0));
goto again; /* PROM is out to get me -DaveM */
@@ -98,6 +107,10 @@ again:
void prom_halt_power_off(void)
{
+#ifdef CONFIG_SUN_LDOMS
+ if (ldom_domaining_enabled)
+ ldom_power_off();
+#endif
p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
/* if nothing else helps, we just halt */
@@ -130,22 +143,6 @@ unsigned char prom_get_idprom(char *idbuf, int num_bytes)
return 0xff;
}
-/* Install Linux trap table so PROM uses that instead of its own. */
-void prom_set_trap_table(unsigned long tba)
-{
- p1275_cmd("SUNW,set-trap-table",
- (P1275_ARG(0, P1275_ARG_IN_64B) |
- P1275_INOUT(1, 0)), tba);
-}
-
-void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa)
-{
- p1275_cmd("SUNW,set-trap-table",
- (P1275_ARG(0, P1275_ARG_IN_64B) |
- P1275_ARG(1, P1275_ARG_IN_64B) |
- P1275_INOUT(2, 0)), tba, mmfsa);
-}
-
int prom_get_mmu_ihandle(void)
{
int node, ret;
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
index 2b32c489860c..7fcccc0e19cf 100644
--- a/arch/sparc64/prom/p1275.c
+++ b/arch/sparc64/prom/p1275.c
@@ -16,6 +16,7 @@
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm/pstate.h>
+#include <asm/ldc.h>
struct {
long prom_callback; /* 0x00 */
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
index 500f05e2cfcb..b2c5b12c9818 100644
--- a/arch/sparc64/prom/tree.c
+++ b/arch/sparc64/prom/tree.c
@@ -13,6 +13,7 @@
#include <asm/openprom.h>
#include <asm/oplib.h>
+#include <asm/ldc.h>
/* Return the child of node 'node' or zero if no this node has no
* direct descendent.
@@ -261,9 +262,17 @@ int prom_node_has_property(int node, const char *prop)
int
prom_setprop(int node, const char *pname, char *value, int size)
{
- if(size == 0) return 0;
- if((pname == 0) || (value == 0)) return 0;
+ if (size == 0)
+ return 0;
+ if ((pname == 0) || (value == 0))
+ return 0;
+#ifdef CONFIG_SUN_LDOMS
+ if (ldom_domaining_enabled) {
+ ldom_set_var(pname, value);
+ return 0;
+ }
+#endif
return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
P1275_ARG(2,P1275_ARG_IN_BUF)|
P1275_INOUT(4, 1),
@@ -295,3 +304,11 @@ prom_pathtoinode(const char *path)
if (node == -1) return 0;
return node;
}
+
+int prom_ihandle2path(int handle, char *buffer, int bufsize)
+{
+ return p1275_cmd("instance-to-path",
+ P1275_ARG(1,P1275_ARG_OUT_BUF)|
+ P1275_INOUT(3, 1),
+ handle, buffer, P1275_SIZE(bufsize));
+}
diff --git a/arch/sparc64/solaris/socksys.c b/arch/sparc64/solaris/socksys.c
index e94f6e5d9455..7736411f244f 100644
--- a/arch/sparc64/solaris/socksys.c
+++ b/arch/sparc64/solaris/socksys.c
@@ -199,6 +199,5 @@ int __init init_socksys(void)
void __exit cleanup_socksys(void)
{
- if (unregister_chrdev(30, "socksys"))
- printk ("Couldn't unregister socksys character device\n");
+ unregister_chrdev(30, "socksys");
}