Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 4
-rw-r--r--  drivers/acpi/Kconfig | 15
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/acpica/Makefile | 2
-rw-r--r--  drivers/acpi/acpica/acconfig.h | 1
-rw-r--r--  drivers/acpi/acpica/acevents.h | 17
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 13
-rw-r--r--  drivers/acpi/acpica/amlcode.h | 15
-rw-r--r--  drivers/acpi/acpica/dswload.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 2
-rw-r--r--  drivers/acpi/acpica/evglock.c | 335
-rw-r--r--  drivers/acpi/acpica/evmisc.c | 303
-rw-r--r--  drivers/acpi/acpica/evregion.c | 121
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfregn.c | 13
-rw-r--r--  drivers/acpi/acpica/excreate.c | 3
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 13
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 5
-rw-r--r--  drivers/acpi/acpica/utmutex.c | 12
-rw-r--r--  drivers/acpi/bus.c | 2
-rw-r--r--  drivers/acpi/custom_method.c | 100
-rw-r--r--  drivers/acpi/debugfs.c | 92
-rw-r--r--  drivers/acpi/ec.c | 19
-rw-r--r--  drivers/acpi/internal.h | 3
-rw-r--r--  drivers/acpi/osl.c | 33
-rw-r--r--  drivers/acpi/processor_core.c | 12
-rw-r--r--  drivers/acpi/processor_idle.c | 2
-rw-r--r--  drivers/acpi/sysfs.c | 8
-rw-r--r--  drivers/block/floppy.c | 1
-rw-r--r--  drivers/block/paride/pcd.c | 1
-rw-r--r--  drivers/block/virtio_blk.c | 91
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 17
-rw-r--r--  drivers/cdrom/viocd.c | 1
-rw-r--r--  drivers/char/virtio_console.c | 5
-rw-r--r--  drivers/clocksource/sh_cmt.c | 12
-rw-r--r--  drivers/clocksource/sh_tmu.c | 12
-rw-r--r--  drivers/cpuidle/governors/menu.c | 4
-rw-r--r--  drivers/dma/Kconfig | 12
-rw-r--r--  drivers/dma/TODO | 14
-rw-r--r--  drivers/dma/at_hdmac.c | 376
-rw-r--r--  drivers/dma/at_hdmac_regs.h | 30
-rw-r--r--  drivers/dma/coh901318.c | 2
-rw-r--r--  drivers/dma/dw_dmac.c | 272
-rw-r--r--  drivers/dma/dw_dmac_regs.h | 2
-rw-r--r--  drivers/dma/intel_mid_dma.c | 17
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 8
-rw-r--r--  drivers/dma/iop-adma.c | 6
-rw-r--r--  drivers/dma/mv_xor.c | 6
-rw-r--r--  drivers/dma/pch_dma.c | 96
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 8
-rw-r--r--  drivers/dma/shdma.c | 9
-rw-r--r--  drivers/dma/ste_dma40.c | 4
-rw-r--r--  drivers/gpio/Kconfig | 38
-rw-r--r--  drivers/gpio/Makefile | 7
-rw-r--r--  drivers/gpio/gpio-exynos4.c | 365
-rw-r--r--  drivers/gpio/gpio-nomadik.c | 1069
-rw-r--r--  drivers/gpio/gpio-omap.c | 2007
-rw-r--r--  drivers/gpio/gpio-plat-samsung.c | 206
-rw-r--r--  drivers/gpio/gpio-s5pc100.c | 355
-rw-r--r--  drivers/gpio/gpio-s5pv210.c | 288
-rw-r--r--  drivers/gpio/gpio-u300.c | 700
-rw-r--r--  drivers/gpio/gpiolib.c | 4
-rw-r--r--  drivers/gpio/langwell_gpio.c | 65
-rw-r--r--  drivers/gpio/pca953x.c | 249
-rw-r--r--  drivers/gpio/pch_gpio.c | 2
-rw-r--r--  drivers/ide/ide-cd.c | 1
-rw-r--r--  drivers/input/serio/serport.c | 10
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c | 8
-rw-r--r--  drivers/md/dm-io.c | 27
-rw-r--r--  drivers/md/dm-kcopyd.c | 168
-rw-r--r--  drivers/md/dm-log.c | 3
-rw-r--r--  drivers/md/dm-mpath.c | 2
-rw-r--r--  drivers/md/dm-raid1.c | 10
-rw-r--r--  drivers/md/dm-snap-persistent.c | 13
-rw-r--r--  drivers/md/dm-snap.c | 10
-rw-r--r--  drivers/md/dm-table.c | 23
-rw-r--r--  drivers/mfd/Kconfig | 2
-rw-r--r--  drivers/mfd/db8500-prcmu.c | 3
-rw-r--r--  drivers/misc/kgdbts.c | 5
-rw-r--r--  drivers/misc/ti-st/st_core.c | 6
-rw-r--r--  drivers/net/caif/caif_serial.c | 6
-rw-r--r--  drivers/net/can/slcan.c | 9
-rw-r--r--  drivers/net/hamradio/6pack.c | 8
-rw-r--r--  drivers/net/hamradio/mkiss.c | 11
-rw-r--r--  drivers/net/irda/irtty-sir.c | 16
-rw-r--r--  drivers/net/ppp_async.c | 6
-rw-r--r--  drivers/net/ppp_synctty.c | 6
-rw-r--r--  drivers/net/slip.c | 11
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/wan/x25_asy.c | 7
-rw-r--r--  drivers/oprofile/event_buffer.h | 2
-rw-r--r--  drivers/oprofile/oprof.c | 2
-rw-r--r--  drivers/pci/dmar.c | 7
-rw-r--r--  drivers/pci/intel-iommu.c | 240
-rw-r--r--  drivers/pci/iova.c | 12
-rw-r--r--  drivers/pci/pci-acpi.c | 2
-rw-r--r--  drivers/platform/x86/Kconfig | 11
-rw-r--r--  drivers/platform/x86/Makefile | 3
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 184
-rw-r--r--  drivers/platform/x86/acerhdf.c | 4
-rw-r--r--  drivers/platform/x86/asus-laptop.c | 34
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 22
-rw-r--r--  drivers/platform/x86/asus_acpi.c | 77
-rw-r--r--  drivers/platform/x86/compal-laptop.c | 36
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 12
-rw-r--r--  drivers/platform/x86/dell-wmi-aio.c | 3
-rw-r--r--  drivers/platform/x86/dell-wmi.c | 17
-rw-r--r--  drivers/platform/x86/eeepc-laptop.c | 21
-rw-r--r--  drivers/platform/x86/eeepc-wmi.c | 14
-rw-r--r--  drivers/platform/x86/fujitsu-laptop.c | 39
-rw-r--r--  drivers/platform/x86/hdaps.c | 19
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 43
-rw-r--r--  drivers/platform/x86/ibm_rtl.c | 23
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 2
-rw-r--r--  drivers/platform/x86/intel_menlow.c | 5
-rw-r--r--  drivers/platform/x86/intel_mid_powerbtn.c | 72
-rw-r--r--  drivers/platform/x86/intel_mid_thermal.c | 607
-rw-r--r--  drivers/platform/x86/intel_oaktrail.c | 396
-rw-r--r--  drivers/platform/x86/intel_pmic_gpio.c | 14
-rw-r--r--  drivers/platform/x86/msi-laptop.c | 23
-rw-r--r--  drivers/platform/x86/msi-wmi.c | 45
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 106
-rw-r--r--  drivers/platform/x86/tc1100-wmi.c | 7
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 490
-rw-r--r--  drivers/platform/x86/topstar-laptop.c | 2
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 59
-rw-r--r--  drivers/platform/x86/toshiba_bluetooth.c | 11
-rw-r--r--  drivers/platform/x86/wmi.c | 10
-rw-r--r--  drivers/platform/x86/xo15-ebook.c | 5
-rw-r--r--  drivers/scsi/scsi_proc.c | 5
-rw-r--r--  drivers/spi/Kconfig | 9
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/spi/spi_bfin_sport.c | 952
-rw-r--r--  drivers/spi/tle62x0.c | 3
-rw-r--r--  drivers/thermal/thermal_sys.c | 10
-rw-r--r--  drivers/tty/n_gsm.c | 6
-rw-r--r--  drivers/tty/n_hdlc.c | 18
-rw-r--r--  drivers/tty/n_r3964.c | 10
-rw-r--r--  drivers/tty/n_tty.c | 61
-rw-r--r--  drivers/tty/tty_buffer.c | 15
-rw-r--r--  drivers/tty/vt/selection.c | 3
-rw-r--r--  drivers/usb/host/ehci-pci.c | 39
-rw-r--r--  drivers/usb/host/pci-quirks.c | 63
-rw-r--r--  drivers/usb/host/pci-quirks.h | 2
-rw-r--r--  drivers/usb/host/xhci-pci.c | 26
-rw-r--r--  drivers/usb/host/xhci-ring.c | 89
-rw-r--r--  drivers/usb/host/xhci.c | 240
-rw-r--r--  drivers/usb/host/xhci.h | 22
-rw-r--r--  drivers/vhost/net.c | 12
-rw-r--r--  drivers/vhost/test.c | 6
-rw-r--r--  drivers/vhost/vhost.c | 138
-rw-r--r--  drivers/vhost/vhost.h | 21
-rw-r--r--  drivers/virtio/virtio_balloon.c | 21
-rw-r--r--  drivers/virtio/virtio_ring.c | 53
154 files changed, 10129 insertions, 2261 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 6b17f5864340..09f3232bcdcd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_SFI) += sfi/
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
obj-$(CONFIG_ARM_AMBA) += amba/
+# Many drivers will want to use DMA so this has to be made available
+# really early.
+obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_XEN) += xen/
@@ -92,7 +95,6 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bc2218db5ba9..de0e3df76776 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -369,6 +369,21 @@ config ACPI_HED
which is used to report some hardware errors notified via
SCI, mainly the corrected errors.
+config ACPI_CUSTOM_METHOD
+ tristate "Allow ACPI methods to be inserted/replaced at run time"
+ depends on DEBUG_FS
+ default n
+ help
+ This debug facility allows ACPI AML methods to be inserted and/or
+ replaced without rebooting the system. For details refer to:
+ Documentation/acpi/method-customizing.txt.
+
+ NOTE: This option is security sensitive, because it allows arbitrary
+ kernel memory to be written to by root (uid=0) users, allowing them
+ to bypass certain security measures (e.g. if root is not allowed to
+ load additional kernel modules after boot, this feature may be used
+ to override that restriction).
+
source "drivers/acpi/apei/Kconfig"
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b66fbb2fc85f..ecb26b4f29a0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ACPI_SBS) += sbshc.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
+obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a1224712fd0c..301bd2d388ad 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index ab87396c2c07..bc533dde16c4 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -187,7 +187,6 @@
/* Operation regions */
-#define ACPI_NUM_PREDEFINED_REGIONS 9
#define ACPI_USER_REGION_BEGIN 0x80
/* Maximum space_ids for Operation Regions */
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 41d247daf461..bea3b4899183 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -58,12 +58,6 @@ u32 acpi_ev_fixed_event_detect(void);
*/
u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
-acpi_status acpi_ev_acquire_global_lock(u16 timeout);
-
-acpi_status acpi_ev_release_global_lock(void);
-
-acpi_status acpi_ev_init_global_lock_handler(void);
-
u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
acpi_status
@@ -71,6 +65,17 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 notify_value);
/*
+ * evglock - Global Lock support
+ */
+acpi_status acpi_ev_init_global_lock_handler(void);
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout);
+
+acpi_status acpi_ev_release_global_lock(void);
+
+acpi_status acpi_ev_remove_global_lock_handler(void);
+
+/*
* evgpe - Low-level GPE support
*/
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index d69750b83b36..73863d86f022 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -214,24 +214,23 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
/*
* Global lock mutex is an actual AML mutex object
- * Global lock semaphore works in conjunction with the HW global lock
+ * Global lock semaphore works in conjunction with the actual global lock
+ * Global lock spinlock is used for "pending" handshake
*/
ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
+ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
ACPI_EXTERN u8 acpi_gbl_global_lock_present;
+ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
/*
* Spinlocks are used for interfaces that can be possibly called at
* interrupt level
*/
-ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */
-ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
-ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
-#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
-#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
-#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
+ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
+ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index f4f0998d3967..1077f17859ed 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -394,21 +394,6 @@
#define AML_CLASS_METHOD_CALL 0x09
#define AML_CLASS_UNKNOWN 0x0A
-/* Predefined Operation Region space_iDs */
-
-typedef enum {
- REGION_MEMORY = 0,
- REGION_IO,
- REGION_PCI_CONFIG,
- REGION_EC,
- REGION_SMBUS,
- REGION_CMOS,
- REGION_PCI_BAR,
- REGION_IPMI,
- REGION_DATA_TABLE, /* Internal use only */
- REGION_FIXED_HW = 0x7F
-} AML_REGION_TYPES;
-
/* Comparison operation codes for match_op operator */
typedef enum {
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 23a3b1ab20c1..324acec1179a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -450,7 +450,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ex_create_region(op->named.data,
op->named.length,
- REGION_DATA_TABLE,
+ ACPI_ADR_SPACE_DATA_TABLE,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 4be4e921dfe1..976318138c56 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -562,7 +562,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
((op->common.value.arg)->common.value.
integer);
} else {
- region_space = REGION_DATA_TABLE;
+ region_space = ACPI_ADR_SPACE_DATA_TABLE;
}
/*
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
new file mode 100644
index 000000000000..56a562a1e5d7
--- /dev/null
+++ b/drivers/acpi/acpica/evglock.c
@@ -0,0 +1,335 @@
+/******************************************************************************
+ *
+ * Module Name: evglock - Global Lock support
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evglock")
+
+/* Local prototypes */
+static u32 acpi_ev_global_lock_handler(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_init_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for the global lock release event
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_init_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+
+ /* Attempt installation of the global lock handler */
+
+ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler,
+ NULL);
+
+ /*
+ * If the global lock does not exist on this platform, the attempt to
+ * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
+ * Map to AE_OK, but mark global lock as not present. Any attempt to
+ * actually use the global lock will be flagged with an error.
+ */
+ acpi_gbl_global_lock_present = FALSE;
+ if (status == AE_NO_HARDWARE_RESPONSE) {
+ ACPI_ERROR((AE_INFO,
+ "No response from Global Lock hardware, disabling lock"));
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ status = acpi_os_create_lock(&acpi_gbl_global_lock_pending_lock);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_gbl_global_lock_pending = FALSE;
+ acpi_gbl_global_lock_present = TRUE;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_remove_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove the handler for the Global Lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_remove_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
+
+ acpi_gbl_global_lock_present = FALSE;
+ status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_global_lock_handler
+ *
+ * PARAMETERS: Context - From thread interface, not used
+ *
+ * RETURN: ACPI_INTERRUPT_HANDLED
+ *
+ * DESCRIPTION: Invoked directly from the SCI handler when a global lock
+ * release interrupt occurs. If there is actually a pending
+ * request for the lock, signal the waiting thread.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_global_lock_handler(void *context)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ /*
+ * If a request for the global lock is not actually pending,
+ * we are done. This handles "spurious" global lock interrupts
+ * which are possible (and have been seen) with bad BIOSs.
+ */
+ if (!acpi_gbl_global_lock_pending) {
+ goto cleanup_and_exit;
+ }
+
+ /*
+ * Send a unit to the global lock semaphore. The actual acquisition
+ * of the global lock will be performed by the waiting thread.
+ */
+ status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
+ }
+
+ acpi_gbl_global_lock_pending = FALSE;
+
+ cleanup_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+ return (ACPI_INTERRUPT_HANDLED);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_acquire_global_lock
+ *
+ * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Attempt to gain ownership of the Global Lock.
+ *
+ * MUTEX: Interpreter must be locked
+ *
+ * Note: The original implementation allowed multiple threads to "acquire" the
+ * Global Lock, and the OS would hold the lock until the last thread had
+ * released it. However, this could potentially starve the BIOS out of the
+ * lock, especially in the case where there is a tight handshake between the
+ * Embedded Controller driver and the BIOS. Therefore, this implementation
+ * allows only one thread to acquire the HW Global Lock at a time, and makes
+ * the global lock appear as a standard mutex on the OS side.
+ *
+ *****************************************************************************/
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout)
+{
+ acpi_cpu_flags flags;
+ acpi_status status;
+ u8 acquired = FALSE;
+
+ ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
+
+ /*
+ * Only one thread can acquire the GL at a time, the global_lock_mutex
+ * enforces this. This interface releases the interpreter if we must wait.
+ */
+ status =
+ acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex->mutex.
+ os_mutex, timeout);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Update the global lock handle and check for wraparound. The handle is
+ * only used for the external global lock interfaces, but it is updated
+ * here to properly handle the case where a single thread may acquire the
+ * lock via both the AML and the acpi_acquire_global_lock interfaces. The
+ * handle is therefore updated on the first acquire from a given thread
+ * regardless of where the acquisition request originated.
+ */
+ acpi_gbl_global_lock_handle++;
+ if (acpi_gbl_global_lock_handle == 0) {
+ acpi_gbl_global_lock_handle = 1;
+ }
+
+ /*
+ * Make sure that a global lock actually exists. If not, just
+ * treat the lock as a standard mutex.
+ */
+ if (!acpi_gbl_global_lock_present) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ do {
+
+ /* Attempt to acquire the actual hardware lock */
+
+ ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+ if (acquired) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Acquired hardware Global Lock\n"));
+ break;
+ }
+
+ /*
+ * Did not get the lock. The pending bit was set above, and
+ * we must now wait until we receive the global lock
+ * released interrupt.
+ */
+ acpi_gbl_global_lock_pending = TRUE;
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Waiting for hardware Global Lock\n"));
+
+ /*
+ * Wait for handshake with the global lock interrupt handler.
+ * This interface releases the interpreter if we must wait.
+ */
+ status =
+ acpi_ex_system_wait_semaphore
+ (acpi_gbl_global_lock_semaphore, ACPI_WAIT_FOREVER);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
+
+ } while (ACPI_SUCCESS(status));
+
+ acpi_gbl_global_lock_pending = FALSE;
+ acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_release_global_lock
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Releases ownership of the Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_release_global_lock(void)
+{
+ u8 pending = FALSE;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ev_release_global_lock);
+
+ /* Lock must be already acquired */
+
+ if (!acpi_gbl_global_lock_acquired) {
+ ACPI_WARNING((AE_INFO,
+ "Cannot release the ACPI Global Lock, it has not been acquired"));
+ return_ACPI_STATUS(AE_NOT_ACQUIRED);
+ }
+
+ if (acpi_gbl_global_lock_present) {
+
+ /* Allow any thread to release the lock */
+
+ ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
+
+ /*
+ * If the pending bit was set, we must write GBL_RLS to the control
+ * register
+ */
+ if (pending) {
+ status =
+ acpi_write_bit_register
+ (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
+ ACPI_ENABLE_EVENT);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Released hardware Global Lock\n"));
+ }
+
+ acpi_gbl_global_lock_acquired = FALSE;
+
+ /* Release the local GL mutex */
+
+ acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
+ return_ACPI_STATUS(status);
+}
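Illustration only, not part of this patch: the Global Lock logic consolidated into evglock.c above is reached from drivers through the existing public acpi_acquire_global_lock()/acpi_release_global_lock() interfaces (not shown in this diff), which funnel into the acpi_ev_* routines above via the global lock mutex object. A minimal, hypothetical caller that serializes an access to firmware-shared hardware might look like the sketch below; the function name and the commented hardware access are assumptions.

#include <linux/errno.h>
#include <acpi/acpi.h>

/* Hypothetical sketch: take the ACPI Global Lock around a hardware
 * access that the BIOS also performs. Not part of this patch. */
static int example_locked_hw_access(void)
{
        u32 glk_handle;
        acpi_status status;

        /* Ends up in acpi_ev_acquire_global_lock() above */
        status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk_handle);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        /* ... touch the hardware shared with the firmware ... */

        /* Ends up in acpi_ev_release_global_lock() above */
        acpi_release_global_lock(glk_handle);
        return 0;
}

As the note in acpi_ev_acquire_global_lock() explains, only one thread holds the hardware lock at a time, so the Global Lock behaves like an ordinary mutex on the OS side and the BIOS cannot be starved of it.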
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 7dc80946f7bd..d0b331844427 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -45,7 +45,6 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")
@@ -53,10 +52,6 @@ ACPI_MODULE_NAME("evmisc")
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
-static u32 acpi_ev_global_lock_handler(void *context);
-
-static acpi_status acpi_ev_remove_global_lock_handler(void);
-
/*******************************************************************************
*
* FUNCTION: acpi_ev_is_notify_object
@@ -275,304 +270,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
acpi_ut_delete_generic_state(notify_info);
}
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_global_lock_handler
- *
- * PARAMETERS: Context - From thread interface, not used
- *
- * RETURN: ACPI_INTERRUPT_HANDLED
- *
- * DESCRIPTION: Invoked directly from the SCI handler when a global lock
- * release interrupt occurs. If there's a thread waiting for
- * the global lock, signal it.
- *
- * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
- * this is not possible for some reason, a separate thread will have to be
- * scheduled to do this.
- *
- ******************************************************************************/
-static u8 acpi_ev_global_lock_pending;
-
-static u32 acpi_ev_global_lock_handler(void *context)
-{
- acpi_status status;
- acpi_cpu_flags flags;
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- if (!acpi_ev_global_lock_pending) {
- goto out;
- }
-
- /* Send a unit to the semaphore */
-
- status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
- }
-
- acpi_ev_global_lock_pending = FALSE;
-
- out:
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- return (ACPI_INTERRUPT_HANDLED);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_init_global_lock_handler
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Install a handler for the global lock release event
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_init_global_lock_handler(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
-
- /* Attempt installation of the global lock handler */
-
- status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
- acpi_ev_global_lock_handler,
- NULL);
-
- /*
- * If the global lock does not exist on this platform, the attempt to
- * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
- * Map to AE_OK, but mark global lock as not present. Any attempt to
- * actually use the global lock will be flagged with an error.
- */
- if (status == AE_NO_HARDWARE_RESPONSE) {
- ACPI_ERROR((AE_INFO,
- "No response from Global Lock hardware, disabling lock"));
-
- acpi_gbl_global_lock_present = FALSE;
- return_ACPI_STATUS(AE_OK);
- }
-
- acpi_gbl_global_lock_present = TRUE;
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_remove_global_lock_handler
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove the handler for the Global Lock
- *
- ******************************************************************************/
-
-static acpi_status acpi_ev_remove_global_lock_handler(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
-
- acpi_gbl_global_lock_present = FALSE;
- status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
- acpi_ev_global_lock_handler);
-
- return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_ev_acquire_global_lock
- *
- * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Attempt to gain ownership of the Global Lock.
- *
- * MUTEX: Interpreter must be locked
- *
- * Note: The original implementation allowed multiple threads to "acquire" the
- * Global Lock, and the OS would hold the lock until the last thread had
- * released it. However, this could potentially starve the BIOS out of the
- * lock, especially in the case where there is a tight handshake between the
- * Embedded Controller driver and the BIOS. Therefore, this implementation
- * allows only one thread to acquire the HW Global Lock at a time, and makes
- * the global lock appear as a standard mutex on the OS side.
- *
- *****************************************************************************/
-static acpi_thread_id acpi_ev_global_lock_thread_id;
-static int acpi_ev_global_lock_acquired;
-
-acpi_status acpi_ev_acquire_global_lock(u16 timeout)
-{
- acpi_cpu_flags flags;
- acpi_status status = AE_OK;
- u8 acquired = FALSE;
-
- ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
-
- /*
- * Only one thread can acquire the GL at a time, the global_lock_mutex
- * enforces this. This interface releases the interpreter if we must wait.
- */
- status = acpi_ex_system_wait_mutex(
- acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
- if (status == AE_TIME) {
- if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
- acpi_ev_global_lock_acquired++;
- return AE_OK;
- }
- }
-
- if (ACPI_FAILURE(status)) {
- status = acpi_ex_system_wait_mutex(
- acpi_gbl_global_lock_mutex->mutex.os_mutex,
- timeout);
- }
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
- acpi_ev_global_lock_acquired++;
-
- /*
- * Update the global lock handle and check for wraparound. The handle is
- * only used for the external global lock interfaces, but it is updated
- * here to properly handle the case where a single thread may acquire the
- * lock via both the AML and the acpi_acquire_global_lock interfaces. The
- * handle is therefore updated on the first acquire from a given thread
- * regardless of where the acquisition request originated.
- */
- acpi_gbl_global_lock_handle++;
- if (acpi_gbl_global_lock_handle == 0) {
- acpi_gbl_global_lock_handle = 1;
- }
-
- /*
- * Make sure that a global lock actually exists. If not, just treat the
- * lock as a standard mutex.
- */
- if (!acpi_gbl_global_lock_present) {
- acpi_gbl_global_lock_acquired = TRUE;
- return_ACPI_STATUS(AE_OK);
- }
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- do {
-
- /* Attempt to acquire the actual hardware lock */
-
- ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
- if (acquired) {
- acpi_gbl_global_lock_acquired = TRUE;
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Acquired hardware Global Lock\n"));
- break;
- }
-
- acpi_ev_global_lock_pending = TRUE;
-
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- /*
- * Did not get the lock. The pending bit was set above, and we
- * must wait until we get the global lock released interrupt.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Waiting for hardware Global Lock\n"));
-
- /*
- * Wait for handshake with the global lock interrupt handler.
- * This interface releases the interpreter if we must wait.
- */
- status = acpi_ex_system_wait_semaphore(
- acpi_gbl_global_lock_semaphore,
- ACPI_WAIT_FOREVER);
-
- flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
-
- } while (ACPI_SUCCESS(status));
-
- acpi_ev_global_lock_pending = FALSE;
-
- acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_release_global_lock
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Releases ownership of the Global Lock.
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_release_global_lock(void)
-{
- u8 pending = FALSE;
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE(ev_release_global_lock);
-
- /* Lock must be already acquired */
-
- if (!acpi_gbl_global_lock_acquired) {
- ACPI_WARNING((AE_INFO,
- "Cannot release the ACPI Global Lock, it has not been acquired"));
- return_ACPI_STATUS(AE_NOT_ACQUIRED);
- }
-
- acpi_ev_global_lock_acquired--;
- if (acpi_ev_global_lock_acquired > 0) {
- return AE_OK;
- }
-
- if (acpi_gbl_global_lock_present) {
-
- /* Allow any thread to release the lock */
-
- ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
-
- /*
- * If the pending bit was set, we must write GBL_RLS to the control
- * register
- */
- if (pending) {
- status =
- acpi_write_bit_register
- (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
- ACPI_ENABLE_EVENT);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Released hardware Global Lock\n"));
- }
-
- acpi_gbl_global_lock_acquired = FALSE;
-
- /* Release the local GL mutex */
- acpi_ev_global_lock_thread_id = 0;
- acpi_ev_global_lock_acquired = 0;
- acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
- return_ACPI_STATUS(status);
-}
-
/******************************************************************************
*
* FUNCTION: acpi_ev_terminate
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index bea7223d7a71..f0edf5c43c03 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -55,6 +55,8 @@ static u8
acpi_ev_has_default_handler(struct acpi_namespace_node *node,
acpi_adr_space_type space_id);
+static void acpi_ev_orphan_ec_reg_method(void);
+
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -561,7 +563,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
/* Now stop region accesses by executing the _REG method */
- status = acpi_ev_execute_reg_method(region_obj, 0);
+ status =
+ acpi_ev_execute_reg_method(region_obj,
+ ACPI_REG_DISCONNECT);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"from region _REG, [%s]",
@@ -1062,6 +1066,12 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
NULL, &space_id, NULL);
+ /* Special case for EC: handle "orphan" _REG methods with no region */
+
+ if (space_id == ACPI_ADR_SPACE_EC) {
+ acpi_ev_orphan_ec_reg_method();
+ }
+
return_ACPI_STATUS(status);
}
@@ -1120,6 +1130,113 @@ acpi_ev_reg_run(acpi_handle obj_handle,
return (AE_OK);
}
- status = acpi_ev_execute_reg_method(obj_desc, 1);
+ status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
return (status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_orphan_ec_reg_method
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC
+ * device. This is a _REG method that has no corresponding region
+ * within the EC device scope. The orphan _REG method appears to
+ * have been enabled by the description of the ECDT in the ACPI
+ * specification: "The availability of the region space can be
+ * detected by providing a _REG method object underneath the
+ * Embedded Controller device."
+ *
+ * To quickly access the EC device, we use the EC_ID that appears
+ * within the ECDT. Otherwise, we would need to perform a time-
+ * consuming namespace walk, executing _HID methods to find the
+ * EC device.
+ *
+ ******************************************************************************/
+
+static void acpi_ev_orphan_ec_reg_method(void)
+{
+ struct acpi_table_ecdt *table;
+ acpi_status status;
+ struct acpi_object_list args;
+ union acpi_object objects[2];
+ struct acpi_namespace_node *ec_device_node;
+ struct acpi_namespace_node *reg_method;
+ struct acpi_namespace_node *next_node;
+
+ ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method);
+
+ /* Get the ECDT (if present in system) */
+
+ status = acpi_get_table(ACPI_SIG_ECDT, 0,
+ ACPI_CAST_INDIRECT_PTR(struct acpi_table_header,
+ &table));
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* We need a valid EC_ID string */
+
+ if (!(*table->id)) {
+ return_VOID;
+ }
+
+ /* Namespace is currently locked, must release */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ /* Get a handle to the EC device referenced in the ECDT */
+
+ status = acpi_get_handle(NULL,
+ ACPI_CAST_PTR(char, table->id),
+ ACPI_CAST_PTR(acpi_handle, &ec_device_node));
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Get a handle to a _REG method immediately under the EC device */
+
+ status = acpi_get_handle(ec_device_node,
+ METHOD_NAME__REG, ACPI_CAST_PTR(acpi_handle,
+ &reg_method));
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /*
+ * Execute the _REG method only if there is no Operation Region in
+ * this scope with the Embedded Controller space ID. Otherwise, it
+ * will already have been executed. Note, this allows for Regions
+ * with other space IDs to be present; but the code below will then
+ * execute the _REG method with the EC space ID argument.
+ */
+ next_node = acpi_ns_get_next_node(ec_device_node, NULL);
+ while (next_node) {
+ if ((next_node->type == ACPI_TYPE_REGION) &&
+ (next_node->object) &&
+ (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) {
+ goto exit; /* Do not execute _REG */
+ }
+ next_node = acpi_ns_get_next_node(ec_device_node, next_node);
+ }
+
+ /* Evaluate the _REG(EC,Connect) method */
+
+ args.count = 2;
+ args.pointer = objects;
+ objects[0].type = ACPI_TYPE_INTEGER;
+ objects[0].integer.value = ACPI_ADR_SPACE_EC;
+ objects[1].type = ACPI_TYPE_INTEGER;
+ objects[1].integer.value = ACPI_REG_CONNECT;
+
+ status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
+
+ exit:
+ /* We ignore all errors from above, don't care */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ return_VOID;
+}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 9659cee6093e..55a5d35ef34a 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -637,7 +637,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
status =
acpi_ev_execute_reg_method
- (region_obj, 1);
+ (region_obj, ACPI_REG_CONNECT);
if (acpi_ns_locked) {
status =
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index c85c8c45599d..00cd95692a91 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -130,20 +130,21 @@ acpi_install_address_space_handler(acpi_handle device,
case ACPI_ADR_SPACE_PCI_CONFIG:
case ACPI_ADR_SPACE_DATA_TABLE:
- if (acpi_gbl_reg_methods_executed) {
+ if (!acpi_gbl_reg_methods_executed) {
- /* Run all _REG methods for this address space */
-
- status = acpi_ev_execute_reg_methods(node, space_id);
+ /* We will defer execution of the _REG methods for this space */
+ goto unlock_and_exit;
}
break;
default:
-
- status = acpi_ev_execute_reg_methods(node, space_id);
break;
}
+ /* Run all _REG methods for this address space */
+
+ status = acpi_ev_execute_reg_methods(node, space_id);
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index e7b372d17667..110711afada8 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -305,7 +305,8 @@ acpi_ex_create_region(u8 * aml_start,
* range
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
- (region_space < ACPI_USER_REGION_BEGIN)) {
+ (region_space < ACPI_USER_REGION_BEGIN) &&
+ (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 1d76ac85b5e7..ac7b854b0bd7 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -74,7 +74,6 @@ ACPI_MODULE_NAME("nsrepair")
*
* Additional possible repairs:
*
- * Optional/unnecessary NULL package elements removed
* Required package elements that are NULL replaced by Integer/String/Buffer
* Incorrect standalone package wrapped with required outer package
*
@@ -623,16 +622,12 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
ACPI_FUNCTION_NAME(ns_remove_null_elements);
/*
- * PTYPE1 packages contain no subpackages.
- * PTYPE2 packages contain a variable number of sub-packages. We can
- * safely remove all NULL elements from the PTYPE2 packages.
+ * We can safely remove all NULL elements from these package types:
+ * PTYPE1_VAR packages contain a variable number of simple data types.
+ * PTYPE2 packages contain a variable number of sub-packages.
*/
switch (package_type) {
- case ACPI_PTYPE1_FIXED:
case ACPI_PTYPE1_VAR:
- case ACPI_PTYPE1_OPTION:
- return;
-
case ACPI_PTYPE2:
case ACPI_PTYPE2_COUNT:
case ACPI_PTYPE2_PKG_COUNT:
@@ -642,6 +637,8 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
break;
default:
+ case ACPI_PTYPE1_FIXED:
+ case ACPI_PTYPE1_OPTION:
return;
}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 136a814cec69..97cb36f85ce9 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -170,8 +170,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
- "IPMI",
- "DataTable"
+ "IPMI"
};
char *acpi_ut_get_region_name(u8 space_id)
@@ -179,6 +178,8 @@ char *acpi_ut_get_region_name(u8 space_id)
if (space_id >= ACPI_USER_REGION_BEGIN) {
return ("UserDefinedRegion");
+ } else if (space_id == ACPI_ADR_SPACE_DATA_TABLE) {
+ return ("DataTable");
} else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
return ("FunctionalFixedHW");
} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index a946c689f03b..7d797e2baecd 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -83,9 +83,15 @@ acpi_status acpi_ut_mutex_initialize(void)
/* Create the spinlocks for use at interrupt level */
- spin_lock_init(acpi_gbl_gpe_lock);
- spin_lock_init(acpi_gbl_hardware_lock);
- spin_lock_init(acpi_ev_global_lock_pending_lock);
+ status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
+
+ status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
/* Mutex for _OSI support */
status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9749980ca6ca..d1e06c182cdb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -227,7 +227,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
acpi_status status = AE_OK;
char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
- if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+ if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
/* Make sure this is a valid target state */
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
new file mode 100644
index 000000000000..5d42c2414ae5
--- /dev/null
+++ b/drivers/acpi/custom_method.c
@@ -0,0 +1,100 @@
+/*
+ * debugfs.c - ACPI debugfs interface to userspace.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
+
+#define _COMPONENT ACPI_SYSTEM_COMPONENT
+ACPI_MODULE_NAME("custom_method");
+MODULE_LICENSE("GPL");
+
+static struct dentry *cm_dentry;
+
+/* /sys/kernel/debug/acpi/custom_method */
+
+static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char *buf;
+ static u32 max_size;
+ static u32 uncopied_bytes;
+
+ struct acpi_table_header table;
+ acpi_status status;
+
+ if (!(*ppos)) {
+ /* parse the table header to get the table length */
+ if (count <= sizeof(struct acpi_table_header))
+ return -EINVAL;
+ if (copy_from_user(&table, user_buf,
+ sizeof(struct acpi_table_header)))
+ return -EFAULT;
+ uncopied_bytes = max_size = table.length;
+ buf = kzalloc(max_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ if (buf == NULL)
+ return -EINVAL;
+
+ if ((*ppos > max_size) ||
+ (*ppos + count > max_size) ||
+ (*ppos + count < count) ||
+ (count > uncopied_bytes))
+ return -EINVAL;
+
+ if (copy_from_user(buf + (*ppos), user_buf, count)) {
+ kfree(buf);
+ buf = NULL;
+ return -EFAULT;
+ }
+
+ uncopied_bytes -= count;
+ *ppos += count;
+
+ if (!uncopied_bytes) {
+ status = acpi_install_method(buf);
+ kfree(buf);
+ buf = NULL;
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
+
+ return count;
+}
+
+static const struct file_operations cm_fops = {
+ .write = cm_write,
+ .llseek = default_llseek,
+};
+
+static int __init acpi_custom_method_init(void)
+{
+ if (acpi_debugfs_dir == NULL)
+ return -ENOENT;
+
+ cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
+ acpi_debugfs_dir, NULL, &cm_fops);
+ if (cm_dentry == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit acpi_custom_method_exit(void)
+{
+ if (cm_dentry)
+ debugfs_remove(cm_dentry);
+ }
+
+module_init(acpi_custom_method_init);
+module_exit(acpi_custom_method_exit);
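As a hypothetical aside, not part of this patch: cm_write() above expects a complete AML image (a standard ACPI table header followed by the method body, e.g. as produced by the iasl compiler and described in Documentation/acpi/method-customizing.txt), and keeps buffering writes until the length recorded in the header has arrived. A sketch of a userspace feeder follows; the "method.aml" input name is an assumption, and the program must run as root with debugfs mounted.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical sketch: push a compiled AML image into the new
 * /sys/kernel/debug/acpi/custom_method file. */
int main(void)
{
        char buf[4096];
        ssize_t n;
        int in = open("method.aml", O_RDONLY);
        int out = open("/sys/kernel/debug/acpi/custom_method", O_WRONLY);

        if (in < 0 || out < 0) {
                perror("open");
                return 1;
        }
        while ((n = read(in, buf, sizeof(buf))) > 0) {
                /* cm_write() parses the table header on the first chunk
                 * and rejects writes past the declared table length */
                if (write(out, buf, n) != n) {
                        perror("write");
                        return 1;
                }
        }
        close(out);
        close(in);
        return 0;
}

On success the kernel installs the method via acpi_install_method() and sets TAINT_OVERRIDDEN_ACPI_TABLE, which is why the Kconfig entry above flags the option as security sensitive.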
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 384f7abcff77..182a9fc36355 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -3,100 +3,16 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <acpi/acpi_drivers.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("debugfs");
+struct dentry *acpi_debugfs_dir;
+EXPORT_SYMBOL_GPL(acpi_debugfs_dir);
-/* /sys/modules/acpi/parameters/aml_debug_output */
-
-module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
- bool, 0644);
-MODULE_PARM_DESC(aml_debug_output,
- "To enable/disable the ACPI Debug Object output.");
-
-/* /sys/kernel/debug/acpi/custom_method */
-
-static ssize_t cm_write(struct file *file, const char __user * user_buf,
- size_t count, loff_t *ppos)
+void __init acpi_debugfs_init(void)
{
- static char *buf;
- static u32 max_size;
- static u32 uncopied_bytes;
-
- struct acpi_table_header table;
- acpi_status status;
-
- if (!(*ppos)) {
- /* parse the table header to get the table length */
- if (count <= sizeof(struct acpi_table_header))
- return -EINVAL;
- if (copy_from_user(&table, user_buf,
- sizeof(struct acpi_table_header)))
- return -EFAULT;
- uncopied_bytes = max_size = table.length;
- buf = kzalloc(max_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- }
-
- if (buf == NULL)
- return -EINVAL;
-
- if ((*ppos > max_size) ||
- (*ppos + count > max_size) ||
- (*ppos + count < count) ||
- (count > uncopied_bytes))
- return -EINVAL;
-
- if (copy_from_user(buf + (*ppos), user_buf, count)) {
- kfree(buf);
- buf = NULL;
- return -EFAULT;
- }
-
- uncopied_bytes -= count;
- *ppos += count;
-
- if (!uncopied_bytes) {
- status = acpi_install_method(buf);
- kfree(buf);
- buf = NULL;
- if (ACPI_FAILURE(status))
- return -EINVAL;
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
- }
-
- return count;
-}
-
-static const struct file_operations cm_fops = {
- .write = cm_write,
- .llseek = default_llseek,
-};
-
-int __init acpi_debugfs_init(void)
-{
- struct dentry *acpi_dir, *cm_dentry;
-
- acpi_dir = debugfs_create_dir("acpi", NULL);
- if (!acpi_dir)
- goto err;
-
- cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
- acpi_dir, NULL, &cm_fops);
- if (!cm_dentry)
- goto err;
-
- return 0;
-
-err:
- if (acpi_dir)
- debugfs_remove(acpi_dir);
- return -EINVAL;
+ acpi_debugfs_dir = debugfs_create_dir("acpi", NULL);
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index fa848c4116a8..b19a18dd994f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -69,7 +69,6 @@ enum ec_command {
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
-#define ACPI_EC_CDELAY 10 /* Wait 10us before polling EC */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
@@ -433,8 +432,7 @@ EXPORT_SYMBOL(ec_write);
int ec_transaction(u8 command,
const u8 * wdata, unsigned wdata_len,
- u8 * rdata, unsigned rdata_len,
- int force_poll)
+ u8 * rdata, unsigned rdata_len)
{
struct transaction t = {.command = command,
.wdata = wdata, .rdata = rdata,
@@ -592,8 +590,6 @@ static void acpi_ec_gpe_query(void *ec_cxt)
mutex_unlock(&ec->lock);
}
-static void acpi_ec_gpe_query(void *ec_cxt);
-
static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
@@ -808,8 +804,6 @@ static int acpi_ec_add(struct acpi_device *device)
return -EINVAL;
}
- ec->handle = device->handle;
-
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
acpi_ec_register_query_methods, NULL, ec, NULL);
@@ -938,8 +932,19 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
ec_flag_msi, "MSI hardware", {
DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
{
+ ec_flag_msi, "Quanta hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
+ {
+ ec_flag_msi, "Quanta hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
+ {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
{},
};
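Illustration only, not from this patch: the ec_transaction() export above loses its force_poll argument, so external callers need the five-argument form shown in the hunk. A hypothetical caller reading a single EC register might now look like the sketch below; the helper name is an assumption, 0x80 is the standard EC "read" command byte, and the prototype is repeated only to keep the sketch self-contained.

#include <linux/types.h>

/* Prototype as updated by this patch (normally picked up from the
 * ACPI headers alongside ec_read()/ec_write()) */
int ec_transaction(u8 command, const u8 *wdata, unsigned wdata_len,
                   u8 *rdata, unsigned rdata_len);

/* Hypothetical sketch: read one EC register with the new
 * 5-argument interface. */
static int example_ec_read_reg(u8 reg, u8 *val)
{
        return ec_transaction(0x80, &reg, 1, val, 1);
}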
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4bfb759deb10..ca75b9ce0489 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -28,9 +28,10 @@ int acpi_scan_init(void);
int acpi_sysfs_init(void);
#ifdef CONFIG_DEBUG_FS
+extern struct dentry *acpi_debugfs_dir;
int acpi_debugfs_init(void);
#else
-static inline int acpi_debugfs_init(void) { return 0; }
+static inline void acpi_debugfs_init(void) { return; }
#endif
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 45ad4ffef533..52ca9649d769 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -902,14 +902,6 @@ void acpi_os_wait_events_complete(void *context)
EXPORT_SYMBOL(acpi_os_wait_events_complete);
-/*
- * Deallocate the memory for a spinlock.
- */
-void acpi_os_delete_lock(acpi_spinlock handle)
-{
- return;
-}
-
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
@@ -1341,6 +1333,31 @@ int acpi_resources_are_enforced(void)
EXPORT_SYMBOL(acpi_resources_are_enforced);
/*
+ * Create and initialize a spinlock.
+ */
+acpi_status
+acpi_os_create_lock(acpi_spinlock *out_handle)
+{
+ spinlock_t *lock;
+
+ lock = ACPI_ALLOCATE(sizeof(spinlock_t));
+ if (!lock)
+ return AE_NO_MEMORY;
+ spin_lock_init(lock);
+ *out_handle = lock;
+
+ return AE_OK;
+}
+
+/*
+ * Deallocate the memory for a spinlock.
+ */
+void acpi_os_delete_lock(acpi_spinlock handle)
+{
+ ACPI_FREE(handle);
+}
+
+/*
* Acquire a spinlock.
*
* handle is a pointer to the spinlock_t.
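Illustration only, not part of this patch: with acpi_os_create_lock() added and acpi_os_delete_lock() now actually freeing the handle, the OSL owns the full lifecycle of ACPICA's interrupt-level locks, which is what lets utmutex.c above drop the open-coded spin_lock_init() calls. A hypothetical sketch of that lifecycle follows; "example_lock" and the demo function are assumptions standing in for a global such as acpi_gbl_gpe_lock.

#include <acpi/acpi.h>

/* Hypothetical sketch of the OSL spinlock lifecycle used by the
 * ACPICA core after this change. */
static acpi_spinlock example_lock;

static acpi_status example_lock_lifecycle(void)
{
        acpi_cpu_flags flags;
        acpi_status status;

        status = acpi_os_create_lock(&example_lock);    /* allocates + spin_lock_init() */
        if (ACPI_FAILURE(status))
                return status;

        flags = acpi_os_acquire_lock(example_lock);     /* usable at interrupt level */
        /* ... touch state shared with the SCI handler ... */
        acpi_os_release_lock(example_lock, flags);

        acpi_os_delete_lock(example_lock);              /* frees the handle on shutdown */
        return AE_OK;
}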
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 25bf17da69fd..02d2a4c9084d 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -37,7 +37,6 @@ static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
{},
};
-#ifdef CONFIG_SMP
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
@@ -165,7 +164,9 @@ exit:
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
+#ifdef CONFIG_SMP
int i;
+#endif
int apic_id = -1;
apic_id = map_mat_entry(handle, type, acpi_id);
@@ -174,14 +175,19 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
if (apic_id == -1)
return apic_id;
+#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
if (cpu_physical_id(i) == apic_id)
return i;
}
+#else
+ /* In UP kernel, only processor 0 is valid */
+ if (apic_id == 0)
+ return apic_id;
+#endif
return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-#endif
static bool __init processor_physically_present(acpi_handle handle)
{
@@ -217,7 +223,7 @@ static bool __init processor_physically_present(acpi_handle handle)
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
cpuid = acpi_get_cpuid(handle, type, acpi_id);
- if ((cpuid == -1) && (num_possible_cpus() > 1))
+ if (cpuid == -1)
return false;
return true;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d615b7d69bca..431ab11c8c1b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -161,7 +161,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
return;
- if (c1e_detected)
+ if (amd_e400_c1e_detected)
type = ACPI_STATE_C1;
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 61891e75583d..77255f250dbb 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -220,6 +220,14 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */
+
+/* /sys/modules/acpi/parameters/aml_debug_output */
+
+module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
+ bool, 0644);
+MODULE_PARM_DESC(aml_debug_output,
+ "To enable/disable the ACPI Debug Object output.");
+
/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
{
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index db8f88586c8d..98de8f418676 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1038,6 +1038,7 @@ static void floppy_disable_hlt(void)
{
unsigned long flags;
+ WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
spin_lock_irqsave(&floppy_hlt_lock, flags);
if (!hlt_disabled) {
hlt_disabled = 1;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index a0aabd904a51..46b8136c31bb 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -321,7 +321,6 @@ static void pcd_init_units(void)
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ecf89cdf006..079c08808d8a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -6,10 +6,13 @@
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
+#include <scsi/scsi_cmnd.h>
#define PART_BITS 4
static int major, index;
+struct workqueue_struct *virtblk_wq;
struct virtio_blk
{
@@ -26,6 +29,9 @@ struct virtio_blk
mempool_t *pool;
+ /* Process context for config space updates */
+ struct work_struct config_work;
+
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;
@@ -141,7 +147,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
- sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
+ sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
sizeof(vbr->in_hdr));
}
@@ -291,6 +297,46 @@ static ssize_t virtblk_serial_show(struct device *dev,
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
+static void virtblk_config_changed_work(struct work_struct *work)
+{
+ struct virtio_blk *vblk =
+ container_of(work, struct virtio_blk, config_work);
+ struct virtio_device *vdev = vblk->vdev;
+ struct request_queue *q = vblk->disk->queue;
+ char cap_str_2[10], cap_str_10[10];
+ u64 capacity, size;
+
+ /* Host must always specify the capacity. */
+ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
+ &capacity, sizeof(capacity));
+
+ /* If capacity is too big, truncate with warning. */
+ if ((sector_t)capacity != capacity) {
+ dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
+ (unsigned long long)capacity);
+ capacity = (sector_t)-1;
+ }
+
+ size = capacity * queue_logical_block_size(q);
+ string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+ string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
+
+ dev_notice(&vdev->dev,
+ "new size: %llu %d-byte logical blocks (%s/%s)\n",
+ (unsigned long long)capacity,
+ queue_logical_block_size(q),
+ cap_str_10, cap_str_2);
+
+ set_capacity(vblk->disk, capacity);
+}
+
+static void virtblk_config_changed(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+
+ queue_work(virtblk_wq, &vblk->config_work);
+}
+
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -327,6 +373,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
sg_init_table(vblk->sg, vblk->sg_elems);
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
/* We expect one virtqueue, for output. */
vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
@@ -477,6 +524,8 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ flush_work(&vblk->config_work);
+
/* Nothing should be pending. */
BUG_ON(!list_empty(&vblk->reqs));
@@ -508,27 +557,47 @@ static unsigned int features[] = {
* Use __refdata to avoid this warning.
*/
static struct virtio_driver __refdata virtio_blk = {
- .feature_table = features,
- .feature_table_size = ARRAY_SIZE(features),
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = virtblk_probe,
- .remove = __devexit_p(virtblk_remove),
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtblk_probe,
+ .remove = __devexit_p(virtblk_remove),
+ .config_changed = virtblk_config_changed,
};
static int __init init(void)
{
+ int error;
+
+ virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ if (!virtblk_wq)
+ return -ENOMEM;
+
major = register_blkdev(0, "virtblk");
- if (major < 0)
- return major;
- return register_virtio_driver(&virtio_blk);
+ if (major < 0) {
+ error = major;
+ goto out_destroy_workqueue;
+ }
+
+ error = register_virtio_driver(&virtio_blk);
+ if (error)
+ goto out_unregister_blkdev;
+ return 0;
+
+out_unregister_blkdev:
+ unregister_blkdev(major, "virtblk");
+out_destroy_workqueue:
+ destroy_workqueue(virtblk_wq);
+ return error;
}
static void __exit fini(void)
{
unregister_blkdev(major, "virtblk");
unregister_virtio_driver(&virtio_blk);
+ destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);
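The reworked init() above acquires three resources in order (workqueue, block major, virtio driver) and unwinds them in reverse on failure. A minimal userspace sketch of that goto-based unwind pattern, with hypothetical acquire/release helpers standing in for the kernel calls:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the three resources init() acquires. */
static int acquire_workqueue(void)  { return 0; }    /* 0 == success */
static int acquire_blkdev(void)     { return 0; }
static int acquire_driver(void)     { return -EIO; } /* force the error path */
static void release_blkdev(void)    { puts("unregister_blkdev"); }
static void release_workqueue(void) { puts("destroy_workqueue"); }

/* Acquire in order, unwind already-acquired resources in reverse on failure. */
static int example_init(void)
{
	int error;

	if (acquire_workqueue())
		return -ENOMEM;

	error = acquire_blkdev();
	if (error)
		goto out_destroy_workqueue;

	error = acquire_driver();
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	release_blkdev();
out_destroy_workqueue:
	release_workqueue();
	return error;
}

int main(void)
{
	printf("example_init() = %d\n", example_init());
	return 0;
}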
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index b3f01996318f..48ad2a7ab080 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -355,29 +355,24 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* flags pointer to flags for data
* count count of received data in bytes
*
- * Return Value: Number of bytes received
+ * Return Value: None
*/
-static unsigned int hci_uart_tty_receive(struct tty_struct *tty,
- const u8 *data, char *flags, int count)
+static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
{
struct hci_uart *hu = (void *)tty->disc_data;
- int received;
if (!hu || tty != hu->tty)
- return -ENODEV;
+ return;
if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
- return -EINVAL;
+ return;
spin_lock(&hu->rx_lock);
- received = hu->proto->recv(hu, (void *) data, count);
- if (received > 0)
- hu->hdev->stat.byte_rx += received;
+ hu->proto->recv(hu, (void *) data, count);
+ hu->hdev->stat.byte_rx += count;
spin_unlock(&hu->rx_lock);
tty_unthrottle(tty);
-
- return received;
}
static int hci_uart_register_dev(struct hci_uart *hu)
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index ae15a4ddaa9b..7878da89d29e 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -627,7 +627,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
gendisk->fops = &viocd_fops;
gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- gendisk->events = DISK_EVENT_MEDIA_CHANGE;
set_capacity(gendisk, 0);
gendisk->private_data = d;
d->viocd_disk = gendisk;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 838568a7dbf5..fb68b1295373 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1677,17 +1677,12 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
portdev->config.max_nr_ports = 1;
if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
multiport = true;
- vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
-
vdev->config->get(vdev, offsetof(struct virtio_console_config,
max_nr_ports),
&portdev->config.max_nr_ports,
sizeof(portdev->config.max_nr_ports));
}
- /* Let the Host know we support multiple ports.*/
- vdev->config->finalize_features(vdev);
-
err = init_vqs(portdev);
if (err < 0) {
dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 036e5865eb40..dc7c033ef587 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -24,7 +24,6 @@
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
@@ -153,12 +152,10 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
int ret;
- /* wake up device and enable clock */
- pm_runtime_get_sync(&p->pdev->dev);
+ /* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
- pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
@@ -190,9 +187,8 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
/* disable interrupts in CMT block */
sh_cmt_write(p, CMCSR, 0);
- /* stop clock and mark device as idle */
+ /* stop clock */
clk_disable(p->clk);
- pm_runtime_put_sync(&p->pdev->dev);
}
/* private flags */
@@ -664,7 +660,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
- pm_runtime_enable(&pdev->dev);
return 0;
}
@@ -679,9 +674,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
-
- if (!is_early_platform_device(pdev))
- pm_runtime_enable(&pdev->dev);
return ret;
}
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 17296288a205..808135768617 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
@@ -110,12 +109,10 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
{
int ret;
- /* wake up device and enable clock */
- pm_runtime_get_sync(&p->pdev->dev);
+ /* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
- pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
@@ -144,9 +141,8 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
/* disable interrupts in TMU block */
sh_tmu_write(p, TCR, 0x0000);
- /* stop clock and mark device as idle */
+ /* stop clock */
clk_disable(p->clk);
- pm_runtime_put_sync(&p->pdev->dev);
}
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
@@ -415,7 +411,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
- pm_runtime_enable(&pdev->dev);
return 0;
}
@@ -430,9 +425,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
-
- if (!is_early_platform_device(pdev))
- pm_runtime_enable(&pdev->dev);
return ret;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f508690eb958..c47f3d09c1ee 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev)
unsigned int power_usage = -1;
int i;
int multiplier;
+ struct timespec t;
if (data->needs_update) {
menu_update(dev);
@@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev)
return 0;
/* determine the expected residency time, round up */
+ t = ktime_to_timespec(tick_nohz_get_sleep_length());
data->expected_us =
- DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
+ t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
data->bucket = which_bucket(data->expected_us);
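The new calculation above avoids casting the full nanosecond sleep length to u32 before dividing, which would silently truncate any sleep longer than roughly 4.29 seconds. A standalone sketch of the difference, using a hypothetical 5 second sleep length and illustrative constants:

#include <inttypes.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000L
#define NSEC_PER_USEC	1000L

int main(void)
{
	/* A 5 second sleep length expressed in nanoseconds. */
	int64_t sleep_ns = 5LL * 1000000000LL;

	/* Old approach: cast to u32 first, then divide -- truncates. */
	uint32_t truncated = (uint32_t)sleep_ns;
	uint32_t old_us = truncated / 1000;

	/* New approach: split into seconds + nanoseconds, as the patch does. */
	int64_t sec = sleep_ns / 1000000000LL;
	int64_t nsec = sleep_ns % 1000000000LL;
	int64_t new_us = sec * USEC_PER_SEC + nsec / NSEC_PER_USEC;

	printf("old: %" PRIu32 " us, new: %" PRId64 " us\n", old_us, new_us);
	return 0;
}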
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a572600e44eb..25cf327cd1cb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -200,16 +200,18 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
+ tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
depends on PCI && X86
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
- This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
- Output Hub) which is for IVI(In-Vehicle Infotainment) use.
- ML7213 is companion chip for Intel Atom E6xx series.
- ML7213 is completely compatible for Intel EG20T PCH.
+ This driver can also be used for the OKI SEMICONDUCTOR IOH (Input/
+ Output Hub) variants ML7213 and ML7223.
+ ML7213 IOH is for IVI (In-Vehicle Infotainment) use and ML7223 IOH is
+ for MP (Media Phone) use.
+ ML7213/ML7223 are companion chips for the Intel Atom E6xx series and
+ are fully compatible with the Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
new file mode 100644
index 000000000000..a4af8589330c
--- /dev/null
+++ b/drivers/dma/TODO
@@ -0,0 +1,14 @@
+TODO for slave dma
+
+1. Move remaining drivers to use new slave interface
+2. Remove old slave pointer mechanism
+3. Make issue_pending start the transaction in the drivers below
+ - mpc512x_dma
+ - imx-dma
+ - imx-sdma
+ - mxs-dma.c
+ - dw_dmac
+ - intel_mid_dma
+ - ste_dma40
+4. Check other subsystems for dma drivers and merge/move to dmaengine
+5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 235f53bf494e..36144f88d718 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -37,8 +37,8 @@
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLA (0)
-#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \
- |ATC_DIF(1))
+#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
+ |ATC_DIF(AT_DMA_MEM_IF))
/*
* Initial number of descriptors to allocate for each channel. This could
@@ -165,6 +165,29 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
}
/**
+ * atc_desc_chain - build chain by adding a descriptor
+ * @first: address of first descriptor of the chain
+ * @prev: address of previous descriptor of the chain
+ * @desc: descriptor to queue
+ *
+ * Called from prep_* functions
+ */
+static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
+ struct at_desc *desc)
+{
+ if (!(*first)) {
+ *first = desc;
+ } else {
+ /* inform the HW lli about chaining */
+ (*prev)->lli.dscr = desc->txd.phys;
+ /* insert the link descriptor to the LD ring */
+ list_add_tail(&desc->desc_node,
+ &(*first)->tx_list);
+ }
+ *prev = desc;
+}
+
+/**
* atc_assign_cookie - compute and assign new cookie
* @atchan: channel we work on
* @desc: descriptor to assign cookie for
@@ -237,16 +260,12 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
- dma_async_tx_callback callback;
- void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
atchan->completed_cookie = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -278,12 +297,19 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
}
}
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
- if (callback)
- callback(param);
+ /* for cyclic transfers,
+ * no need to replay callback function while stopping */
+ if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+ }
dma_run_dependencies(txd);
}
@@ -419,6 +445,26 @@ static void atc_handle_error(struct at_dma_chan *atchan)
atc_chain_complete(atchan, bad_desc);
}
+/**
+ * atc_handle_cyclic - at the end of a period, run callback function
+ * @atchan: channel used for cyclic operations
+ *
+ * Called with atchan->lock held and bh disabled
+ */
+static void atc_handle_cyclic(struct at_dma_chan *atchan)
+{
+ struct at_desc *first = atc_first_active(atchan);
+ struct dma_async_tx_descriptor *txd = &first->txd;
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ dev_vdbg(chan2dev(&atchan->chan_common),
+ "new cyclic period llp 0x%08x\n",
+ channel_readl(atchan, DSCR));
+
+ if (callback)
+ callback(param);
+}
/*-- IRQ & Tasklet ---------------------------------------------------*/
@@ -426,16 +472,11 @@ static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
- /* Channel cannot be enabled here */
- if (atc_chan_is_enabled(atchan)) {
- dev_err(chan2dev(&atchan->chan_common),
- "BUG: channel enabled in tasklet\n");
- return;
- }
-
spin_lock(&atchan->lock);
- if (test_and_clear_bit(0, &atchan->error_status))
+ if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan);
+ else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ atc_handle_cyclic(atchan);
else
atc_advance_work(atchan);
@@ -464,12 +505,13 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
for (i = 0; i < atdma->dma_common.chancnt; i++) {
atchan = &atdma->chan[i];
- if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
+ if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
if (pending & AT_DMA_ERR(i)) {
/* Disable channel on AHB error */
- dma_writel(atdma, CHDR, atchan->mask);
+ dma_writel(atdma, CHDR,
+ AT_DMA_RES(i) | atchan->mask);
/* Give information to tasklet */
- set_bit(0, &atchan->error_status);
+ set_bit(ATC_IS_ERROR, &atchan->status);
}
tasklet_schedule(&atchan->tasklet);
ret = IRQ_HANDLED;
@@ -549,7 +591,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
}
ctrla = ATC_DEFAULT_CTRLA;
- ctrlb = ATC_DEFAULT_CTRLB
+ ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
| ATC_SRC_ADDR_MODE_INCR
| ATC_DST_ADDR_MODE_INCR
| ATC_FC_MEM2MEM;
@@ -584,16 +626,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
desc->txd.cookie = 0;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
}
/* First descriptor of the chain embedds additional information */
@@ -639,7 +672,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct scatterlist *sg;
size_t total_len = 0;
- dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
+ dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
+ sg_len,
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
flags);
@@ -651,14 +685,15 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg_width = atslave->reg_width;
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
- ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;
+ ctrlb = ATC_IEN;
switch (direction) {
case DMA_TO_DEVICE:
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
- | ATC_FC_MEM2PER;
+ | ATC_FC_MEM2PER
+ | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
reg = atslave->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct at_desc *desc;
@@ -682,16 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| len >> mem_width;
desc->lli.ctrlb = ctrlb;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
total_len += len;
}
break;
@@ -699,7 +725,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
- | ATC_FC_PER2MEM;
+ | ATC_FC_PER2MEM
+ | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
reg = atslave->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
@@ -724,16 +751,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
| len >> reg_width;
desc->lli.ctrlb = ctrlb;
- if (!first) {
- first = desc;
- } else {
- /* inform the HW lli about chaining */
- prev->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &first->tx_list);
- }
- prev = desc;
+ atc_desc_chain(&first, &prev, desc);
total_len += len;
}
break;
@@ -759,41 +777,211 @@ err_desc_get:
return NULL;
}
+/**
+ * atc_dma_cyclic_check_values
+ * Check for too big/unaligned periods and unaligned DMA buffer
+ */
+static int
+atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
+ size_t period_len, enum dma_data_direction direction)
+{
+ if (period_len > (ATC_BTSIZE_MAX << reg_width))
+ goto err_out;
+ if (unlikely(period_len & ((1 << reg_width) - 1)))
+ goto err_out;
+ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+ goto err_out;
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ return -EINVAL;
+}
+
+/**
+ * atc_dma_cyclic_fill_desc - Fill one period descriptor
+ */
+static int
+atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+ unsigned int period_index, dma_addr_t buf_addr,
+ size_t period_len, enum dma_data_direction direction)
+{
+ u32 ctrla;
+ unsigned int reg_width = atslave->reg_width;
+
+ /* prepare common CTRLA value */
+ ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
+ | ATC_DST_WIDTH(reg_width)
+ | ATC_SRC_WIDTH(reg_width)
+ | period_len >> reg_width;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->lli.saddr = buf_addr + (period_len * period_index);
+ desc->lli.daddr = atslave->tx_reg;
+ desc->lli.ctrla = ctrla;
+ desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
+ | ATC_SRC_ADDR_MODE_INCR
+ | ATC_FC_MEM2PER
+ | ATC_SIF(AT_DMA_MEM_IF)
+ | ATC_DIF(AT_DMA_PER_IF);
+ break;
+
+ case DMA_FROM_DEVICE:
+ desc->lli.saddr = atslave->rx_reg;
+ desc->lli.daddr = buf_addr + (period_len * period_index);
+ desc->lli.ctrla = ctrla;
+ desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
+ | ATC_SRC_ADDR_MODE_FIXED
+ | ATC_FC_PER2MEM
+ | ATC_SIF(AT_DMA_PER_IF)
+ | ATC_DIF(AT_DMA_MEM_IF);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma_slave *atslave = chan->private;
+ struct at_desc *first = NULL;
+ struct at_desc *prev = NULL;
+ unsigned long was_cyclic;
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+ direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ buf_addr,
+ periods, buf_len, period_len);
+
+ if (unlikely(!atslave || !buf_len || !period_len)) {
+ dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
+ return NULL;
+ }
+
+ was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
+ if (was_cyclic) {
+ dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
+ return NULL;
+ }
+
+ /* Check for too big/unaligned periods and unaligned DMA buffer */
+ if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+ period_len, direction))
+ goto err_out;
+
+ /* build cyclic linked list */
+ for (i = 0; i < periods; i++) {
+ struct at_desc *desc;
+
+ desc = atc_desc_get(atchan);
+ if (!desc)
+ goto err_desc_get;
+
+ if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
+ period_len, direction))
+ goto err_desc_get;
+
+ atc_desc_chain(&first, &prev, desc);
+ }
+
+ /* let's make a cyclic list */
+ prev->lli.dscr = first->txd.phys;
+
+ /* First descriptor of the chain embeds additional information */
+ first->txd.cookie = -EBUSY;
+ first->len = buf_len;
+
+ return &first->txd;
+
+err_desc_get:
+ dev_err(chan2dev(chan), "not enough descriptors available\n");
+ atc_desc_put(atchan, first);
+err_out:
+ clear_bit(ATC_IS_CYCLIC, &atchan->status);
+ return NULL;
+}
+
+
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
- struct at_desc *desc, *_desc;
+ int chan_id = atchan->chan_common.chan_id;
+
LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_bh(&atchan->lock);
+ if (cmd == DMA_PAUSE) {
+ spin_lock_bh(&atchan->lock);
- dma_writel(atdma, CHDR, atchan->mask);
+ dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
+ set_bit(ATC_IS_PAUSED, &atchan->status);
- /* confirm that this channel is disabled */
- while (dma_readl(atdma, CHSR) & atchan->mask)
- cpu_relax();
+ spin_unlock_bh(&atchan->lock);
+ } else if (cmd == DMA_RESUME) {
+ if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+ return 0;
- /* active_list entries will end up before queued entries */
- list_splice_init(&atchan->queue, &list);
- list_splice_init(&atchan->active_list, &list);
+ spin_lock_bh(&atchan->lock);
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
+ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
+ clear_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_bh(&atchan->lock);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ struct at_desc *desc, *_desc;
+ /*
+ * This is only called when something went wrong elsewhere, so
+ * we don't really care about the data. Just disable the
+ * channel. We still have to poll the channel enable bit due
+ * to AHB/HSB limitations.
+ */
+ spin_lock_bh(&atchan->lock);
+
+ /* disabling channel: must also remove suspend state */
+ dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
+
+ /* confirm that this channel is disabled */
+ while (dma_readl(atdma, CHSR) & atchan->mask)
+ cpu_relax();
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&atchan->queue, &list);
+ list_splice_init(&atchan->active_list, &list);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ atc_chain_complete(atchan, desc);
+
+ clear_bit(ATC_IS_PAUSED, &atchan->status);
+ /* if channel dedicated to cyclic operations, free it */
+ clear_bit(ATC_IS_CYCLIC, &atchan->status);
+
+ spin_unlock_bh(&atchan->lock);
+ } else {
+ return -ENXIO;
+ }
return 0;
}
@@ -835,9 +1023,17 @@ atc_tx_status(struct dma_chan *chan,
spin_unlock_bh(&atchan->lock);
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
- cookie, last_complete ? last_complete : 0,
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ atc_first_active(atchan)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ if (test_bit(ATC_IS_PAUSED, &atchan->status))
+ ret = DMA_PAUSED;
+
+ dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
+ ret, cookie, last_complete ? last_complete : 0,
last_used ? last_used : 0);
return ret;
@@ -853,6 +1049,10 @@ static void atc_issue_pending(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "issue_pending\n");
+ /* Not needed for cyclic transfers */
+ if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ return;
+
spin_lock_bh(&atchan->lock);
if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan);
@@ -959,6 +1159,7 @@ static void atc_free_chan_resources(struct dma_chan *chan)
}
list_splice_init(&atchan->free_list, &list);
atchan->descs_allocated = 0;
+ atchan->status = 0;
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
@@ -1092,10 +1293,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
- if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+
+ if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+ atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
+
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
+ dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
atdma->dma_common.device_control = atc_control;
- }
dma_writel(atdma, EN, AT_DMA_ENABLE);
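Several hunks above replace open-coded descriptor chaining with the new atc_desc_chain() helper, and the cyclic path then closes the chain back onto the first descriptor. A minimal userspace sketch of the same linked-list pattern; struct desc and its next field are illustrative stand-ins for struct at_desc and lli.dscr:

#include <stdio.h>

/* Simplified stand-in for struct at_desc: each node points at the next. */
struct desc {
	int id;
	struct desc *next;	/* plays the role of lli.dscr */
};

/* Mirror of atc_desc_chain(): remember the first node, link the rest. */
static void desc_chain(struct desc **first, struct desc **prev,
		       struct desc *d)
{
	if (!*first)
		*first = d;
	else
		(*prev)->next = d;	/* inform the previous node about chaining */
	*prev = d;
}

int main(void)
{
	struct desc nodes[4];
	struct desc *first = NULL, *prev = NULL;
	struct desc *d;

	for (int i = 0; i < 4; i++) {
		nodes[i].id = i;
		nodes[i].next = NULL;
		desc_chain(&first, &prev, &nodes[i]);
	}

	/* For a cyclic transfer the patch closes the ring: last -> first. */
	prev->next = first;

	/* Walk one full period around the ring. */
	d = first;
	do {
		printf("desc %d\n", d->id);
		d = d->next;
	} while (d != first);

	return 0;
}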
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 495457e3dc4b..087dbf1dd39c 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -103,6 +103,10 @@
/* Bitfields in CTRLB */
#define ATC_SIF(i) (0x3 & (i)) /* Src tx done via AHB-Lite Interface i */
#define ATC_DIF(i) ((0x3 & (i)) << 4) /* Dst tx done via AHB-Lite Interface i */
+ /* Specify AHB interfaces */
+#define AT_DMA_MEM_IF 0 /* interface 0 as memory interface */
+#define AT_DMA_PER_IF 1 /* interface 1 as peripheral interface */
+
#define ATC_SRC_PIP (0x1 << 8) /* Source Picture-in-Picture enabled */
#define ATC_DST_PIP (0x1 << 12) /* Destination Picture-in-Picture enabled */
#define ATC_SRC_DSCR_DIS (0x1 << 16) /* Src Descriptor fetch disable */
@@ -181,12 +185,23 @@ txd_to_at_desc(struct dma_async_tx_descriptor *txd)
/*-- Channels --------------------------------------------------------*/
/**
+ * atc_status - information bits stored in channel status flag
+ *
+ * Manipulated with atomic operations.
+ */
+enum atc_status {
+ ATC_IS_ERROR = 0,
+ ATC_IS_PAUSED = 1,
+ ATC_IS_CYCLIC = 24,
+};
+
+/**
* struct at_dma_chan - internal representation of an Atmel HDMAC channel
* @chan_common: common dmaengine channel object members
* @device: parent device
* @ch_regs: memory mapped register base
* @mask: channel index in a mask
- * @error_status: transmit error status information from irq handler
+ * @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
* @lock: serializes enqueue/dequeue operations to descriptors lists
@@ -201,7 +216,7 @@ struct at_dma_chan {
struct at_dma *device;
void __iomem *ch_regs;
u8 mask;
- unsigned long error_status;
+ unsigned long status;
struct tasklet_struct tasklet;
spinlock_t lock;
@@ -309,8 +324,8 @@ static void atc_setup_irq(struct at_dma_chan *atchan, int on)
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
u32 ebci;
- /* enable interrupts on buffer chain completion & error */
- ebci = AT_DMA_CBTC(atchan->chan_common.chan_id)
+ /* enable interrupts on buffer transfer completion & error */
+ ebci = AT_DMA_BTC(atchan->chan_common.chan_id)
| AT_DMA_ERR(atchan->chan_common.chan_id);
if (on)
dma_writel(atdma, EBCIER, ebci);
@@ -347,7 +362,12 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
*/
static void set_desc_eol(struct at_desc *desc)
{
- desc->lli.ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+ u32 ctrlb = desc->lli.ctrlb;
+
+ ctrlb &= ~ATC_IEN;
+ ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;
+
+ desc->lli.ctrlb = ctrlb;
desc->lli.dscr = 0;
}
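The new enum atc_status above folds the old error_status word into a set of flag bits manipulated with atomic bit operations: test_and_set_bit() claims the channel for cyclic use, clear_bit() releases it on terminate. A rough userspace analogue using C11 atomics, with the bit positions copied from the enum purely for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Bit positions mirroring enum atc_status. */
enum { IS_ERROR = 0, IS_PAUSED = 1, IS_CYCLIC = 24 };

static atomic_ulong status;

static bool test_and_set(int bit)
{
	unsigned long old = atomic_fetch_or(&status, 1UL << bit);
	return old & (1UL << bit);
}

static void clear(int bit)
{
	atomic_fetch_and(&status, ~(1UL << bit));
}

static bool test(int bit)
{
	return atomic_load(&status) & (1UL << bit);
}

int main(void)
{
	/* prep_dma_cyclic(): claim the channel for cyclic use, only once. */
	printf("first claim already busy? %d\n", test_and_set(IS_CYCLIC));
	printf("second claim already busy? %d\n", test_and_set(IS_CYCLIC));

	/* DMA_TERMINATE_ALL: release the channel again. */
	clear(IS_CYCLIC);
	printf("cyclic after terminate? %d\n", test(IS_CYCLIC));
	return 0;
}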
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index f48e54006518..af8c0b5ed70f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1610,7 +1610,7 @@ int __init coh901318_init(void)
{
return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
-arch_initcall(coh901318_init);
+subsys_initcall(coh901318_init);
void __exit coh901318_exit(void)
{
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2a2e2fa00e91..4d180ca9a1d8 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -3,6 +3,7 @@
* AVR32 systems.)
*
* Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -93,8 +94,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
struct dw_desc *desc, *_desc;
struct dw_desc *ret = NULL;
unsigned int i = 0;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
if (async_tx_test_ack(&desc->txd)) {
list_del(&desc->desc_node);
@@ -104,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
i++;
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
@@ -130,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
*/
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
+ unsigned long flags;
+
if (desc) {
struct dw_desc *child;
dwc_sync_desc_for_cpu(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&dwc->chan),
"moving child desc %p to freelist\n",
@@ -143,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
list_splice_init(&desc->tx_list, &dwc->free_list);
dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &dwc->free_list);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
}
@@ -195,18 +199,23 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
/*----------------------------------------------------------------------*/
static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+ bool callback_required)
{
- dma_async_tx_callback callback;
- void *param;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
struct dma_async_tx_descriptor *txd = &desc->txd;
struct dw_desc *child;
+ unsigned long flags;
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
+ spin_lock_irqsave(&dwc->lock, flags);
dwc->completed = txd->cookie;
- callback = txd->callback;
- param = txd->callback_param;
+ if (callback_required) {
+ callback = txd->callback;
+ param = txd->callback_param;
+ }
dwc_sync_desc_for_cpu(dwc, desc);
@@ -238,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
}
}
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
- if (callback)
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ if (callback_required && callback)
callback(param);
}
@@ -250,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
struct dw_desc *desc, *_desc;
LIST_HEAD(list);
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
"BUG: XFER bit set, but channel not idle!\n");
@@ -271,8 +280,10 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_dostart(dwc, dwc_first_active(dwc));
}
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ dwc_descriptor_complete(dwc, desc, true);
}
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -281,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
struct dw_desc *desc, *_desc;
struct dw_desc *child;
u32 status_xfer;
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
/*
* Clear block interrupt flag before scanning so that we don't
* miss any, and read LLP before RAW_XFER to ensure it is
@@ -294,30 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
if (status_xfer & dwc->mask) {
/* Everything we've submitted is done */
dma_writel(dw, CLEAR.XFER, dwc->mask);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
dwc_complete_all(dw, dwc);
return;
}
- if (list_empty(&dwc->active_list))
+ if (list_empty(&dwc->active_list)) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
- if (desc->lli.llp == llp)
+ /* check first descriptor's addr */
+ if (desc->txd.phys == llp) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return;
+ }
+
+ /* check first descriptor's llp */
+ if (desc->lli.llp == llp) {
/* This one is currently in progress */
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
list_for_each_entry(child, &desc->tx_list, desc_node)
- if (child->lli.llp == llp)
+ if (child->lli.llp == llp) {
/* Currently in progress */
+ spin_unlock_irqrestore(&dwc->lock, flags);
return;
+ }
/*
* No descriptors so far seem to be in progress, i.e.
* this one must be done.
*/
- dwc_descriptor_complete(dwc, desc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc_descriptor_complete(dwc, desc, true);
+ spin_lock_irqsave(&dwc->lock, flags);
}
dev_err(chan2dev(&dwc->chan),
@@ -332,6 +362,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
list_move(dwc->queue.next, &dwc->active_list);
dwc_dostart(dwc, dwc_first_active(dwc));
}
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
@@ -346,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
struct dw_desc *bad_desc;
struct dw_desc *child;
+ unsigned long flags;
dwc_scan_descriptors(dw, dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+
/*
* The descriptor currently at the head of the active list is
* borked. Since we don't have any way to report errors, we'll
@@ -378,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, &child->lli);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
/* Pretend the descriptor completed successfully */
- dwc_descriptor_complete(dwc, bad_desc);
+ dwc_descriptor_complete(dwc, bad_desc, true);
}
/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -402,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
u32 status_block, u32 status_err, u32 status_xfer)
{
+ unsigned long flags;
+
if (status_block & dwc->mask) {
void (*callback)(void *param);
void *callback_param;
@@ -412,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
callback = dwc->cdesc->period_callback;
callback_param = dwc->cdesc->period_callback_param;
- if (callback) {
- spin_unlock(&dwc->lock);
+
+ if (callback)
callback(callback_param);
- spin_lock(&dwc->lock);
- }
}
/*
@@ -430,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
"interrupt, stopping DMA transfer\n",
status_xfer ? "xfer" : "error");
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
dev_err(chan2dev(&dwc->chan),
" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
channel_readl(dwc, SAR),
@@ -453,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
for (i = 0; i < dwc->cdesc->periods; i++)
dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
}
@@ -476,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
- spin_lock(&dwc->lock);
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
dwc_handle_cyclic(dw, dwc, status_block, status_err,
status_xfer);
@@ -484,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data)
dwc_handle_error(dw, dwc);
else if ((status_block | status_xfer) & (1 << i))
dwc_scan_descriptors(dw, dwc);
- spin_unlock(&dwc->lock);
}
/*
@@ -539,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
struct dw_desc *desc = txd_to_dw_desc(tx);
struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
cookie = dwc_assign_cookie(dwc, desc);
/*
@@ -560,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &dwc->queue);
}
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return cookie;
}
@@ -689,9 +729,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->tx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
+ u32 len, dlen, mem;
+
+ mem = sg_phys(sg);
+ len = sg_dma_len(sg);
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc) {
dev_err(chan2dev(chan),
@@ -699,16 +745,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto err_desc_get;
}
- mem = sg_phys(sg);
- len = sg_dma_len(sg);
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
- mem_width = 0;
-
desc->lli.sar = mem;
desc->lli.dar = reg;
desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
- desc->lli.ctlhi = len >> mem_width;
+ if ((len >> mem_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << mem_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+
+ desc->lli.ctlhi = dlen >> mem_width;
if (!first) {
first = desc;
@@ -722,7 +771,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_todev_fill_desc;
}
break;
case DMA_FROM_DEVICE:
@@ -735,15 +787,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
reg = dws->rx_reg;
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
- u32 len;
- u32 mem;
-
- desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
- goto err_desc_get;
- }
+ u32 len, dlen, mem;
mem = sg_phys(sg);
len = sg_dma_len(sg);
@@ -751,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (unlikely(mem & 3 || len & 3))
mem_width = 0;
+slave_sg_fromdev_fill_desc:
+ desc = dwc_desc_get(dwc);
+ if (!desc) {
+ dev_err(chan2dev(chan),
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
desc->lli.sar = reg;
desc->lli.dar = mem;
desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
- desc->lli.ctlhi = len >> reg_width;
+ if ((len >> reg_width) > DWC_MAX_COUNT) {
+ dlen = DWC_MAX_COUNT << reg_width;
+ mem += dlen;
+ len -= dlen;
+ } else {
+ dlen = len;
+ len = 0;
+ }
+ desc->lli.ctlhi = dlen >> reg_width;
if (!first) {
first = desc;
@@ -768,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
&first->tx_list);
}
prev = desc;
- total_len += len;
+ total_len += dlen;
+
+ if (len)
+ goto slave_sg_fromdev_fill_desc;
}
break;
default:
@@ -799,34 +862,51 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
+ unsigned long flags;
+ u32 cfglo;
LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ if (cmd == DMA_PAUSE) {
+ spin_lock_irqsave(&dwc->lock, flags);
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_bh(&dwc->lock);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+ while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+ cpu_relax();
- channel_clear_bit(dw, CH_EN, dwc->mask);
+ dwc->paused = true;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ } else if (cmd == DMA_RESUME) {
+ if (!dwc->paused)
+ return 0;
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ spin_lock_irqsave(&dwc->lock, flags);
- /* active_list entries will end up before queued entries */
- list_splice_init(&dwc->queue, &list);
- list_splice_init(&dwc->active_list, &list);
+ cfglo = channel_readl(dwc, CFG_LO);
+ channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+ dwc->paused = false;
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ } else if (cmd == DMA_TERMINATE_ALL) {
+ spin_lock_irqsave(&dwc->lock, flags);
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- dwc_descriptor_complete(dwc, desc);
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ dwc->paused = false;
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&dwc->queue, &list);
+ list_splice_init(&dwc->active_list, &list);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ dwc_descriptor_complete(dwc, desc, false);
+ } else
+ return -ENXIO;
return 0;
}
@@ -846,9 +926,7 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
- spin_lock_bh(&dwc->lock);
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
last_complete = dwc->completed;
last_used = chan->cookie;
@@ -856,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- dma_set_tx_state(txstate, last_complete, last_used, 0);
+ if (ret != DMA_SUCCESS)
+ dma_set_tx_state(txstate, last_complete, last_used,
+ dwc_first_active(dwc)->len);
+ else
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+ if (dwc->paused)
+ return DMA_PAUSED;
return ret;
}
@@ -865,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- spin_lock_bh(&dwc->lock);
if (!list_empty(&dwc->queue))
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
- spin_unlock_bh(&dwc->lock);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -880,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
int i;
u32 cfghi;
u32 cfglo;
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -917,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
i = dwc->descs_allocated;
while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
if (!desc) {
dev_info(chan2dev(chan),
"only allocated %d descriptors\n", i);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
break;
}
@@ -938,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
sizeof(desc->lli), DMA_TO_DEVICE);
dwc_desc_put(dwc, desc);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
i = ++dwc->descs_allocated;
}
@@ -947,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(chan),
"alloc_chan_resources allocated %d descriptors\n", i);
@@ -960,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc, *_desc;
+ unsigned long flags;
LIST_HEAD(list);
dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
@@ -970,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&dwc->queue));
BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
@@ -979,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
channel_clear_bit(dw, MASK.ERROR, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
list_for_each_entry_safe(desc, _desc, &list, desc_node) {
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
@@ -1004,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long flags;
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
return -ENODEV;
}
- spin_lock(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
/* assert channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1023,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
channel_readl(dwc, LLP),
channel_readl(dwc, CTL_HI),
channel_readl(dwc, CTL_LO));
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return -EBUSY;
}
@@ -1038,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
channel_set_bit(dw, CH_EN, dwc->mask);
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
}
@@ -1054,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ unsigned long flags;
- spin_lock(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
cpu_relax();
- spin_unlock(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);
@@ -1090,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
unsigned int reg_width;
unsigned int periods;
unsigned int i;
+ unsigned long flags;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(&dwc->chan),
"queue and/or active list are not empty\n");
return ERR_PTR(-EBUSY);
}
was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
if (was_cyclic) {
dev_dbg(chan2dev(&dwc->chan),
"channel already prepared for cyclic DMA\n");
@@ -1214,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_cyclic_desc *cdesc = dwc->cdesc;
int i;
+ unsigned long flags;
dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
if (!cdesc)
return;
- spin_lock_bh(&dwc->lock);
+ spin_lock_irqsave(&dwc->lock, flags);
channel_clear_bit(dw, CH_EN, dwc->mask);
while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1230,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
- spin_unlock_bh(&dwc->lock);
+ spin_unlock_irqrestore(&dwc->lock, flags);
for (i = 0; i < cdesc->periods; i++)
dwc_desc_put(dwc, cdesc->desc[i]);
@@ -1487,3 +1576,4 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 720f821527f8..c3419518d701 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -2,6 +2,7 @@
* Driver for the Synopsys DesignWare AHB DMA Controller
*
* Copyright (C) 2005-2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -138,6 +139,7 @@ struct dw_dma_chan {
void __iomem *ch_regs;
u8 mask;
u8 priority;
+ bool paused;
spinlock_t lock;
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3d4ec38b9b62..f653517ef744 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1292,8 +1292,7 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
if (err)
goto err_dma;
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
@@ -1322,6 +1321,9 @@ err_enable_device:
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
struct middma_device *device = pci_get_drvdata(pdev);
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
middma_shutdown(pdev);
pci_dev_put(pdev);
kfree(device);
@@ -1385,13 +1387,20 @@ int dma_resume(struct pci_dev *pci)
static int dma_runtime_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return dma_suspend(pci_dev, PMSG_SUSPEND);
+ struct middma_device *device = pci_get_drvdata(pci_dev);
+
+ device->state = SUSPENDED;
+ return 0;
}
static int dma_runtime_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- return dma_resume(pci_dev);
+ struct middma_device *device = pci_get_drvdata(pci_dev);
+
+ device->state = RUNNING;
+ iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
+ return 0;
}
static int dma_runtime_idle(struct device *dev)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index f4a51d4d0349..5d65f8377971 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -508,6 +508,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
struct ioat_ring_ent **ring;
u64 status;
int order;
+ int i = 0;
/* have we already been set up? */
if (ioat->ring)
@@ -548,8 +549,11 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
- udelay(5);
- status = ioat_chansts(chan);
+ do {
+ udelay(1);
+ status = ioat_chansts(chan);
+ } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
if (is_ioat_active(status) || is_ioat_idle(status)) {
set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
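The start-up check above changes from a single fixed 5 us delay to a bounded polling loop. A small standalone illustration of the same retry shape; read_status() and short_delay() are hypothetical stand-ins for ioat_chansts() and udelay():

#include <stdio.h>

/* Hypothetical hardware: reports "active" after a few status reads. */
static int reads;
static int read_status(void) { return ++reads >= 3; }
static void short_delay(void) { /* stands in for udelay(1) */ }

int main(void)
{
	int i = 0;
	int status;

	/* Poll with a short delay, but give up after a bounded number of tries. */
	do {
		short_delay();
		status = read_status();
	} while (i++ < 20 && !status);

	if (status)
		printf("channel came up after %d polls\n", reads);
	else
		printf("channel failed to start\n");
	return 0;
}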
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c6b01f535b29..e03f811a83dd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -619,7 +619,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
__func__, len);
@@ -652,7 +652,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
__func__, len);
@@ -686,7 +686,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
"%s src_cnt: %d len: %u flags: %lx\n",
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a25f5f61e0e0..954e334e01bb 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -671,7 +671,7 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_memcpy_slot_count(len);
@@ -710,7 +710,7 @@ mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_memset_slot_count(len);
@@ -744,7 +744,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
- BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan->device->common.dev,
"%s src_cnt: %d len: dest %x %u flags: %ld\n",
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 8d8fef1480a9..ff5b38f9d45b 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -77,10 +77,10 @@ struct pch_dma_regs {
u32 dma_ctl0;
u32 dma_ctl1;
u32 dma_ctl2;
- u32 reserved1;
+ u32 dma_ctl3;
u32 dma_sts0;
u32 dma_sts1;
- u32 reserved2;
+ u32 dma_sts2;
u32 reserved3;
struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
@@ -130,6 +130,7 @@ struct pch_dma {
#define PCH_DMA_CTL0 0x00
#define PCH_DMA_CTL1 0x04
#define PCH_DMA_CTL2 0x08
+#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
@@ -138,7 +139,8 @@ struct pch_dma {
#define dma_writel(pd, name, val) \
writel((val), (pd)->membase + PCH_DMA_##name)
-static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
+static inline
+struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
return container_of(txd, struct pch_dma_desc, txd);
}
@@ -163,13 +165,15 @@ static inline struct device *chan2parent(struct dma_chan *chan)
return chan->dev->device.parent;
}
-static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
return list_first_entry(&pd_chan->active_list,
struct pch_dma_desc, desc_node);
}
-static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
+static inline
+struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
return list_first_entry(&pd_chan->queue,
struct pch_dma_desc, desc_node);
@@ -199,16 +203,30 @@ static void pdc_set_dir(struct dma_chan *chan)
struct pch_dma *pd = to_pd(chan->device);
u32 val;
- val = dma_readl(pd, CTL0);
+ if (chan->chan_id < 8) {
+ val = dma_readl(pd, CTL0);
- if (pd_chan->dir == DMA_TO_DEVICE)
- val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
- DMA_CTL0_DIR_SHIFT_BITS);
- else
- val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
- DMA_CTL0_DIR_SHIFT_BITS));
+ if (pd_chan->dir == DMA_TO_DEVICE)
+ val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS);
+ else
+ val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
+ DMA_CTL0_DIR_SHIFT_BITS));
+
+ dma_writel(pd, CTL0, val);
+ } else {
+ int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
+ val = dma_readl(pd, CTL3);
- dma_writel(pd, CTL0, val);
+ if (pd_chan->dir == DMA_TO_DEVICE)
+ val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+ DMA_CTL0_DIR_SHIFT_BITS);
+ else
+ val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
+ DMA_CTL0_DIR_SHIFT_BITS));
+
+ dma_writel(pd, CTL3, val);
+ }
dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
chan->chan_id, val);
@@ -219,13 +237,26 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
struct pch_dma *pd = to_pd(chan->device);
u32 val;
- val = dma_readl(pd, CTL0);
+ if (chan->chan_id < 8) {
+ val = dma_readl(pd, CTL0);
+
+ val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * chan->chan_id));
- val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+ dma_writel(pd, CTL0, val);
+ } else {
+ int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11-->3 */
+
+ val = dma_readl(pd, CTL3);
+
+ val &= ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
- dma_writel(pd, CTL0, val);
+ dma_writel(pd, CTL3, val);
+
+ }
dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
chan->chan_id, val);
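With the 12-channel ML7213/ML7223 parts added below, channels 0-7 keep their per-channel control fields in CTL0 while channels 8-11 get the same layout in the new CTL3 register. As a reading aid only (not part of the patch), here is a small standalone C sketch of that bit arithmetic; the 4-bit field width and the direction-bit offset mirror the driver's DMA_CTL0_BITS_PER_CH and DMA_CTL0_DIR_SHIFT_BITS constants and are assumptions here, not values taken from this hunk.

#include <stdio.h>

#define BITS_PER_CH	4	/* assumed value of DMA_CTL0_BITS_PER_CH */
#define DIR_SHIFT	2	/* assumed value of DMA_CTL0_DIR_SHIFT_BITS */

/* Mask of the direction bit for a channel, in CTL0 (ch 0-7) or CTL3 (ch 8-11). */
static unsigned int dir_bit(unsigned int chan_id)
{
	unsigned int ch = (chan_id < 8) ? chan_id : chan_id - 8;

	return 1u << (BITS_PER_CH * ch + DIR_SHIFT);
}

int main(void)
{
	printf("chan 3  -> CTL0 mask 0x%04x\n", dir_bit(3));	/* 0x4000 */
	printf("chan 10 -> CTL3 mask 0x%04x\n", dir_bit(10));	/* 0x0400 */
	return 0;
}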
@@ -251,9 +282,6 @@ static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
{
- struct pch_dma *pd = to_pd(pd_chan->chan.device);
- u32 val;
-
if (!pdc_is_idle(pd_chan)) {
dev_err(chan2dev(&pd_chan->chan),
"BUG: Attempt to start non-idle channel\n");
@@ -279,10 +307,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
channel_writel(pd_chan, NEXT, desc->txd.phys);
pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
}
-
- val = dma_readl(pd, CTL2);
- val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
- dma_writel(pd, CTL2, val);
}
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
@@ -403,7 +427,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
struct pch_dma_desc *desc, *_d;
struct pch_dma_desc *ret = NULL;
- int i;
+ int i = 0;
spin_lock(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
@@ -478,7 +502,6 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
spin_unlock_bh(&pd_chan->lock);
pdc_enable_irq(chan, 1);
- pdc_set_dir(chan);
return pd_chan->descs_allocated;
}
@@ -561,6 +584,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
else
return NULL;
+ pd_chan->dir = direction;
+ pdc_set_dir(chan);
+
for_each_sg(sgl, sg, sg_len, i) {
desc = pdc_desc_get(pd_chan);
@@ -703,6 +729,7 @@ static void pch_dma_save_regs(struct pch_dma *pd)
pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
+ pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
pd_chan = to_pd_chan(chan);
@@ -725,6 +752,7 @@ static void pch_dma_restore_regs(struct pch_dma *pd)
dma_writel(pd, CTL0, pd->regs.dma_ctl0);
dma_writel(pd, CTL1, pd->regs.dma_ctl1);
dma_writel(pd, CTL2, pd->regs.dma_ctl2);
+ dma_writel(pd, CTL3, pd->regs.dma_ctl3);
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
pd_chan = to_pd_chan(chan);
@@ -850,8 +878,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
pd_chan->membase = &regs->desc[i];
- pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
spin_lock_init(&pd_chan->lock);
INIT_LIST_HEAD(&pd_chan->active_list);
@@ -929,13 +955,23 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
+#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032
+#define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B
+#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
+#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
+#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
-static const struct pci_device_id pch_dma_id_table[] = {
+DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
{ 0, },
};
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 3b0247e74cc4..fc457a7e8832 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -2313,7 +2313,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
spin_lock_bh(&ppc440spe_chan->lock);
@@ -2354,7 +2354,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
spin_lock_bh(&ppc440spe_chan->lock);
@@ -2397,7 +2397,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
dma_dest, dma_src, src_cnt));
if (unlikely(!len))
return NULL;
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(ppc440spe_chan->device->common.dev,
"ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
@@ -2887,7 +2887,7 @@ static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
dst, src, src_cnt));
BUG_ON(!len);
- BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
BUG_ON(!src_cnt);
if (src_cnt == 1 && dst[1] == src[0]) {
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 636e40925b16..2a638f9f09a2 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -343,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
dmae_set_dmars(sh_chan, cfg->mid_rid);
dmae_set_chcr(sh_chan, cfg->chcr);
- } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
+ } else {
dmae_init(sh_chan);
}
@@ -1144,6 +1144,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* platform data */
shdev->pdata = pdata;
+ platform_set_drvdata(pdev, shdev);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -1256,7 +1258,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- platform_set_drvdata(pdev, shdev);
dma_async_device_register(&shdev->common);
return err;
@@ -1278,6 +1279,8 @@ rst_err:
if (dmars)
iounmap(shdev->dmars);
+
+ platform_set_drvdata(pdev, NULL);
emapdmars:
iounmap(shdev->chan_reg);
synchronize_rcu();
@@ -1316,6 +1319,8 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
iounmap(shdev->dmars);
iounmap(shdev->chan_reg);
+ platform_set_drvdata(pdev, NULL);
+
synchronize_rcu();
kfree(shdev);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 94ee15dd3aed..8f222d4db7de 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1829,7 +1829,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
{
struct stedma40_platform_data *plat = chan->base->plat_data;
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
if (chan->runtime_addr)
return chan->runtime_addr;
@@ -2962,4 +2962,4 @@ static int __init stedma40_init(void)
{
return platform_driver_probe(&d40_driver, d40_probe);
}
-arch_initcall(stedma40_init);
+subsys_initcall(stedma40_init);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d21364603755..4a7f63143455 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -86,6 +86,34 @@ config GPIO_IT8761E
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
+config GPIO_EXYNOS4
+ bool "Samsung Exynos4 GPIO library support"
+ default y if CPU_EXYNOS4210
+ depends on ARM
+ help
+ Say yes here to support the GPIO library for Samsung Exynos4 series SoCs.
+
+config GPIO_PLAT_SAMSUNG
+ bool "Samsung SoCs GPIO library support"
+ default y if SAMSUNG_GPIOLIB_4BIT
+ depends on ARM
+ help
+ Say yes here to support the GPIO library for Samsung SoCs.
+
+config GPIO_S5PC100
+ bool "Samsung S5PC100 GPIO library support"
+ default y if CPU_S5PC100
+ depends on ARM
+ help
+ Say yes here to support the GPIO library for Samsung S5PC100 SoCs.
+
+config GPIO_S5PV210
+ bool "Samsung S5PV210/S5PC110 GPIO library support"
+ default y if CPU_S5PV210
+ depends on ARM
+ help
+ Say yes here to support the GPIO library for Samsung S5PV210/S5PC110 SoCs.
+
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
@@ -303,7 +331,7 @@ comment "PCI GPIO expanders:"
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
- depends on PCI && X86 && !CS5535_GPIO
+ depends on PCI && X86 && !CS5535_GPIO && MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
can be used for quite a number of things. The CS5535/6 is found on
@@ -334,13 +362,19 @@ config GPIO_LANGWELL
Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_PCH
- tristate "PCH GPIO of Intel Topcliff"
+ tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
depends on PCI && X86
help
This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff
which is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH GPIO device.
+ This driver can also be used for the OKI SEMICONDUCTOR ML7223 IOH
+ (Input/Output Hub). The ML7223 IOH is intended for MP (Media Phone)
+ use, is a companion chip for the Intel Atom E6xx series, and is
+ fully compatible with the Intel EG20T PCH.
+
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on PCI
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 6a3387acc0e5..b605f8ec6fbe 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -8,6 +8,10 @@ obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o
obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
+obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o
+obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o
+obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o
+obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o
obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
obj-$(CONFIG_GPIO_MAX730X) += max730x.o
obj-$(CONFIG_GPIO_MAX7300) += max7300.o
@@ -16,6 +20,7 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
obj-$(CONFIG_GPIO_MC33880) += mc33880.o
obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_74X164) += 74x164.o
+obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PCH) += pch_gpio.o
@@ -34,6 +39,8 @@ obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+obj-$(CONFIG_MACH_U300) += gpio-u300.o
+obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
obj-$(CONFIG_GPIO_SX150X) += sx150x.o
diff --git a/drivers/gpio/gpio-exynos4.c b/drivers/gpio/gpio-exynos4.c
new file mode 100644
index 000000000000..d54ca6adb660
--- /dev/null
+++ b/drivers/gpio/gpio-exynos4.c
@@ -0,0 +1,365 @@
+/* linux/arch/arm/mach-exynos4/gpiolib.c
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS4 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/map.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
+static struct s3c_gpio_cfg gpio_cfg = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_noint = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+/*
+ * Following are the GPIO banks in Exynos4.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * structure gpio_cfg in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: the initialization of the 'base' member of the s3c_gpio_chip
+ * structure depends on the banks being listed in order here.
+ */
+static struct s3c_gpio_chip exynos4_gpio_part1_4bit[] = {
+ {
+ .chip = {
+ .base = EXYNOS4_GPA0(0),
+ .ngpio = EXYNOS4_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPA1(0),
+ .ngpio = EXYNOS4_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPB(0),
+ .ngpio = EXYNOS4_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPC0(0),
+ .ngpio = EXYNOS4_GPIO_C0_NR,
+ .label = "GPC0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPC1(0),
+ .ngpio = EXYNOS4_GPIO_C1_NR,
+ .label = "GPC1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPD0(0),
+ .ngpio = EXYNOS4_GPIO_D0_NR,
+ .label = "GPD0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPD1(0),
+ .ngpio = EXYNOS4_GPIO_D1_NR,
+ .label = "GPD1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE0(0),
+ .ngpio = EXYNOS4_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE1(0),
+ .ngpio = EXYNOS4_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE2(0),
+ .ngpio = EXYNOS4_GPIO_E2_NR,
+ .label = "GPE2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE3(0),
+ .ngpio = EXYNOS4_GPIO_E3_NR,
+ .label = "GPE3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPE4(0),
+ .ngpio = EXYNOS4_GPIO_E4_NR,
+ .label = "GPE4",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF0(0),
+ .ngpio = EXYNOS4_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF1(0),
+ .ngpio = EXYNOS4_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF2(0),
+ .ngpio = EXYNOS4_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPF3(0),
+ .ngpio = EXYNOS4_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ },
+};
+
+static struct s3c_gpio_chip exynos4_gpio_part2_4bit[] = {
+ {
+ .chip = {
+ .base = EXYNOS4_GPJ0(0),
+ .ngpio = EXYNOS4_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPJ1(0),
+ .ngpio = EXYNOS4_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK0(0),
+ .ngpio = EXYNOS4_GPIO_K0_NR,
+ .label = "GPK0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK1(0),
+ .ngpio = EXYNOS4_GPIO_K1_NR,
+ .label = "GPK1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK2(0),
+ .ngpio = EXYNOS4_GPIO_K2_NR,
+ .label = "GPK2",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPK3(0),
+ .ngpio = EXYNOS4_GPIO_K3_NR,
+ .label = "GPK3",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPL0(0),
+ .ngpio = EXYNOS4_GPIO_L0_NR,
+ .label = "GPL0",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPL1(0),
+ .ngpio = EXYNOS4_GPIO_L1_NR,
+ .label = "GPL1",
+ },
+ }, {
+ .chip = {
+ .base = EXYNOS4_GPL2(0),
+ .ngpio = EXYNOS4_GPIO_L2_NR,
+ .label = "GPL2",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY0(0),
+ .ngpio = EXYNOS4_GPIO_Y0_NR,
+ .label = "GPY0",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY1(0),
+ .ngpio = EXYNOS4_GPIO_Y1_NR,
+ .label = "GPY1",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY2(0),
+ .ngpio = EXYNOS4_GPIO_Y2_NR,
+ .label = "GPY2",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY3(0),
+ .ngpio = EXYNOS4_GPIO_Y3_NR,
+ .label = "GPY3",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY4(0),
+ .ngpio = EXYNOS4_GPIO_Y4_NR,
+ .label = "GPY4",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY5(0),
+ .ngpio = EXYNOS4_GPIO_Y5_NR,
+ .label = "GPY5",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = EXYNOS4_GPY6(0),
+ .ngpio = EXYNOS4_GPIO_Y6_NR,
+ .label = "GPY6",
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC00),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = EXYNOS4_GPX0(0),
+ .ngpio = EXYNOS4_GPIO_X0_NR,
+ .label = "GPX0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC20),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = EXYNOS4_GPX1(0),
+ .ngpio = EXYNOS4_GPIO_X1_NR,
+ .label = "GPX1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC40),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = EXYNOS4_GPX2(0),
+ .ngpio = EXYNOS4_GPIO_X2_NR,
+ .label = "GPX2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO2 + 0xC60),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = EXYNOS4_GPX3(0),
+ .ngpio = EXYNOS4_GPIO_X3_NR,
+ .label = "GPX3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+};
+
+static struct s3c_gpio_chip exynos4_gpio_part3_4bit[] = {
+ {
+ .chip = {
+ .base = EXYNOS4_GPZ(0),
+ .ngpio = EXYNOS4_GPIO_Z_NR,
+ .label = "GPZ",
+ },
+ },
+};
+
+static __init int exynos4_gpiolib_init(void)
+{
+ struct s3c_gpio_chip *chip;
+ int i;
+ int group = 0;
+ int nr_chips;
+
+ /* GPIO part 1 */
+
+ chip = exynos4_gpio_part1_4bit;
+ nr_chips = ARRAY_SIZE(exynos4_gpio_part1_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ /* Assign the GPIO interrupt group */
+ chip->group = group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5P_VA_GPIO1 + (i) * 0x20;
+ }
+
+ samsung_gpiolib_add_4bit_chips(exynos4_gpio_part1_4bit, nr_chips);
+
+ /* GPIO part 2 */
+
+ chip = exynos4_gpio_part2_4bit;
+ nr_chips = ARRAY_SIZE(exynos4_gpio_part2_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ /* Assign the GPIO interrupt group */
+ chip->group = group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5P_VA_GPIO2 + (i) * 0x20;
+ }
+
+ samsung_gpiolib_add_4bit_chips(exynos4_gpio_part2_4bit, nr_chips);
+
+ /* GPIO part 3 */
+
+ chip = exynos4_gpio_part3_4bit;
+ nr_chips = ARRAY_SIZE(exynos4_gpio_part3_4bit);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ /* Assign the GPIO interrupt group */
+ chip->group = group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5P_VA_GPIO3 + (i) * 0x20;
+ }
+
+ samsung_gpiolib_add_4bit_chips(exynos4_gpio_part3_4bit, nr_chips);
+ s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
+ s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
+
+ return 0;
+}
+core_initcall(exynos4_gpiolib_init);
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
new file mode 100644
index 000000000000..4961ef9bc153
--- /dev/null
+++ b/drivers/gpio/gpio-nomadik.c
@@ -0,0 +1,1069 @@
+/*
+ * Generic GPIO driver for logic cells found in the Nomadik SoC
+ *
+ * Copyright (C) 2008,2009 STMicroelectronics
+ * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
+ * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+
+#include <asm/mach/irq.h>
+
+#include <plat/pincfg.h>
+#include <mach/hardware.h>
+#include <mach/gpio.h>
+
+/*
+ * The GPIO module in the Nomadik family of Systems-on-Chip is an
+ * AMBA device, managing 32 pins and alternate functions. The logic block
+ * is currently used in the Nomadik and ux500.
+ *
+ * Symbols in this file are called "nmk_gpio" for "nomadik gpio"
+ */
+
+#define NMK_GPIO_PER_CHIP 32
+
+struct nmk_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *addr;
+ struct clk *clk;
+ unsigned int bank;
+ unsigned int parent_irq;
+ int secondary_parent_irq;
+ u32 (*get_secondary_status)(unsigned int bank);
+ void (*set_ioforce)(bool enable);
+ spinlock_t lock;
+ /* Keep track of configured edges */
+ u32 edge_rising;
+ u32 edge_falling;
+ u32 real_wake;
+ u32 rwimsc;
+ u32 fwimsc;
+ u32 slpm;
+ u32 enabled;
+ u32 pull_up;
+};
+
+static struct nmk_gpio_chip *
+nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)];
+
+static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
+
+#define NUM_BANKS ARRAY_SIZE(nmk_gpio_chips)
+
+static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int gpio_mode)
+{
+ u32 bit = 1 << offset;
+ u32 afunc, bfunc;
+
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~bit;
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~bit;
+ if (gpio_mode & NMK_GPIO_ALT_A)
+ afunc |= bit;
+ if (gpio_mode & NMK_GPIO_ALT_B)
+ bfunc |= bit;
+ writel(afunc, nmk_chip->addr + NMK_GPIO_AFSLA);
+ writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB);
+}
+
+static void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, enum nmk_gpio_slpm mode)
+{
+ u32 bit = 1 << offset;
+ u32 slpm;
+
+ slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC);
+ if (mode == NMK_GPIO_SLPM_NOCHANGE)
+ slpm |= bit;
+ else
+ slpm &= ~bit;
+ writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC);
+}
+
+static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, enum nmk_gpio_pull pull)
+{
+ u32 bit = 1 << offset;
+ u32 pdis;
+
+ pdis = readl(nmk_chip->addr + NMK_GPIO_PDIS);
+ if (pull == NMK_GPIO_PULL_NONE) {
+ pdis |= bit;
+ nmk_chip->pull_up &= ~bit;
+ } else {
+ pdis &= ~bit;
+ }
+
+ writel(pdis, nmk_chip->addr + NMK_GPIO_PDIS);
+
+ if (pull == NMK_GPIO_PULL_UP) {
+ nmk_chip->pull_up |= bit;
+ writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
+ } else if (pull == NMK_GPIO_PULL_DOWN) {
+ nmk_chip->pull_up &= ~bit;
+ writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
+ }
+}
+
+static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset)
+{
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+}
+
+static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int val)
+{
+ if (val)
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATS);
+ else
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATC);
+}
+
+static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int val)
+{
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS);
+ __nmk_gpio_set_output(nmk_chip, offset, val);
+}
+
+static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip,
+ unsigned offset, int gpio_mode,
+ bool glitch)
+{
+ u32 rwimsc = readl(nmk_chip->addr + NMK_GPIO_RWIMSC);
+ u32 fwimsc = readl(nmk_chip->addr + NMK_GPIO_FWIMSC);
+
+ if (glitch && nmk_chip->set_ioforce) {
+ u32 bit = BIT(offset);
+
+ /* Prevent spurious wakeups */
+ writel(rwimsc & ~bit, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ writel(fwimsc & ~bit, nmk_chip->addr + NMK_GPIO_FWIMSC);
+
+ nmk_chip->set_ioforce(true);
+ }
+
+ __nmk_gpio_set_mode(nmk_chip, offset, gpio_mode);
+
+ if (glitch && nmk_chip->set_ioforce) {
+ nmk_chip->set_ioforce(false);
+
+ writel(rwimsc, nmk_chip->addr + NMK_GPIO_RWIMSC);
+ writel(fwimsc, nmk_chip->addr + NMK_GPIO_FWIMSC);
+ }
+}
+
+static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
+ pin_cfg_t cfg, bool sleep, unsigned int *slpmregs)
+{
+ static const char *afnames[] = {
+ [NMK_GPIO_ALT_GPIO] = "GPIO",
+ [NMK_GPIO_ALT_A] = "A",
+ [NMK_GPIO_ALT_B] = "B",
+ [NMK_GPIO_ALT_C] = "C"
+ };
+ static const char *pullnames[] = {
+ [NMK_GPIO_PULL_NONE] = "none",
+ [NMK_GPIO_PULL_UP] = "up",
+ [NMK_GPIO_PULL_DOWN] = "down",
+ [3] /* illegal */ = "??"
+ };
+ static const char *slpmnames[] = {
+ [NMK_GPIO_SLPM_INPUT] = "input/wakeup",
+ [NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup",
+ };
+
+ int pin = PIN_NUM(cfg);
+ int pull = PIN_PULL(cfg);
+ int af = PIN_ALT(cfg);
+ int slpm = PIN_SLPM(cfg);
+ int output = PIN_DIR(cfg);
+ int val = PIN_VAL(cfg);
+ bool glitch = af == NMK_GPIO_ALT_C;
+
+ dev_dbg(nmk_chip->chip.dev, "pin %d [%#lx]: af %s, pull %s, slpm %s (%s%s)\n",
+ pin, cfg, afnames[af], pullnames[pull], slpmnames[slpm],
+ output ? "output " : "input",
+ output ? (val ? "high" : "low") : "");
+
+ if (sleep) {
+ int slpm_pull = PIN_SLPM_PULL(cfg);
+ int slpm_output = PIN_SLPM_DIR(cfg);
+ int slpm_val = PIN_SLPM_VAL(cfg);
+
+ af = NMK_GPIO_ALT_GPIO;
+
+ /*
+ * The SLPM_* values are normal values + 1 to allow zero to
+ * mean "same as normal".
+ */
+ if (slpm_pull)
+ pull = slpm_pull - 1;
+ if (slpm_output)
+ output = slpm_output - 1;
+ if (slpm_val)
+ val = slpm_val - 1;
+
+ dev_dbg(nmk_chip->chip.dev, "pin %d: sleep pull %s, dir %s, val %s\n",
+ pin,
+ slpm_pull ? pullnames[pull] : "same",
+ slpm_output ? (output ? "output" : "input") : "same",
+ slpm_val ? (val ? "high" : "low") : "same");
+ }
+
+ if (output)
+ __nmk_gpio_make_output(nmk_chip, offset, val);
+ else {
+ __nmk_gpio_make_input(nmk_chip, offset);
+ __nmk_gpio_set_pull(nmk_chip, offset, pull);
+ }
+
+ /*
+ * If we've backed up the SLPM registers (glitch workaround), modify
+ * the backups since they will be restored.
+ */
+ if (slpmregs) {
+ if (slpm == NMK_GPIO_SLPM_NOCHANGE)
+ slpmregs[nmk_chip->bank] |= BIT(offset);
+ else
+ slpmregs[nmk_chip->bank] &= ~BIT(offset);
+ } else
+ __nmk_gpio_set_slpm(nmk_chip, offset, slpm);
+
+ __nmk_gpio_set_mode_safe(nmk_chip, offset, af, glitch);
+}
+
+/*
+ * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+ * - Save SLPM registers
+ * - Set SLPM=0 for the IOs you want to switch and others to 1
+ * - Configure the GPIO registers for the IOs that are being switched
+ * - Set IOFORCE=1
+ * - Modify the AFLSA/B registers for the IOs that are being switched
+ * - Set IOFORCE=0
+ * - Restore SLPM registers
+ * - Any spurious wake up event during switch sequence to be ignored and
+ * cleared
+ */
+static void nmk_gpio_glitch_slpm_init(unsigned int *slpm)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+ unsigned int temp = slpm[i];
+
+ if (!chip)
+ break;
+
+ slpm[i] = readl(chip->addr + NMK_GPIO_SLPC);
+ writel(temp, chip->addr + NMK_GPIO_SLPC);
+ }
+}
+
+static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+
+ if (!chip)
+ break;
+
+ writel(slpm[i], chip->addr + NMK_GPIO_SLPC);
+ }
+}
+
+static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
+{
+ static unsigned int slpm[NUM_BANKS];
+ unsigned long flags;
+ bool glitch = false;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C) {
+ glitch = true;
+ break;
+ }
+ }
+
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+
+ if (glitch) {
+ memset(slpm, 0xff, sizeof(slpm));
+
+ for (i = 0; i < num; i++) {
+ int pin = PIN_NUM(cfgs[i]);
+ int offset = pin % NMK_GPIO_PER_CHIP;
+
+ if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C)
+ slpm[pin / NMK_GPIO_PER_CHIP] &= ~BIT(offset);
+ }
+
+ nmk_gpio_glitch_slpm_init(slpm);
+ }
+
+ for (i = 0; i < num; i++) {
+ struct nmk_gpio_chip *nmk_chip;
+ int pin = PIN_NUM(cfgs[i]);
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(pin));
+ if (!nmk_chip) {
+ ret = -EINVAL;
+ break;
+ }
+
+ spin_lock(&nmk_chip->lock);
+ __nmk_config_pin(nmk_chip, pin - nmk_chip->chip.base,
+ cfgs[i], sleep, glitch ? slpm : NULL);
+ spin_unlock(&nmk_chip->lock);
+ }
+
+ if (glitch)
+ nmk_gpio_glitch_slpm_restore(slpm);
+
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+
+ return ret;
+}
+
+/**
+ * nmk_config_pin - configure a pin's mux attributes
+ * @cfg: pin configuration
+ *
+ * Configures a pin's mode (alternate function or GPIO), its pull up status,
+ * and its sleep mode based on the specified configuration. The @cfg is
+ * usually one of the SoC specific macros defined in mach/<soc>-pins.h. These
+ * are constructed using, and can be further enhanced with, the macros in
+ * plat/pincfg.h.
+ *
+ * If a pin's mode is set to GPIO, it is configured as an input to avoid
+ * side-effects. The gpio can be manipulated later using standard GPIO API
+ * calls.
+ */
+int nmk_config_pin(pin_cfg_t cfg, bool sleep)
+{
+ return __nmk_config_pins(&cfg, 1, sleep);
+}
+EXPORT_SYMBOL(nmk_config_pin);
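The kerneldoc above describes how pin_cfg_t values are consumed; as a hedged usage sketch (not part of the patch), a board file might hand a group of such values to nmk_config_pins(). The GPIO29_U2_RXD/GPIO30_U2_TXD macros below are hypothetical stand-ins for the real definitions from mach/<soc>-pins.h, and the header providing the nmk_config_pins() prototype is assumed to be pulled in alongside <plat/pincfg.h>.

#include <linux/kernel.h>
#include <plat/pincfg.h>	/* pin_cfg_t; prototype header assumed */

static pin_cfg_t board_uart_pins[] = {
	GPIO29_U2_RXD,		/* hypothetical macro from mach/<soc>-pins.h */
	GPIO30_U2_TXD,		/* hypothetical */
};

static void board_setup_uart_pins(void)
{
	/* Configure the whole group for normal (non-sleep) operation */
	if (nmk_config_pins(board_uart_pins, ARRAY_SIZE(board_uart_pins)))
		pr_err("uart pin config failed\n");
}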
+
+/**
+ * nmk_config_pins - configure several pins at once
+ * @cfgs: array of pin configurations
+ * @num: number of elements in the array
+ *
+ * Configures several pins using nmk_config_pin(). Refer to that function for
+ * further information.
+ */
+int nmk_config_pins(pin_cfg_t *cfgs, int num)
+{
+ return __nmk_config_pins(cfgs, num, false);
+}
+EXPORT_SYMBOL(nmk_config_pins);
+
+int nmk_config_pins_sleep(pin_cfg_t *cfgs, int num)
+{
+ return __nmk_config_pins(cfgs, num, true);
+}
+EXPORT_SYMBOL(nmk_config_pins_sleep);
+
+/**
+ * nmk_gpio_set_slpm() - configure the sleep mode of a pin
+ * @gpio: pin number
+ * @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE
+ *
+ * Sets the sleep mode of a pin. If @mode is NMK_GPIO_SLPM_INPUT, the pin is
+ * changed to an input (with pullup/down enabled) in sleep and deep sleep. If
+ * @mode is NMK_GPIO_SLPM_NOCHANGE, the pin remains in the state it was
+ * configured in, even in sleep and deep sleep.
+ *
+ * On DB8500v2 onwards, this setting loses the previous meaning and instead
+ * indicates if wakeup detection is enabled on the pin. Note that
+ * enable_irq_wake() will automatically enable wakeup detection.
+ */
+int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base, mode);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+
+ return 0;
+}
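A minimal call sketch (not part of the patch): keep a pin's state through sleep on pre-DB8500v2 parts instead of the default input-with-wakeup behaviour. Pin 63 is a hypothetical example.

static void board_keep_pin_through_sleep(void)
{
	/* hypothetical pin number */
	if (nmk_gpio_set_slpm(63, NMK_GPIO_SLPM_NOCHANGE))
		pr_warn("could not set GPIO sleep mode\n");
}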
+
+/**
+ * nmk_gpio_set_pull() - enable/disable pull up/down on a gpio
+ * @gpio: pin number
+ * @pull: one of NMK_GPIO_PULL_DOWN, NMK_GPIO_PULL_UP, and NMK_GPIO_PULL_NONE
+ *
+ * Enables/disables pull up/down on a specified pin. This only takes effect if
+ * the pin is configured as an input (either explicitly or by the alternate
+ * function).
+ *
+ * NOTE: If enabling the pull up/down, the caller must ensure that the GPIO is
+ * configured as an input. Otherwise, due to the way the controller registers
+ * work, this function will change the value output on the pin.
+ */
+int nmk_gpio_set_pull(int gpio, enum nmk_gpio_pull pull)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+ __nmk_gpio_set_pull(nmk_chip, gpio - nmk_chip->chip.base, pull);
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ return 0;
+}
+
+/* Mode functions */
+/**
+ * nmk_gpio_set_mode() - set the mux mode of a gpio pin
+ * @gpio: pin number
+ * @gpio_mode: one of NMK_GPIO_ALT_GPIO, NMK_GPIO_ALT_A,
+ * NMK_GPIO_ALT_B, and NMK_GPIO_ALT_C
+ *
+ * Sets the mode of the specified pin to one of the alternate functions or
+ * plain GPIO.
+ */
+int nmk_gpio_set_mode(int gpio, int gpio_mode)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+ __nmk_gpio_set_mode(nmk_chip, gpio - nmk_chip->chip.base, gpio_mode);
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(nmk_gpio_set_mode);
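Tying the two calls together, and honouring the NOTE in the nmk_gpio_set_pull() comment above about having the pin as an input first, a consumer might look like this sketch (not part of the patch; pin 63 and the "sense" label are hypothetical):

#include <linux/gpio.h>

static int board_setup_sense_pin(void)
{
	int err;

	err = gpio_request(63, "sense");	/* hypothetical pin */
	if (err)
		return err;

	/* Plain-GPIO input first, so the pull write cannot flip an output */
	gpio_direction_input(63);
	nmk_gpio_set_mode(63, NMK_GPIO_ALT_GPIO);
	return nmk_gpio_set_pull(63, NMK_GPIO_PULL_UP);
}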
+
+int nmk_gpio_get_mode(int gpio)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ u32 afunc, bfunc, bit;
+
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ if (!nmk_chip)
+ return -EINVAL;
+
+ bit = 1 << (gpio - nmk_chip->chip.base);
+
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & bit;
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & bit;
+
+ return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
+}
+EXPORT_SYMBOL(nmk_gpio_get_mode);
+
+
+/* IRQ functions */
+static inline int nmk_gpio_get_bitmask(int gpio)
+{
+ return 1 << (gpio % 32);
+}
+
+static void nmk_gpio_irq_ack(struct irq_data *d)
+{
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+ nmk_chip = irq_data_get_irq_chip_data(d);
+ if (!nmk_chip)
+ return;
+ writel(nmk_gpio_get_bitmask(gpio), nmk_chip->addr + NMK_GPIO_IC);
+}
+
+enum nmk_gpio_irq_type {
+ NORMAL,
+ WAKE,
+};
+
+static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
+ int gpio, enum nmk_gpio_irq_type which,
+ bool enable)
+{
+ u32 rimsc = which == WAKE ? NMK_GPIO_RWIMSC : NMK_GPIO_RIMSC;
+ u32 fimsc = which == WAKE ? NMK_GPIO_FWIMSC : NMK_GPIO_FIMSC;
+ u32 bitmask = nmk_gpio_get_bitmask(gpio);
+ u32 reg;
+
+ /* we must individually set/clear the two edges */
+ if (nmk_chip->edge_rising & bitmask) {
+ reg = readl(nmk_chip->addr + rimsc);
+ if (enable)
+ reg |= bitmask;
+ else
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + rimsc);
+ }
+ if (nmk_chip->edge_falling & bitmask) {
+ reg = readl(nmk_chip->addr + fimsc);
+ if (enable)
+ reg |= bitmask;
+ else
+ reg &= ~bitmask;
+ writel(reg, nmk_chip->addr + fimsc);
+ }
+}
+
+static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
+ int gpio, bool on)
+{
+ __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
+}
+
+static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
+{
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 bitmask;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+ nmk_chip = irq_data_get_irq_chip_data(d);
+ bitmask = nmk_gpio_get_bitmask(gpio);
+ if (!nmk_chip)
+ return -EINVAL;
+
+ if (enable)
+ nmk_chip->enabled |= bitmask;
+ else
+ nmk_chip->enabled &= ~bitmask;
+
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ __nmk_gpio_irq_modify(nmk_chip, gpio, NORMAL, enable);
+
+ if (!(nmk_chip->real_wake & bitmask))
+ __nmk_gpio_set_wake(nmk_chip, gpio, enable);
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+
+ return 0;
+}
+
+static void nmk_gpio_irq_mask(struct irq_data *d)
+{
+ nmk_gpio_irq_maskunmask(d, false);
+}
+
+static void nmk_gpio_irq_unmask(struct irq_data *d)
+{
+ nmk_gpio_irq_maskunmask(d, true);
+}
+
+static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 bitmask;
+ int gpio;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+ nmk_chip = irq_data_get_irq_chip_data(d);
+ if (!nmk_chip)
+ return -EINVAL;
+ bitmask = nmk_gpio_get_bitmask(gpio);
+
+ spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
+ spin_lock(&nmk_chip->lock);
+
+ if (!(nmk_chip->enabled & bitmask))
+ __nmk_gpio_set_wake(nmk_chip, gpio, on);
+
+ if (on)
+ nmk_chip->real_wake |= bitmask;
+ else
+ nmk_chip->real_wake &= ~bitmask;
+
+ spin_unlock(&nmk_chip->lock);
+ spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
+
+ return 0;
+}
+
+static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ bool enabled, wake = irqd_is_wakeup_set(d);
+ int gpio;
+ struct nmk_gpio_chip *nmk_chip;
+ unsigned long flags;
+ u32 bitmask;
+
+ gpio = NOMADIK_IRQ_TO_GPIO(d->irq);
+ nmk_chip = irq_data_get_irq_chip_data(d);
+ bitmask = nmk_gpio_get_bitmask(gpio);
+ if (!nmk_chip)
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ return -EINVAL;
+
+ enabled = nmk_chip->enabled & bitmask;
+
+ spin_lock_irqsave(&nmk_chip->lock, flags);
+
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, NORMAL, false);
+
+ if (enabled || wake)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, false);
+
+ nmk_chip->edge_rising &= ~bitmask;
+ if (type & IRQ_TYPE_EDGE_RISING)
+ nmk_chip->edge_rising |= bitmask;
+
+ nmk_chip->edge_falling &= ~bitmask;
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ nmk_chip->edge_falling |= bitmask;
+
+ if (enabled)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, NORMAL, true);
+
+ if (enabled || wake)
+ __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, true);
+
+ spin_unlock_irqrestore(&nmk_chip->lock, flags);
+
+ return 0;
+}
+
+static struct irq_chip nmk_gpio_irq_chip = {
+ .name = "Nomadik-GPIO",
+ .irq_ack = nmk_gpio_irq_ack,
+ .irq_mask = nmk_gpio_irq_mask,
+ .irq_unmask = nmk_gpio_irq_unmask,
+ .irq_set_type = nmk_gpio_irq_set_type,
+ .irq_set_wake = nmk_gpio_irq_set_wake,
+};
+
+static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
+ u32 status)
+{
+ struct nmk_gpio_chip *nmk_chip;
+ struct irq_chip *host_chip = irq_get_chip(irq);
+ unsigned int first_irq;
+
+ chained_irq_enter(host_chip, desc);
+
+ nmk_chip = irq_get_handler_data(irq);
+ first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
+ while (status) {
+ int bit = __ffs(status);
+
+ generic_handle_irq(first_irq + bit);
+ status &= ~BIT(bit);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
+ u32 status = readl(nmk_chip->addr + NMK_GPIO_IS);
+
+ __nmk_gpio_irq_handler(irq, desc, status);
+}
+
+static void nmk_gpio_secondary_irq_handler(unsigned int irq,
+ struct irq_desc *desc)
+{
+ struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
+ u32 status = nmk_chip->get_secondary_status(nmk_chip->bank);
+
+ __nmk_gpio_irq_handler(irq, desc, status);
+}
+
+static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
+{
+ unsigned int first_irq;
+ int i;
+
+ first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
+ for (i = first_irq; i < first_irq + nmk_chip->chip.ngpio; i++) {
+ irq_set_chip_and_handler(i, &nmk_gpio_irq_chip,
+ handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ irq_set_chip_data(i, nmk_chip);
+ irq_set_irq_type(i, IRQ_TYPE_EDGE_FALLING);
+ }
+
+ irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
+ irq_set_handler_data(nmk_chip->parent_irq, nmk_chip);
+
+ if (nmk_chip->secondary_parent_irq >= 0) {
+ irq_set_chained_handler(nmk_chip->secondary_parent_irq,
+ nmk_gpio_secondary_irq_handler);
+ irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip);
+ }
+
+ return 0;
+}
+
+/* I/O Functions */
+static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+ return 0;
+}
+
+static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+ u32 bit = 1 << offset;
+
+ return (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0;
+}
+
+static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ __nmk_gpio_set_output(nmk_chip, offset, val);
+}
+
+static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ __nmk_gpio_make_output(nmk_chip, offset, val);
+
+ return 0;
+}
+
+static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+
+ return NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base) + offset;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ int mode;
+ unsigned i;
+ unsigned gpio = chip->base;
+ int is_out;
+ struct nmk_gpio_chip *nmk_chip =
+ container_of(chip, struct nmk_gpio_chip, chip);
+ const char *modes[] = {
+ [NMK_GPIO_ALT_GPIO] = "gpio",
+ [NMK_GPIO_ALT_A] = "altA",
+ [NMK_GPIO_ALT_B] = "altB",
+ [NMK_GPIO_ALT_C] = "altC",
+ };
+
+ for (i = 0; i < chip->ngpio; i++, gpio++) {
+ const char *label = gpiochip_is_requested(chip, i);
+ bool pull;
+ u32 bit = 1 << i;
+
+ is_out = readl(nmk_chip->addr + NMK_GPIO_DIR) & bit;
+ pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit);
+ mode = nmk_gpio_get_mode(gpio);
+ seq_printf(s, " gpio-%-3d (%-20.20s) %s %s %s %s",
+ gpio, label ?: "(none)",
+ is_out ? "out" : "in ",
+ chip->get
+ ? (chip->get(chip, i) ? "hi" : "lo")
+ : "? ",
+ (mode < 0) ? "unknown" : modes[mode],
+ pull ? "pull" : "none");
+
+ if (label && !is_out) {
+ int irq = gpio_to_irq(gpio);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ /* This races with request_irq(), set_irq_type(),
+ * and set_irq_wake() ... but those are "rare".
+ */
+ if (irq >= 0 && desc->action) {
+ char *trigger;
+ u32 bitmask = nmk_gpio_get_bitmask(gpio);
+
+ if (nmk_chip->edge_rising & bitmask)
+ trigger = "edge-rising";
+ else if (nmk_chip->edge_falling & bitmask)
+ trigger = "edge-falling";
+ else
+ trigger = "edge-undefined";
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger,
+ irqd_is_wakeup_set(&desc->irq_data)
+ ? " wakeup" : "");
+ }
+ }
+
+ seq_printf(s, "\n");
+ }
+}
+
+#else
+#define nmk_gpio_dbg_show NULL
+#endif
+
+/* This structure is replicated for each GPIO block allocated at probe time */
+static struct gpio_chip nmk_gpio_template = {
+ .direction_input = nmk_gpio_make_input,
+ .get = nmk_gpio_get_input,
+ .direction_output = nmk_gpio_make_output,
+ .set = nmk_gpio_set_output,
+ .to_irq = nmk_gpio_to_irq,
+ .dbg_show = nmk_gpio_dbg_show,
+ .can_sleep = 0,
+};
+
+/*
+ * Called from the suspend/resume path to only keep the real wakeup interrupts
+ * (those that have had set_irq_wake() called on them) as wakeup interrupts,
+ * and not the rest of the interrupts which we needed to have as wakeups for
+ * cpuidle.
+ *
+ * PM ops are not used since this needs to be done at the end, after all the
+ * other drivers are done with their suspend callbacks.
+ */
+void nmk_gpio_wakeups_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+
+ if (!chip)
+ break;
+
+ chip->rwimsc = readl(chip->addr + NMK_GPIO_RWIMSC);
+ chip->fwimsc = readl(chip->addr + NMK_GPIO_FWIMSC);
+
+ writel(chip->rwimsc & chip->real_wake,
+ chip->addr + NMK_GPIO_RWIMSC);
+ writel(chip->fwimsc & chip->real_wake,
+ chip->addr + NMK_GPIO_FWIMSC);
+
+ if (cpu_is_u8500v2()) {
+ chip->slpm = readl(chip->addr + NMK_GPIO_SLPC);
+
+ /* 0 -> wakeup enable */
+ writel(~chip->real_wake, chip->addr + NMK_GPIO_SLPC);
+ }
+ }
+}
+
+void nmk_gpio_wakeups_resume(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BANKS; i++) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
+
+ if (!chip)
+ break;
+
+ writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC);
+ writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC);
+
+ if (cpu_is_u8500v2())
+ writel(chip->slpm, chip->addr + NMK_GPIO_SLPC);
+ }
+}
+
+/*
+ * Read the pull up/pull down status.
+ * A bit set in 'pull_up' means that pull up
+ * is selected if pull is enabled in PDIS register.
+ * Note: only pull up/down set via this driver can
+ * be detected due to HW limitations.
+ */
+void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up)
+{
+ if (gpio_bank < NUM_BANKS) {
+ struct nmk_gpio_chip *chip = nmk_gpio_chips[gpio_bank];
+
+ if (!chip)
+ return;
+
+ *pull_up = chip->pull_up;
+ }
+}
+
+static int __devinit nmk_gpio_probe(struct platform_device *dev)
+{
+ struct nmk_gpio_platform_data *pdata = dev->dev.platform_data;
+ struct nmk_gpio_chip *nmk_chip;
+ struct gpio_chip *chip;
+ struct resource *res;
+ struct clk *clk;
+ int secondary_irq;
+ int irq;
+ int ret;
+
+ if (!pdata)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+
+ secondary_irq = platform_get_irq(dev, 1);
+ if (secondary_irq >= 0 && !pdata->get_secondary_status) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (request_mem_region(res->start, resource_size(res),
+ dev_name(&dev->dev)) == NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ clk = clk_get(&dev->dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto out_release;
+ }
+
+ clk_enable(clk);
+
+ nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
+ if (!nmk_chip) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
+ /*
+ * The virt address in nmk_chip->addr is in the nomadik register space,
+ * so we can simply convert the resource address, without remapping
+ */
+ nmk_chip->bank = dev->id;
+ nmk_chip->clk = clk;
+ nmk_chip->addr = io_p2v(res->start);
+ nmk_chip->chip = nmk_gpio_template;
+ nmk_chip->parent_irq = irq;
+ nmk_chip->secondary_parent_irq = secondary_irq;
+ nmk_chip->get_secondary_status = pdata->get_secondary_status;
+ nmk_chip->set_ioforce = pdata->set_ioforce;
+ spin_lock_init(&nmk_chip->lock);
+
+ chip = &nmk_chip->chip;
+ chip->base = pdata->first_gpio;
+ chip->ngpio = pdata->num_gpio;
+ chip->label = pdata->name ?: dev_name(&dev->dev);
+ chip->dev = &dev->dev;
+ chip->owner = THIS_MODULE;
+
+ ret = gpiochip_add(&nmk_chip->chip);
+ if (ret)
+ goto out_free;
+
+ BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
+
+ nmk_gpio_chips[nmk_chip->bank] = nmk_chip;
+ platform_set_drvdata(dev, nmk_chip);
+
+ nmk_gpio_init_irq(nmk_chip);
+
+ dev_info(&dev->dev, "Bits %i-%i at address %p\n",
+ nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
+ return 0;
+
+out_free:
+ kfree(nmk_chip);
+out_clk:
+ clk_disable(clk);
+ clk_put(clk);
+out_release:
+ release_mem_region(res->start, resource_size(res));
+out:
+ dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
+ pdata->first_gpio, pdata->first_gpio+31);
+ return ret;
+}
+
+static struct platform_driver nmk_gpio_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "gpio",
+ },
+ .probe = nmk_gpio_probe,
+};
+
+static int __init nmk_gpio_init(void)
+{
+ return platform_driver_register(&nmk_gpio_driver);
+}
+
+core_initcall(nmk_gpio_init);
+
+MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
+MODULE_DESCRIPTION("Nomadik GPIO Driver");
+MODULE_LICENSE("GPL");
+
+
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
new file mode 100644
index 000000000000..6c51191da567
--- /dev/null
+++ b/drivers/gpio/gpio-omap.c
@@ -0,0 +1,2007 @@
+/*
+ * Support functions for OMAP GPIO
+ *
+ * Copyright (C) 2003-2005 Nokia Corporation
+ * Written by Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/syscore_ops.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#include <mach/hardware.h>
+#include <asm/irq.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
+#include <asm/mach/irq.h>
+
+struct gpio_bank {
+ unsigned long pbase;
+ void __iomem *base;
+ u16 irq;
+ u16 virtual_irq_start;
+ int method;
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
+ u32 suspend_wakeup;
+ u32 saved_wakeup;
+#endif
+ u32 non_wakeup_gpios;
+ u32 enabled_non_wakeup_gpios;
+
+ u32 saved_datain;
+ u32 saved_fallingdetect;
+ u32 saved_risingdetect;
+ u32 level_mask;
+ u32 toggle_mask;
+ spinlock_t lock;
+ struct gpio_chip chip;
+ struct clk *dbck;
+ u32 mod_usage;
+ u32 dbck_enable_mask;
+ struct device *dev;
+ bool dbck_flag;
+ int stride;
+};
+
+#ifdef CONFIG_ARCH_OMAP3
+struct omap3_gpio_regs {
+ u32 irqenable1;
+ u32 irqenable2;
+ u32 wake_en;
+ u32 ctrl;
+ u32 oe;
+ u32 leveldetect0;
+ u32 leveldetect1;
+ u32 risingdetect;
+ u32 fallingdetect;
+ u32 dataout;
+};
+
+static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
+#endif
+
+/*
+ * TODO: Clean up gpio_bank usage, as it holds information
+ * related to all instances of the device
+ */
+static struct gpio_bank *gpio_bank;
+
+static int bank_width;
+
+/* TODO: Analyze removing gpio_bank_count usage from driver code */
+int gpio_bank_count;
+
+static inline struct gpio_bank *get_gpio_bank(int gpio)
+{
+ if (cpu_is_omap15xx()) {
+ if (OMAP_GPIO_IS_MPUIO(gpio))
+ return &gpio_bank[0];
+ return &gpio_bank[1];
+ }
+ if (cpu_is_omap16xx()) {
+ if (OMAP_GPIO_IS_MPUIO(gpio))
+ return &gpio_bank[0];
+ return &gpio_bank[1 + (gpio >> 4)];
+ }
+ if (cpu_is_omap7xx()) {
+ if (OMAP_GPIO_IS_MPUIO(gpio))
+ return &gpio_bank[0];
+ return &gpio_bank[1 + (gpio >> 5)];
+ }
+ if (cpu_is_omap24xx())
+ return &gpio_bank[gpio >> 5];
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
+ return &gpio_bank[gpio >> 5];
+ BUG();
+ return NULL;
+}
+
+static inline int get_gpio_index(int gpio)
+{
+ if (cpu_is_omap7xx())
+ return gpio & 0x1f;
+ if (cpu_is_omap24xx())
+ return gpio & 0x1f;
+ if (cpu_is_omap34xx() || cpu_is_omap44xx())
+ return gpio & 0x1f;
+ return gpio & 0x0f;
+}
+
+static inline int gpio_valid(int gpio)
+{
+ if (gpio < 0)
+ return -1;
+ if (cpu_class_is_omap1() && OMAP_GPIO_IS_MPUIO(gpio)) {
+ if (gpio >= OMAP_MAX_GPIO_LINES + 16)
+ return -1;
+ return 0;
+ }
+ if (cpu_is_omap15xx() && gpio < 16)
+ return 0;
+ if ((cpu_is_omap16xx()) && gpio < 64)
+ return 0;
+ if (cpu_is_omap7xx() && gpio < 192)
+ return 0;
+ if (cpu_is_omap2420() && gpio < 128)
+ return 0;
+ if (cpu_is_omap2430() && gpio < 160)
+ return 0;
+ if ((cpu_is_omap34xx() || cpu_is_omap44xx()) && gpio < 192)
+ return 0;
+ return -1;
+}
+
+static int check_gpio(int gpio)
+{
+ if (unlikely(gpio_valid(gpio) < 0)) {
+ printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio);
+ dump_stack();
+ return -1;
+ }
+ return 0;
+}
+
+static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_IO_CNTL / bank->stride;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DIR_CONTROL;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DIRECTION;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DIR_CONTROL;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_OE;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_OE;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return;
+ }
+ l = __raw_readl(reg);
+ if (is_input)
+ l |= 1 << gpio;
+ else
+ l &= ~(1 << gpio);
+ __raw_writel(l, reg);
+}
+
+static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
+{
+ void __iomem *reg = bank->base;
+ u32 l = 0;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_OUTPUT / bank->stride;
+ l = __raw_readl(reg);
+ if (enable)
+ l |= 1 << gpio;
+ else
+ l &= ~(1 << gpio);
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DATA_OUTPUT;
+ l = __raw_readl(reg);
+ if (enable)
+ l |= 1 << gpio;
+ else
+ l &= ~(1 << gpio);
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ if (enable)
+ reg += OMAP1610_GPIO_SET_DATAOUT;
+ else
+ reg += OMAP1610_GPIO_CLEAR_DATAOUT;
+ l = 1 << gpio;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DATA_OUTPUT;
+ l = __raw_readl(reg);
+ if (enable)
+ l |= 1 << gpio;
+ else
+ l &= ~(1 << gpio);
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ if (enable)
+ reg += OMAP24XX_GPIO_SETDATAOUT;
+ else
+ reg += OMAP24XX_GPIO_CLEARDATAOUT;
+ l = 1 << gpio;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ if (enable)
+ reg += OMAP4_GPIO_SETDATAOUT;
+ else
+ reg += OMAP4_GPIO_CLEARDATAOUT;
+ l = 1 << gpio;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return;
+ }
+ __raw_writel(l, reg);
+}
+
+static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+ reg = bank->base;
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_INPUT_LATCH / bank->stride;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DATA_INPUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DATAIN;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DATA_INPUT;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_DATAIN;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_DATAIN;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+ return (__raw_readl(reg)
+ & (1 << get_gpio_index(gpio))) != 0;
+}
+
+static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+ reg = bank->base;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_OUTPUT / bank->stride;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DATA_OUTPUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DATAOUT;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DATA_OUTPUT;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_DATAOUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_DATAOUT;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0;
+}
+
+#define MOD_REG_BIT(reg, bit_mask, set) \
+do { \
+ int l = __raw_readl(base + reg); \
+ if (set) l |= bit_mask; \
+ else l &= ~bit_mask; \
+ __raw_writel(l, base + reg); \
+} while(0)
+
+/**
+ * _set_gpio_debounce - low level gpio debounce time
+ * @bank: the gpio bank we're acting upon
+ * @gpio: the gpio number in this @bank
+ * @debounce: debounce time to use
+ *
+ * OMAP's debounce time is in 31us steps, so we need
+ * to convert and round up to the closest unit.
+ */
+static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
+ unsigned debounce)
+{
+ void __iomem *reg = bank->base;
+ u32 val;
+ u32 l;
+
+ if (!bank->dbck_flag)
+ return;
+
+ if (debounce < 32)
+ debounce = 0x01;
+ else if (debounce > 7936)
+ debounce = 0xff;
+ else
+ debounce = (debounce / 0x1f) - 1;
+
+ l = 1 << get_gpio_index(gpio);
+
+ if (bank->method == METHOD_GPIO_44XX)
+ reg += OMAP4_GPIO_DEBOUNCINGTIME;
+ else
+ reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
+
+ __raw_writel(debounce, reg);
+
+ reg = bank->base;
+ if (bank->method == METHOD_GPIO_44XX)
+ reg += OMAP4_GPIO_DEBOUNCENABLE;
+ else
+ reg += OMAP24XX_GPIO_DEBOUNCE_EN;
+
+ val = __raw_readl(reg);
+
+ if (debounce) {
+ val |= l;
+ clk_enable(bank->dbck);
+ } else {
+ val &= ~l;
+ clk_disable(bank->dbck);
+ }
+ bank->dbck_enable_mask = val;
+
+ __raw_writel(val, reg);
+}
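As a reading aid for the 31 us quantisation above, here is a standalone sketch (not part of the patch) that mirrors _set_gpio_debounce()'s conversion and prints the resulting register value; the "(value + 1) * 31 us" effective time shown in the comments is an assumption about the hardware encoding, not something stated in this hunk.

#include <stdio.h>

static unsigned int omap_debounce_reg(unsigned int usec)
{
	if (usec < 32)
		return 0x01;
	if (usec > 7936)
		return 0xff;
	return usec / 0x1f - 1;	/* 0x1f == 31 */
}

int main(void)
{
	unsigned int req[] = { 10, 100, 1000, 10000 };
	unsigned int i;

	for (i = 0; i < sizeof(req) / sizeof(req[0]); i++) {
		unsigned int v = omap_debounce_reg(req[i]);

		/* assumed effective time: (v + 1) * 31 us */
		printf("%5u us requested -> reg 0x%02x (~%u us)\n",
		       req[i], v, (v + 1) * 31);
	}
	return 0;
}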
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
+ int trigger)
+{
+ void __iomem *base = bank->base;
+ u32 gpio_bit = 1 << gpio;
+ u32 val;
+
+ if (cpu_is_omap44xx()) {
+ MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
+ } else {
+ MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
+ }
+ if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
+ if (cpu_is_omap44xx()) {
+ if (trigger != 0)
+ __raw_writel(1 << gpio, bank->base+
+ OMAP4_GPIO_IRQWAKEN0);
+ else {
+ val = __raw_readl(bank->base +
+ OMAP4_GPIO_IRQWAKEN0);
+ __raw_writel(val & (~(1 << gpio)), bank->base +
+ OMAP4_GPIO_IRQWAKEN0);
+ }
+ } else {
+ /*
+ * GPIO wakeup request can only be generated on edge
+ * transitions
+ */
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
+ __raw_writel(1 << gpio, bank->base
+ + OMAP24XX_GPIO_SETWKUENA);
+ else
+ __raw_writel(1 << gpio, bank->base
+ + OMAP24XX_GPIO_CLEARWKUENA);
+ }
+ }
+ /* This part always needs to be executed for OMAP34xx */
+ if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
+ /*
+ * Log the edge GPIO and manually trigger the IRQ after
+ * resume if the input level changes, to avoid losing IRQs
+ * during PER RET/OFF mode.
+ * Applies to OMAP2 non-wakeup GPIOs and all OMAP3 GPIOs.
+ */
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
+ bank->enabled_non_wakeup_gpios |= gpio_bit;
+ else
+ bank->enabled_non_wakeup_gpios &= ~gpio_bit;
+ }
+
+ if (cpu_is_omap44xx()) {
+ bank->level_mask =
+ __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) |
+ __raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1);
+ } else {
+ bank->level_mask =
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
+ }
+}
+#endif
+
+#ifdef CONFIG_ARCH_OMAP1
+/*
+ * This only applies to chips that can't do both rising and falling edge
+ * detection at once. For all other chips, this function is a noop.
+ */
+static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg = bank->base;
+ u32 l = 0;
+
+ switch (bank->method) {
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
+ break;
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_CONTROL;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_CONTROL;
+ break;
+#endif
+ default:
+ return;
+ }
+
+ l = __raw_readl(reg);
+ if ((l >> gpio) & 1)
+ l &= ~(1 << gpio);
+ else
+ l |= 1 << gpio;
+
+ __raw_writel(l, reg);
+}
+#endif
+
+static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
+{
+ void __iomem *reg = bank->base;
+ u32 l = 0;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
+ l = __raw_readl(reg);
+ if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+ bank->toggle_mask |= 1 << gpio;
+ if (trigger & IRQ_TYPE_EDGE_RISING)
+ l |= 1 << gpio;
+ else if (trigger & IRQ_TYPE_EDGE_FALLING)
+ l &= ~(1 << gpio);
+ else
+ goto bad;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_CONTROL;
+ l = __raw_readl(reg);
+ if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+ bank->toggle_mask |= 1 << gpio;
+ if (trigger & IRQ_TYPE_EDGE_RISING)
+ l |= 1 << gpio;
+ else if (trigger & IRQ_TYPE_EDGE_FALLING)
+ l &= ~(1 << gpio);
+ else
+ goto bad;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ if (gpio & 0x08)
+ reg += OMAP1610_GPIO_EDGE_CTRL2;
+ else
+ reg += OMAP1610_GPIO_EDGE_CTRL1;
+ gpio &= 0x07;
+ l = __raw_readl(reg);
+ l &= ~(3 << (gpio << 1));
+ if (trigger & IRQ_TYPE_EDGE_RISING)
+ l |= 2 << (gpio << 1);
+ if (trigger & IRQ_TYPE_EDGE_FALLING)
+ l |= 1 << (gpio << 1);
+ if (trigger)
+ /* Enable wake-up during idle for dynamic tick */
+ __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_SET_WAKEUPENA);
+ else
+ __raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA);
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_CONTROL;
+ l = __raw_readl(reg);
+ if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+ bank->toggle_mask |= 1 << gpio;
+ if (trigger & IRQ_TYPE_EDGE_RISING)
+ l |= 1 << gpio;
+ else if (trigger & IRQ_TYPE_EDGE_FALLING)
+ l &= ~(1 << gpio);
+ else
+ goto bad;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ case METHOD_GPIO_24XX:
+ case METHOD_GPIO_44XX:
+ set_24xx_gpio_triggering(bank, gpio, trigger);
+ return 0;
+#endif
+ default:
+ goto bad;
+ }
+ __raw_writel(l, reg);
+ return 0;
+bad:
+ return -EINVAL;
+}
+
+static int gpio_irq_type(struct irq_data *d, unsigned type)
+{
+ struct gpio_bank *bank;
+ unsigned gpio;
+ int retval;
+ unsigned long flags;
+
+ if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
+ gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+ else
+ gpio = d->irq - IH_GPIO_BASE;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+
+ if (type & ~IRQ_TYPE_SENSE_MASK)
+ return -EINVAL;
+
+ /* OMAP1 allows only edge triggering */
+ if (!cpu_class_is_omap2()
+ && (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
+ return -EINVAL;
+
+ bank = irq_data_get_irq_chip_data(d);
+ spin_lock_irqsave(&bank->lock, flags);
+ retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+
+ return retval;
+}
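For context, a minimal consumer-side sketch (not part of the patch) of how these irq_chip callbacks get exercised; the GPIO number and handler name are placeholders and error unwinding is trimmed:

	#include <linux/gpio.h>
	#include <linux/interrupt.h>

	#define DEMO_GPIO	42	/* placeholder GPIO number */

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int demo_request_gpio_irq(void)
	{
		int irq, err;

		err = gpio_request(DEMO_GPIO, "demo");
		if (err)
			return err;
		gpio_direction_input(DEMO_GPIO);

		irq = gpio_to_irq(DEMO_GPIO);	/* resolved by gpio_2irq() later in this file */
		/* the trigger flags end up in gpio_irq_type() via irq_set_type */
		return request_irq(irq, demo_isr, IRQF_TRIGGER_RISING,
				   "demo", NULL);
	}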
+
+static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+{
+ void __iomem *reg = bank->base;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ /* MPUIO irqstatus is reset by reading the status register,
+ * so do nothing here */
+ return;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_STATUS;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_IRQSTATUS1;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_STATUS;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_IRQSTATUS1;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_IRQSTATUS0;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return;
+ }
+ __raw_writel(gpio_mask, reg);
+
+ /* Workaround for clearing DSP GPIO interrupts to allow retention */
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ reg = bank->base + OMAP24XX_GPIO_IRQSTATUS2;
+ else if (cpu_is_omap44xx())
+ reg = bank->base + OMAP4_GPIO_IRQSTATUS1;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ __raw_writel(gpio_mask, reg);
+
+ /* Flush posted write for the irq status to avoid spurious interrupts */
+ __raw_readl(reg);
+ }
+}
+
+static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
+{
+ _clear_gpio_irqbank(bank, 1 << get_gpio_index(gpio));
+}
+
+static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
+{
+ void __iomem *reg = bank->base;
+ int inv = 0;
+ u32 l;
+ u32 mask;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ mask = 0xffff;
+ inv = 1;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_MASK;
+ mask = 0xffff;
+ inv = 1;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_IRQENABLE1;
+ mask = 0xffff;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_MASK;
+ mask = 0xffffffff;
+ inv = 1;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_IRQENABLE1;
+ mask = 0xffffffff;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_IRQSTATUSSET0;
+ mask = 0xffffffff;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ l = __raw_readl(reg);
+ if (inv)
+ l = ~l;
+ l &= mask;
+ return l;
+}
+
+static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ l = __raw_readl(reg);
+ if (enable)
+ l &= ~(gpio_mask);
+ else
+ l |= gpio_mask;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_MASK;
+ l = __raw_readl(reg);
+ if (enable)
+ l &= ~(gpio_mask);
+ else
+ l |= gpio_mask;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ if (enable)
+ reg += OMAP1610_GPIO_SET_IRQENABLE1;
+ else
+ reg += OMAP1610_GPIO_CLEAR_IRQENABLE1;
+ l = gpio_mask;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_MASK;
+ l = __raw_readl(reg);
+ if (enable)
+ l &= ~(gpio_mask);
+ else
+ l |= gpio_mask;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ if (enable)
+ reg += OMAP24XX_GPIO_SETIRQENABLE1;
+ else
+ reg += OMAP24XX_GPIO_CLEARIRQENABLE1;
+ l = gpio_mask;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ if (enable)
+ reg += OMAP4_GPIO_IRQSTATUSSET0;
+ else
+ reg += OMAP4_GPIO_IRQSTATUSCLR0;
+ l = gpio_mask;
+ break;
+#endif
+ default:
+ WARN_ON(1);
+ return;
+ }
+ __raw_writel(l, reg);
+}
+
+static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
+{
+ _enable_gpio_irqbank(bank, 1 << get_gpio_index(gpio), enable);
+}
+
+/*
+ * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
+ * 1510 does not seem to have a wake-up register. If JTAG is connected
+ * to the target, the system always wakes up on GPIO events. While the
+ * system is running, all registered GPIO interrupts need to have wake-up
+ * enabled. When the system is suspended, only selected GPIO interrupts
+ * need to have wake-up enabled.
+ */
+static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
+{
+ unsigned long uninitialized_var(flags);
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_MPUIO:
+ case METHOD_GPIO_1610:
+ spin_lock_irqsave(&bank->lock, flags);
+ if (enable)
+ bank->suspend_wakeup |= (1 << gpio);
+ else
+ bank->suspend_wakeup &= ~(1 << gpio);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+#endif
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ case METHOD_GPIO_24XX:
+ case METHOD_GPIO_44XX:
+ if (bank->non_wakeup_gpios & (1 << gpio)) {
+ printk(KERN_ERR "Unable to modify wakeup on "
+ "non-wakeup GPIO%d\n",
+ (bank - gpio_bank) * 32 + gpio);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&bank->lock, flags);
+ if (enable)
+ bank->suspend_wakeup |= (1 << gpio);
+ else
+ bank->suspend_wakeup &= ~(1 << gpio);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+#endif
+ default:
+ printk(KERN_ERR "Can't enable GPIO wakeup for method %i\n",
+ bank->method);
+ return -EINVAL;
+ }
+}
+
+static void _reset_gpio(struct gpio_bank *bank, int gpio)
+{
+ _set_gpio_direction(bank, get_gpio_index(gpio), 1);
+ _set_gpio_irqenable(bank, gpio, 0);
+ _clear_gpio_irqstatus(bank, gpio);
+ _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+}
+
+/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
+static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank;
+ int retval;
+
+ if (check_gpio(gpio) < 0)
+ return -ENODEV;
+ bank = irq_data_get_irq_chip_data(d);
+ retval = _set_gpio_wakeup(bank, get_gpio_index(gpio), enable);
+
+ return retval;
+}
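As the comment above _set_gpio_wakeup() notes, drivers reach this path through the generic wake API; a small sketch (not part of the patch), assuming the GPIO's IRQ was already requested as in the earlier example:

	#include <linux/gpio.h>
	#include <linux/interrupt.h>

	/* 'wake_gpio' is a placeholder for a pin whose IRQ was requested earlier. */
	static int demo_set_gpio_wake(unsigned wake_gpio, bool on)
	{
		int irq = gpio_to_irq(wake_gpio);

		/* both calls reach gpio_wake_enable() -> _set_gpio_wakeup() */
		return on ? enable_irq_wake(irq) : disable_irq_wake(irq);
	}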
+
+static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ /* Set trigger to none. You need to enable the desired trigger with
+ * request_irq() or set_irq_type().
+ */
+ _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+
+#ifdef CONFIG_ARCH_OMAP15XX
+ if (bank->method == METHOD_GPIO_1510) {
+ void __iomem *reg;
+
+ /* Claim the pin for MPU */
+ reg = bank->base + OMAP1510_GPIO_PIN_CONTROL;
+ __raw_writel(__raw_readl(reg) | (1 << offset), reg);
+ }
+#endif
+ if (!cpu_class_is_omap1()) {
+ if (!bank->mod_usage) {
+ void __iomem *reg = bank->base;
+ u32 ctrl;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ reg += OMAP24XX_GPIO_CTRL;
+ else if (cpu_is_omap44xx())
+ reg += OMAP4_GPIO_CTRL;
+ ctrl = __raw_readl(reg);
+ /* Module is enabled, clocks are not gated */
+ ctrl &= 0xFFFFFFFE;
+ __raw_writel(ctrl, reg);
+ }
+ bank->mod_usage |= 1 << offset;
+ }
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+#ifdef CONFIG_ARCH_OMAP16XX
+ if (bank->method == METHOD_GPIO_1610) {
+ /* Disable wake-up during idle for dynamic tick */
+ void __iomem *reg = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
+ __raw_writel(1 << offset, reg);
+ }
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ if (bank->method == METHOD_GPIO_24XX) {
+ /* Disable wake-up during idle for dynamic tick */
+ void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
+ __raw_writel(1 << offset, reg);
+ }
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ if (bank->method == METHOD_GPIO_44XX) {
+ /* Disable wake-up during idle for dynamic tick */
+ void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ __raw_writel(1 << offset, reg);
+ }
+#endif
+ if (!cpu_class_is_omap1()) {
+ bank->mod_usage &= ~(1 << offset);
+ if (!bank->mod_usage) {
+ void __iomem *reg = bank->base;
+ u32 ctrl;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ reg += OMAP24XX_GPIO_CTRL;
+ else if (cpu_is_omap44xx())
+ reg += OMAP4_GPIO_CTRL;
+ ctrl = __raw_readl(reg);
+ /* Module is disabled, clocks are gated */
+ ctrl |= 1;
+ __raw_writel(ctrl, reg);
+ }
+ }
+ _reset_gpio(bank, bank->chip.base + offset);
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+/*
+ * We need to unmask the GPIO bank interrupt as soon as possible to
+ * avoid missing GPIO interrupts for other lines in the bank.
+ * Then we need to mask-read-clear-unmask the triggered GPIO lines
+ * in the bank to avoid missing nested interrupts for a GPIO line.
+ * If we wait to unmask individual GPIO lines in the bank after the
+ * line's interrupt handler has been run, we may miss some nested
+ * interrupts.
+ */
+static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *isr_reg = NULL;
+ u32 isr;
+ unsigned int gpio_irq, gpio_index;
+ struct gpio_bank *bank;
+ u32 retrigger = 0;
+ int unmasked = 0;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ bank = irq_get_handler_data(irq);
+#ifdef CONFIG_ARCH_OMAP1
+ if (bank->method == METHOD_MPUIO)
+ isr_reg = bank->base +
+ OMAP_MPUIO_GPIO_INT / bank->stride;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ if (bank->method == METHOD_GPIO_1510)
+ isr_reg = bank->base + OMAP1510_GPIO_INT_STATUS;
+#endif
+#if defined(CONFIG_ARCH_OMAP16XX)
+ if (bank->method == METHOD_GPIO_1610)
+ isr_reg = bank->base + OMAP1610_GPIO_IRQSTATUS1;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ if (bank->method == METHOD_GPIO_7XX)
+ isr_reg = bank->base + OMAP7XX_GPIO_INT_STATUS;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ if (bank->method == METHOD_GPIO_24XX)
+ isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1;
+#endif
+#if defined(CONFIG_ARCH_OMAP4)
+ if (bank->method == METHOD_GPIO_44XX)
+ isr_reg = bank->base + OMAP4_GPIO_IRQSTATUS0;
+#endif
+
+ if (WARN_ON(!isr_reg))
+ goto exit;
+
+ while (1) {
+ u32 isr_saved, level_mask = 0;
+ u32 enabled;
+
+ enabled = _get_gpio_irqbank_mask(bank);
+ isr_saved = isr = __raw_readl(isr_reg) & enabled;
+
+ if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
+ isr &= 0x0000ffff;
+
+ if (cpu_class_is_omap2()) {
+ level_mask = bank->level_mask & enabled;
+ }
+
+ /* clear edge sensitive interrupts before the handler(s) are
+ called so that we don't miss any interrupts that occur while
+ executing them */
+ _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 0);
+ _clear_gpio_irqbank(bank, isr_saved & ~level_mask);
+ _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 1);
+
+ /* if only edge sensitive GPIO pin interrupts are configured,
+ we can unmask the GPIO bank interrupt immediately */
+ if (!level_mask && !unmasked) {
+ unmasked = 1;
+ chained_irq_exit(chip, desc);
+ }
+
+ isr |= retrigger;
+ retrigger = 0;
+ if (!isr)
+ break;
+
+ gpio_irq = bank->virtual_irq_start;
+ for (; isr != 0; isr >>= 1, gpio_irq++) {
+ gpio_index = get_gpio_index(irq_to_gpio(gpio_irq));
+
+ if (!(isr & 1))
+ continue;
+
+#ifdef CONFIG_ARCH_OMAP1
+ /*
+ * Some chips can't respond to both rising and falling
+ * at the same time. If this irq was requested with
+ * both flags, we need to flip the ICR data for the IRQ
+ * to respond to the IRQ for the opposite direction.
+ * This will be indicated in the bank toggle_mask.
+ */
+ if (bank->toggle_mask & (1 << gpio_index))
+ _toggle_gpio_edge_triggering(bank, gpio_index);
+#endif
+
+ generic_handle_irq(gpio_irq);
+ }
+ }
+ /* if the bank has any level sensitive GPIO pin interrupts
+ configured, we must unmask the bank interrupt only after the
+ handler(s) have run, in order to avoid a spurious bank
+ interrupt */
+exit:
+ if (!unmasked)
+ chained_irq_exit(chip, desc);
+}
+
+static void gpio_irq_shutdown(struct irq_data *d)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ _reset_gpio(bank, gpio);
+}
+
+static void gpio_ack_irq(struct irq_data *d)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ _clear_gpio_irqstatus(bank, gpio);
+}
+
+static void gpio_mask_irq(struct irq_data *d)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ _set_gpio_irqenable(bank, gpio, 0);
+ _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+}
+
+static void gpio_unmask_irq(struct irq_data *d)
+{
+ unsigned int gpio = d->irq - IH_GPIO_BASE;
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ unsigned int irq_mask = 1 << get_gpio_index(gpio);
+ u32 trigger = irqd_get_trigger_type(d);
+
+ if (trigger)
+ _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
+
+ /* For level-triggered GPIOs, the clearing must be done after
+ * the HW source is cleared, thus after the handler has run */
+ if (bank->level_mask & irq_mask) {
+ _set_gpio_irqenable(bank, gpio, 0);
+ _clear_gpio_irqstatus(bank, gpio);
+ }
+
+ _set_gpio_irqenable(bank, gpio, 1);
+}
+
+static struct irq_chip gpio_irq_chip = {
+ .name = "GPIO",
+ .irq_shutdown = gpio_irq_shutdown,
+ .irq_ack = gpio_ack_irq,
+ .irq_mask = gpio_mask_irq,
+ .irq_unmask = gpio_unmask_irq,
+ .irq_set_type = gpio_irq_type,
+ .irq_set_wake = gpio_wake_enable,
+};
+
+/*---------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_OMAP1
+
+/* MPUIO uses the always-on 32k clock */
+
+static void mpuio_ack_irq(struct irq_data *d)
+{
+ /* The ISR is reset automatically, so do nothing here. */
+}
+
+static void mpuio_mask_irq(struct irq_data *d)
+{
+ unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ _set_gpio_irqenable(bank, gpio, 0);
+}
+
+static void mpuio_unmask_irq(struct irq_data *d)
+{
+ unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
+ struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ _set_gpio_irqenable(bank, gpio, 1);
+}
+
+static struct irq_chip mpuio_irq_chip = {
+ .name = "MPUIO",
+ .irq_ack = mpuio_ack_irq,
+ .irq_mask = mpuio_mask_irq,
+ .irq_unmask = mpuio_unmask_irq,
+ .irq_set_type = gpio_irq_type,
+#ifdef CONFIG_ARCH_OMAP16XX
+ /* REVISIT: assuming only 16xx supports MPUIO wake events */
+ .irq_set_wake = gpio_wake_enable,
+#endif
+};
+
+
+#define bank_is_mpuio(bank) ((bank)->method == METHOD_MPUIO)
+
+
+#ifdef CONFIG_ARCH_OMAP16XX
+
+#include <linux/platform_device.h>
+
+static int omap_mpuio_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ void __iomem *mask_reg = bank->base +
+ OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank->saved_wakeup = __raw_readl(mask_reg);
+ __raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static int omap_mpuio_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+ void __iomem *mask_reg = bank->base +
+ OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+ __raw_writel(bank->saved_wakeup, mask_reg);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
+ .suspend_noirq = omap_mpuio_suspend_noirq,
+ .resume_noirq = omap_mpuio_resume_noirq,
+};
+
+/* use platform_driver for this. */
+static struct platform_driver omap_mpuio_driver = {
+ .driver = {
+ .name = "mpuio",
+ .pm = &omap_mpuio_dev_pm_ops,
+ },
+};
+
+static struct platform_device omap_mpuio_device = {
+ .name = "mpuio",
+ .id = -1,
+ .dev = {
+ .driver = &omap_mpuio_driver.driver,
+ }
+ /* could list the /proc/iomem resources */
+};
+
+static inline void mpuio_init(void)
+{
+ struct gpio_bank *bank = get_gpio_bank(OMAP_MPUIO(0));
+ platform_set_drvdata(&omap_mpuio_device, bank);
+
+ if (platform_driver_register(&omap_mpuio_driver) == 0)
+ (void) platform_device_register(&omap_mpuio_device);
+}
+
+#else
+static inline void mpuio_init(void) {}
+#endif /* 16xx */
+
+#else
+
+extern struct irq_chip mpuio_irq_chip;
+
+#define bank_is_mpuio(bank) 0
+static inline void mpuio_init(void) {}
+
+#endif
+
+/*---------------------------------------------------------------------*/
+
+/* REVISIT these are stupid implementations! replace by ones that
+ * don't switch on METHOD_* and which mostly avoid spinlocks
+ */
+
+static int gpio_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_direction(bank, offset, 1);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+}
+
+static int gpio_is_input(struct gpio_bank *bank, int mask)
+{
+ void __iomem *reg = bank->base;
+
+ switch (bank->method) {
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_IO_CNTL / bank->stride;
+ break;
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DIR_CONTROL;
+ break;
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DIRECTION;
+ break;
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_DIR_CONTROL;
+ break;
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_OE;
+ break;
+ case METHOD_GPIO_44XX:
+ reg += OMAP4_GPIO_OE;
+ break;
+ default:
+ WARN_ONCE(1, "gpio_is_input: incorrect OMAP GPIO method");
+ return -EINVAL;
+ }
+ return __raw_readl(reg) & mask;
+}
+
+static int gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank;
+ void __iomem *reg;
+ int gpio;
+ u32 mask;
+
+ gpio = chip->base + offset;
+ bank = get_gpio_bank(gpio);
+ reg = bank->base;
+ mask = 1 << get_gpio_index(gpio);
+
+ if (gpio_is_input(bank, mask))
+ return _get_gpio_datain(bank, gpio);
+ else
+ return _get_gpio_dataout(bank, gpio);
+}
+
+static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_dataout(bank, offset, value);
+ _set_gpio_direction(bank, offset, 0);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+}
+
+static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
+ unsigned debounce)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+
+ if (!bank->dbck) {
+ bank->dbck = clk_get(bank->dev, "dbclk");
+ if (IS_ERR(bank->dbck))
+ dev_err(bank->dev, "Could not get gpio dbck\n");
+ }
+
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_debounce(bank, offset, debounce);
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct gpio_bank *bank;
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ spin_lock_irqsave(&bank->lock, flags);
+ _set_gpio_dataout(bank, offset, value);
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct gpio_bank *bank;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ return bank->virtual_irq_start + offset;
+}
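To tie the gpio_chip callbacks above together, a short consumer sketch (not part of the patch; the GPIO number and debounce value are placeholders). These are the generic gpiolib entry points that dispatch into the functions above:

	#include <linux/gpio.h>

	static int demo_gpiolib_usage(void)
	{
		int err = gpio_request(42, "demo");	/* -> omap_gpio_request() */
		if (err)
			return err;

		gpio_direction_output(42, 0);	/* -> gpio_output() */
		gpio_set_value(42, 1);		/* -> gpio_set() */

		gpio_direction_input(42);	/* -> gpio_input() */
		gpio_set_debounce(42, 100);	/* -> gpio_debounce(), ~100 us */

		err = gpio_get_value(42);	/* -> gpio_get() */
		gpio_free(42);			/* -> omap_gpio_free() */
		return err;
	}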
+
+/*---------------------------------------------------------------------*/
+
+static void __init omap_gpio_show_rev(struct gpio_bank *bank)
+{
+ u32 rev;
+
+ if (cpu_is_omap16xx() && (bank->method == METHOD_MPUIO))
+ rev = __raw_readw(bank->base + OMAP1610_GPIO_REVISION);
+ else if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ rev = __raw_readl(bank->base + OMAP24XX_GPIO_REVISION);
+ else if (cpu_is_omap44xx())
+ rev = __raw_readl(bank->base + OMAP4_GPIO_REVISION);
+ else
+ return;
+
+ printk(KERN_INFO "OMAP GPIO hardware version %d.%d\n",
+ (rev >> 4) & 0x0f, rev & 0x0f);
+}
+
+/* This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
+static inline int init_gpio_info(struct platform_device *pdev)
+{
+ /* TODO: Analyze removing gpio_bank_count usage from driver code */
+ gpio_bank = kzalloc(gpio_bank_count * sizeof(struct gpio_bank),
+ GFP_KERNEL);
+ if (!gpio_bank) {
+ dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* TODO: Cleanup cpu_is_* checks */
+static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
+{
+ if (cpu_class_is_omap2()) {
+ if (cpu_is_omap44xx()) {
+ __raw_writel(0xffffffff, bank->base +
+ OMAP4_GPIO_IRQSTATUSCLR0);
+ __raw_writel(0x00000000, bank->base +
+ OMAP4_GPIO_DEBOUNCENABLE);
+ /* Initialize interface clk ungated, module enabled */
+ __raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
+ } else if (cpu_is_omap34xx()) {
+ __raw_writel(0x00000000, bank->base +
+ OMAP24XX_GPIO_IRQENABLE1);
+ __raw_writel(0xffffffff, bank->base +
+ OMAP24XX_GPIO_IRQSTATUS1);
+ __raw_writel(0x00000000, bank->base +
+ OMAP24XX_GPIO_DEBOUNCE_EN);
+
+ /* Initialize interface clk ungated, module enabled */
+ __raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
+ } else if (cpu_is_omap24xx()) {
+ static const u32 non_wakeup_gpios[] = {
+ 0xe203ffc0, 0x08700040
+ };
+ if (id < ARRAY_SIZE(non_wakeup_gpios))
+ bank->non_wakeup_gpios = non_wakeup_gpios[id];
+ }
+ } else if (cpu_class_is_omap1()) {
+ if (bank_is_mpuio(bank))
+ __raw_writew(0xffff, bank->base +
+ OMAP_MPUIO_GPIO_MASKIT / bank->stride);
+ if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
+ __raw_writew(0xffff, bank->base
+ + OMAP1510_GPIO_INT_MASK);
+ __raw_writew(0x0000, bank->base
+ + OMAP1510_GPIO_INT_STATUS);
+ }
+ if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
+ __raw_writew(0x0000, bank->base
+ + OMAP1610_GPIO_IRQENABLE1);
+ __raw_writew(0xffff, bank->base
+ + OMAP1610_GPIO_IRQSTATUS1);
+ __raw_writew(0x0014, bank->base
+ + OMAP1610_GPIO_SYSCONFIG);
+
+ /*
+ * Enable system clock for GPIO module.
+ * The CAM_CLK_CTRL *is* really the right place.
+ */
+ omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04,
+ ULPD_CAM_CLK_CTRL);
+ }
+ if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
+ __raw_writel(0xffffffff, bank->base
+ + OMAP7XX_GPIO_INT_MASK);
+ __raw_writel(0x00000000, bank->base
+ + OMAP7XX_GPIO_INT_STATUS);
+ }
+ }
+}
+
+static void __init omap_gpio_chip_init(struct gpio_bank *bank)
+{
+ int j;
+ static int gpio;
+
+ bank->mod_usage = 0;
+ /*
+ * REVISIT eventually switch from OMAP-specific gpio structs
+ * over to the generic ones
+ */
+ bank->chip.request = omap_gpio_request;
+ bank->chip.free = omap_gpio_free;
+ bank->chip.direction_input = gpio_input;
+ bank->chip.get = gpio_get;
+ bank->chip.direction_output = gpio_output;
+ bank->chip.set_debounce = gpio_debounce;
+ bank->chip.set = gpio_set;
+ bank->chip.to_irq = gpio_2irq;
+ if (bank_is_mpuio(bank)) {
+ bank->chip.label = "mpuio";
+#ifdef CONFIG_ARCH_OMAP16XX
+ bank->chip.dev = &omap_mpuio_device.dev;
+#endif
+ bank->chip.base = OMAP_MPUIO(0);
+ } else {
+ bank->chip.label = "gpio";
+ bank->chip.base = gpio;
+ gpio += bank_width;
+ }
+ bank->chip.ngpio = bank_width;
+
+ gpiochip_add(&bank->chip);
+
+ for (j = bank->virtual_irq_start;
+ j < bank->virtual_irq_start + bank_width; j++) {
+ irq_set_lockdep_class(j, &gpio_lock_class);
+ irq_set_chip_data(j, bank);
+ if (bank_is_mpuio(bank))
+ irq_set_chip(j, &mpuio_irq_chip);
+ else
+ irq_set_chip(j, &gpio_irq_chip);
+ irq_set_handler(j, handle_simple_irq);
+ set_irq_flags(j, IRQF_VALID);
+ }
+ irq_set_chained_handler(bank->irq, gpio_irq_handler);
+ irq_set_handler_data(bank->irq, bank);
+}
+
+static int __devinit omap_gpio_probe(struct platform_device *pdev)
+{
+ static int gpio_init_done;
+ struct omap_gpio_platform_data *pdata;
+ struct resource *res;
+ int id;
+ struct gpio_bank *bank;
+
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+
+ if (!gpio_init_done) {
+ int ret;
+
+ ret = init_gpio_info(pdev);
+ if (ret)
+ return ret;
+ }
+
+ id = pdev->id;
+ bank = &gpio_bank[id];
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (unlikely(!res)) {
+ dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n", id);
+ return -ENODEV;
+ }
+
+ bank->irq = res->start;
+ bank->virtual_irq_start = pdata->virtual_irq_start;
+ bank->method = pdata->bank_type;
+ bank->dev = &pdev->dev;
+ bank->dbck_flag = pdata->dbck_flag;
+ bank->stride = pdata->bank_stride;
+ bank_width = pdata->bank_width;
+
+ spin_lock_init(&bank->lock);
+
+ /* Static mapping, never released */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!res)) {
+ dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n", id);
+ return -ENODEV;
+ }
+
+ bank->base = ioremap(res->start, resource_size(res));
+ if (!bank->base) {
+ dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n", id);
+ return -ENOMEM;
+ }
+
+ pm_runtime_enable(bank->dev);
+ pm_runtime_get_sync(bank->dev);
+
+ omap_gpio_mod_init(bank, id);
+ omap_gpio_chip_init(bank);
+ omap_gpio_show_rev(bank);
+
+ if (!gpio_init_done)
+ gpio_init_done = 1;
+
+ return 0;
+}
+
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
+static int omap_gpio_suspend(void)
+{
+ int i;
+
+ if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
+ return 0;
+
+ for (i = 0; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ void __iomem *wake_status;
+ void __iomem *wake_clear;
+ void __iomem *wake_set;
+ unsigned long flags;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ wake_status = bank->base + OMAP1610_GPIO_WAKEUPENABLE;
+ wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
+ wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
+ wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
+ wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ break;
+#endif
+ default:
+ continue;
+ }
+
+ spin_lock_irqsave(&bank->lock, flags);
+ bank->saved_wakeup = __raw_readl(wake_status);
+ __raw_writel(0xffffffff, wake_clear);
+ __raw_writel(bank->suspend_wakeup, wake_set);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ }
+
+ return 0;
+}
+
+static void omap_gpio_resume(void)
+{
+ int i;
+
+ if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
+ return;
+
+ for (i = 0; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ void __iomem *wake_clear;
+ void __iomem *wake_set;
+ unsigned long flags;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
+ wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+ case METHOD_GPIO_24XX:
+ wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
+ wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP4
+ case METHOD_GPIO_44XX:
+ wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
+ break;
+#endif
+ default:
+ continue;
+ }
+
+ spin_lock_irqsave(&bank->lock, flags);
+ __raw_writel(0xffffffff, wake_clear);
+ __raw_writel(bank->saved_wakeup, wake_set);
+ spin_unlock_irqrestore(&bank->lock, flags);
+ }
+}
+
+static struct syscore_ops omap_gpio_syscore_ops = {
+ .suspend = omap_gpio_suspend,
+ .resume = omap_gpio_resume,
+};
+
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+
+static int workaround_enabled;
+
+void omap2_gpio_prepare_for_idle(int off_mode)
+{
+ int i, c = 0;
+ int min = 0;
+
+ if (cpu_is_omap34xx())
+ min = 1;
+
+ for (i = min; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ u32 l1 = 0, l2 = 0;
+ int j;
+
+ for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
+ clk_disable(bank->dbck);
+
+ if (!off_mode)
+ continue;
+
+ /* If going to OFF, remove triggering for all
+ * non-wakeup GPIOs. Otherwise spurious IRQs will be
+ * generated. See OMAP2420 Errata item 1.101. */
+ if (!(bank->enabled_non_wakeup_gpios))
+ continue;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ bank->saved_datain = __raw_readl(bank->base +
+ OMAP24XX_GPIO_DATAIN);
+ l1 = __raw_readl(bank->base +
+ OMAP24XX_GPIO_FALLINGDETECT);
+ l2 = __raw_readl(bank->base +
+ OMAP24XX_GPIO_RISINGDETECT);
+ }
+
+ if (cpu_is_omap44xx()) {
+ bank->saved_datain = __raw_readl(bank->base +
+ OMAP4_GPIO_DATAIN);
+ l1 = __raw_readl(bank->base +
+ OMAP4_GPIO_FALLINGDETECT);
+ l2 = __raw_readl(bank->base +
+ OMAP4_GPIO_RISINGDETECT);
+ }
+
+ bank->saved_fallingdetect = l1;
+ bank->saved_risingdetect = l2;
+ l1 &= ~bank->enabled_non_wakeup_gpios;
+ l2 &= ~bank->enabled_non_wakeup_gpios;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ __raw_writel(l1, bank->base +
+ OMAP24XX_GPIO_FALLINGDETECT);
+ __raw_writel(l2, bank->base +
+ OMAP24XX_GPIO_RISINGDETECT);
+ }
+
+ if (cpu_is_omap44xx()) {
+ __raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT);
+ __raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT);
+ }
+
+ c++;
+ }
+ if (!c) {
+ workaround_enabled = 0;
+ return;
+ }
+ workaround_enabled = 1;
+}
+
+void omap2_gpio_resume_after_idle(void)
+{
+ int i;
+ int min = 0;
+
+ if (cpu_is_omap34xx())
+ min = 1;
+ for (i = min; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ u32 l = 0, gen, gen0, gen1;
+ int j;
+
+ for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
+ clk_enable(bank->dbck);
+
+ if (!workaround_enabled)
+ continue;
+
+ if (!(bank->enabled_non_wakeup_gpios))
+ continue;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ __raw_writel(bank->saved_fallingdetect,
+ bank->base + OMAP24XX_GPIO_FALLINGDETECT);
+ __raw_writel(bank->saved_risingdetect,
+ bank->base + OMAP24XX_GPIO_RISINGDETECT);
+ l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
+ }
+
+ if (cpu_is_omap44xx()) {
+ __raw_writel(bank->saved_fallingdetect,
+ bank->base + OMAP4_GPIO_FALLINGDETECT);
+ __raw_writel(bank->saved_risingdetect,
+ bank->base + OMAP4_GPIO_RISINGDETECT);
+ l = __raw_readl(bank->base + OMAP4_GPIO_DATAIN);
+ }
+
+ /* Check if any of the non-wakeup interrupt GPIOs have changed
+ * state. If so, generate an IRQ by software. This is
+ * horribly racy, but it's the best we can do to work around
+ * this silicon bug. */
+ l ^= bank->saved_datain;
+ l &= bank->enabled_non_wakeup_gpios;
+
+ /*
+ * No need to generate IRQs for the rising edge for gpio IRQs
+ * configured with falling edge only; and vice versa.
+ */
+ gen0 = l & bank->saved_fallingdetect;
+ gen0 &= bank->saved_datain;
+
+ gen1 = l & bank->saved_risingdetect;
+ gen1 &= ~(bank->saved_datain);
+
+ /* FIXME: Consider GPIO IRQs with level detections properly! */
+ gen = l & (~(bank->saved_fallingdetect) &
+ ~(bank->saved_risingdetect));
+ /* Consider all GPIO IRQs that need to be updated */
+ gen |= gen0 | gen1;
+
+ if (gen) {
+ u32 old0, old1;
+
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ old0 = __raw_readl(bank->base +
+ OMAP24XX_GPIO_LEVELDETECT0);
+ old1 = __raw_readl(bank->base +
+ OMAP24XX_GPIO_LEVELDETECT1);
+ __raw_writel(old0 | gen, bank->base +
+ OMAP24XX_GPIO_LEVELDETECT0);
+ __raw_writel(old1 | gen, bank->base +
+ OMAP24XX_GPIO_LEVELDETECT1);
+ __raw_writel(old0, bank->base +
+ OMAP24XX_GPIO_LEVELDETECT0);
+ __raw_writel(old1, bank->base +
+ OMAP24XX_GPIO_LEVELDETECT1);
+ }
+
+ if (cpu_is_omap44xx()) {
+ old0 = __raw_readl(bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ old1 = __raw_readl(bank->base +
+ OMAP4_GPIO_LEVELDETECT1);
+ __raw_writel(old0 | l, bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ __raw_writel(old1 | l, bank->base +
+ OMAP4_GPIO_LEVELDETECT1);
+ __raw_writel(old0, bank->base +
+ OMAP4_GPIO_LEVELDETECT0);
+ __raw_writel(old1, bank->base +
+ OMAP4_GPIO_LEVELDETECT1);
+ }
+ }
+ }
+
+}
+
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+/* save the registers of banks 2-6 */
+void omap_gpio_save_context(void)
+{
+ int i;
+
+ /* saving banks from 2-6 only since GPIO1 is in WKUP */
+ for (i = 1; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ gpio_context[i].irqenable1 =
+ __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1);
+ gpio_context[i].irqenable2 =
+ __raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE2);
+ gpio_context[i].wake_en =
+ __raw_readl(bank->base + OMAP24XX_GPIO_WAKE_EN);
+ gpio_context[i].ctrl =
+ __raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
+ gpio_context[i].oe =
+ __raw_readl(bank->base + OMAP24XX_GPIO_OE);
+ gpio_context[i].leveldetect0 =
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0);
+ gpio_context[i].leveldetect1 =
+ __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
+ gpio_context[i].risingdetect =
+ __raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT);
+ gpio_context[i].fallingdetect =
+ __raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
+ gpio_context[i].dataout =
+ __raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
+ }
+}
+
+/* restore the required registers of banks 2-6 */
+void omap_gpio_restore_context(void)
+{
+ int i;
+
+ for (i = 1; i < gpio_bank_count; i++) {
+ struct gpio_bank *bank = &gpio_bank[i];
+ __raw_writel(gpio_context[i].irqenable1,
+ bank->base + OMAP24XX_GPIO_IRQENABLE1);
+ __raw_writel(gpio_context[i].irqenable2,
+ bank->base + OMAP24XX_GPIO_IRQENABLE2);
+ __raw_writel(gpio_context[i].wake_en,
+ bank->base + OMAP24XX_GPIO_WAKE_EN);
+ __raw_writel(gpio_context[i].ctrl,
+ bank->base + OMAP24XX_GPIO_CTRL);
+ __raw_writel(gpio_context[i].oe,
+ bank->base + OMAP24XX_GPIO_OE);
+ __raw_writel(gpio_context[i].leveldetect0,
+ bank->base + OMAP24XX_GPIO_LEVELDETECT0);
+ __raw_writel(gpio_context[i].leveldetect1,
+ bank->base + OMAP24XX_GPIO_LEVELDETECT1);
+ __raw_writel(gpio_context[i].risingdetect,
+ bank->base + OMAP24XX_GPIO_RISINGDETECT);
+ __raw_writel(gpio_context[i].fallingdetect,
+ bank->base + OMAP24XX_GPIO_FALLINGDETECT);
+ __raw_writel(gpio_context[i].dataout,
+ bank->base + OMAP24XX_GPIO_DATAOUT);
+ }
+}
+#endif
+
+static struct platform_driver omap_gpio_driver = {
+ .probe = omap_gpio_probe,
+ .driver = {
+ .name = "omap_gpio",
+ },
+};
+
+/*
+ * GPIO driver registration needs to be done before
+ * machine_init functions access GPIO APIs.
+ * Hence omap_gpio_drv_reg() is a postcore_initcall.
+ */
+static int __init omap_gpio_drv_reg(void)
+{
+ return platform_driver_register(&omap_gpio_driver);
+}
+postcore_initcall(omap_gpio_drv_reg);
+
+static int __init omap_gpio_sysinit(void)
+{
+ mpuio_init();
+
+#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
+ if (cpu_is_omap16xx() || cpu_class_is_omap2())
+ register_syscore_ops(&omap_gpio_syscore_ops);
+#endif
+
+ return 0;
+}
+
+arch_initcall(omap_gpio_sysinit);
diff --git a/drivers/gpio/gpio-plat-samsung.c b/drivers/gpio/gpio-plat-samsung.c
new file mode 100644
index 000000000000..ea37c0461788
--- /dev/null
+++ b/drivers/gpio/gpio-plat-samsung.c
@@ -0,0 +1,206 @@
+/* arch/arm/plat-samsung/gpiolib.c
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * SAMSUNG - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
+#ifndef DEBUG_GPIO
+#define gpio_dbg(x...) do { } while (0)
+#else
+#define gpio_dbg(x...) printk(KERN_DEBUG x)
+#endif
+
+/* The samsung_gpiolib_4bit routines are to control the gpio banks where
+ * the gpio configuration register (GPxCON) has 4 bits per GPIO, as in the
+ * following example:
+ *
+ * base + 0x00: Control register, 4 bits per gpio
+ * gpio n: 4 bits starting at (4*n)
+ * 0000 = input, 0001 = output, others mean special-function
+ * base + 0x04: Data register, 1 bit per gpio
+ * bit n: data bit n
+ *
+ * Note: since the data register is one bit per gpio and is at base + 0x4,
+ * we can use s3c_gpiolib_get and s3c_gpiolib_set to change the state of
+ * the output.
+*/
+
+static int samsung_gpiolib_4bit_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long con;
+
+ con = __raw_readl(base + GPIOCON_OFF);
+ con &= ~(0xf << con_4bit_shift(offset));
+ __raw_writel(con, base + GPIOCON_OFF);
+
+ gpio_dbg("%s: %p: CON now %08lx\n", __func__, base, con);
+
+ return 0;
+}
+
+static int samsung_gpiolib_4bit_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+ void __iomem *base = ourchip->base;
+ unsigned long con;
+ unsigned long dat;
+
+ con = __raw_readl(base + GPIOCON_OFF);
+ con &= ~(0xf << con_4bit_shift(offset));
+ con |= 0x1 << con_4bit_shift(offset);
+
+ dat = __raw_readl(base + GPIODAT_OFF);
+
+ if (value)
+ dat |= 1 << offset;
+ else
+ dat &= ~(1 << offset);
+
+ __raw_writel(dat, base + GPIODAT_OFF);
+ __raw_writel(con, base + GPIOCON_OFF);
+ __raw_writel(dat, base + GPIODAT_OFF);
+
+ gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+
+ return 0;
+}
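A worked example of the register arithmetic above (a sketch, not part of the patch; it assumes con_4bit_shift(off) expands to off * 4, matching the layout described in the comment, and simply repeats the sequence in samsung_gpiolib_4bit_output() for a fixed pin):

	/* Drive gpio 3 of a 4-bit bank high as an output. */
	static void demo_4bit_gpio3_high(void __iomem *base)
	{
		u32 con = __raw_readl(base + GPIOCON_OFF);
		u32 dat = __raw_readl(base + GPIODAT_OFF);

		con &= ~(0xf << 12);	/* clear CON bits 15..12 (gpio 3) */
		con |= 0x1 << 12;	/* 0001 = output */
		dat |= 1 << 3;		/* data bit 3 high */

		__raw_writel(dat, base + GPIODAT_OFF);
		__raw_writel(con, base + GPIOCON_OFF);
		__raw_writel(dat, base + GPIODAT_OFF);
	}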
+
+/* The next set of routines are for the case where the GPIO configuration
+ * registers are 4 bits per GPIO but there is more than one register (the
+ * bank has more than 8 GPIOs).
+ *
+ * This case is similar to the 4 bit case, but the registers are as
+ * follows:
+ *
+ * base + 0x00: Control register, 4 bits per gpio (lower 8 GPIOs)
+ * gpio n: 4 bits starting at (4*n)
+ * 0000 = input, 0001 = output, others mean special-function
+ * base + 0x04: Control register, 4 bits per gpio (up to 8 additional GPIOs)
+ * gpio n: 4 bits starting at (4*n)
+ * 0000 = input, 0001 = output, others mean special-function
+ * base + 0x08: Data register, 1 bit per gpio
+ * bit n: data bit n
+ *
+ * To allow us to use the s3c_gpiolib_get and s3c_gpiolib_set routines we
+ * store the 'base + 0x4' address so that these routines see the data
+ * register at ourchip->base + 0x04.
+ */
+
+static int samsung_gpiolib_4bit2_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+ void __iomem *base = ourchip->base;
+ void __iomem *regcon = base;
+ unsigned long con;
+
+ if (offset > 7)
+ offset -= 8;
+ else
+ regcon -= 4;
+
+ con = __raw_readl(regcon);
+ con &= ~(0xf << con_4bit_shift(offset));
+ __raw_writel(con, regcon);
+
+ gpio_dbg("%s: %p: CON %08lx\n", __func__, base, con);
+
+ return 0;
+}
+
+static int samsung_gpiolib_4bit2_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct s3c_gpio_chip *ourchip = to_s3c_gpio(chip);
+ void __iomem *base = ourchip->base;
+ void __iomem *regcon = base;
+ unsigned long con;
+ unsigned long dat;
+ unsigned con_offset = offset;
+
+ if (con_offset > 7)
+ con_offset -= 8;
+ else
+ regcon -= 4;
+
+ con = __raw_readl(regcon);
+ con &= ~(0xf << con_4bit_shift(con_offset));
+ con |= 0x1 << con_4bit_shift(con_offset);
+
+ dat = __raw_readl(base + GPIODAT_OFF);
+
+ if (value)
+ dat |= 1 << offset;
+ else
+ dat &= ~(1 << offset);
+
+ __raw_writel(dat, base + GPIODAT_OFF);
+ __raw_writel(con, regcon);
+ __raw_writel(dat, base + GPIODAT_OFF);
+
+ gpio_dbg("%s: %p: CON %08lx, DAT %08lx\n", __func__, base, con, dat);
+
+ return 0;
+}
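A concrete illustration of the register split handled above (not part of the patch): with ourchip->base stored as the hardware base + 0x04, a pin at offset 3 has its CON field in the first control register (regcon = base - 4), while a pin at offset 10 uses the second control register (regcon = base) with con_offset 2; in both cases the data bit keeps its original offset in the register at base + GPIODAT_OFF.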
+
+void __init samsung_gpiolib_add_4bit(struct s3c_gpio_chip *chip)
+{
+ chip->chip.direction_input = samsung_gpiolib_4bit_input;
+ chip->chip.direction_output = samsung_gpiolib_4bit_output;
+ chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
+}
+
+void __init samsung_gpiolib_add_4bit2(struct s3c_gpio_chip *chip)
+{
+ chip->chip.direction_input = samsung_gpiolib_4bit2_input;
+ chip->chip.direction_output = samsung_gpiolib_4bit2_output;
+ chip->pm = __gpio_pm(&s3c_gpio_pm_4bit);
+}
+
+void __init samsung_gpiolib_add_4bit_chips(struct s3c_gpio_chip *chip,
+ int nr_chips)
+{
+ for (; nr_chips > 0; nr_chips--, chip++) {
+ samsung_gpiolib_add_4bit(chip);
+ s3c_gpiolib_add(chip);
+ }
+}
+
+void __init samsung_gpiolib_add_4bit2_chips(struct s3c_gpio_chip *chip,
+ int nr_chips)
+{
+ for (; nr_chips > 0; nr_chips--, chip++) {
+ samsung_gpiolib_add_4bit2(chip);
+ s3c_gpiolib_add(chip);
+ }
+}
+
+void __init samsung_gpiolib_add_2bit_chips(struct s3c_gpio_chip *chip,
+ int nr_chips)
+{
+ for (; nr_chips > 0; nr_chips--, chip++)
+ s3c_gpiolib_add(chip);
+}
diff --git a/drivers/gpio/gpio-s5pc100.c b/drivers/gpio/gpio-s5pc100.c
new file mode 100644
index 000000000000..2842394b28b5
--- /dev/null
+++ b/drivers/gpio/gpio-s5pc100.c
@@ -0,0 +1,355 @@
+/* linux/arch/arm/mach-s5pc100/gpiolib.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Copyright 2009 Samsung Electronics Co
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * S5PC100 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <mach/map.h>
+#include <mach/regs-gpio.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+
+/* S5PC100 GPIO bank summary:
+ *
+ * Bank GPIOs Style INT Type
+ * A0 8 4Bit GPIO_INT0
+ * A1 5 4Bit GPIO_INT1
+ * B 8 4Bit GPIO_INT2
+ * C 5 4Bit GPIO_INT3
+ * D 7 4Bit GPIO_INT4
+ * E0 8 4Bit GPIO_INT5
+ * E1 6 4Bit GPIO_INT6
+ * F0 8 4Bit GPIO_INT7
+ * F1 8 4Bit GPIO_INT8
+ * F2 8 4Bit GPIO_INT9
+ * F3 4 4Bit GPIO_INT10
+ * G0 8 4Bit GPIO_INT11
+ * G1 3 4Bit GPIO_INT12
+ * G2 7 4Bit GPIO_INT13
+ * G3 7 4Bit GPIO_INT14
+ * H0 8 4Bit WKUP_INT
+ * H1 8 4Bit WKUP_INT
+ * H2 8 4Bit WKUP_INT
+ * H3 8 4Bit WKUP_INT
+ * I 8 4Bit GPIO_INT15
+ * J0 8 4Bit GPIO_INT16
+ * J1 5 4Bit GPIO_INT17
+ * J2 8 4Bit GPIO_INT18
+ * J3 8 4Bit GPIO_INT19
+ * J4 4 4Bit GPIO_INT20
+ * K0 8 4Bit None
+ * K1 6 4Bit None
+ * K2 8 4Bit None
+ * K3 8 4Bit None
+ * L0 8 4Bit None
+ * L1 8 4Bit None
+ * L2 8 4Bit None
+ * L3 8 4Bit None
+ */
+
+static struct s3c_gpio_cfg gpio_cfg = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_eint = {
+ .cfg_eint = 0xf,
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_noint = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+/*
+ * GPIO bank's base address given the index of the bank in the
+ * list of all gpio banks.
+ */
+#define S5PC100_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
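With the 0x20 stride above, bank index 0 maps to S5P_VA_GPIO, index 1 to S5P_VA_GPIO + 0x20, index 2 to S5P_VA_GPIO + 0x40, and so on; this is why the comment below stresses that the banks must stay listed in order.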
+
+/*
+ * Following are the gpio banks in S5PC100.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * structure gpio_cfg in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: The initialization of 'base' member of s3c_gpio_chip structure
+ * uses the above macro and depends on the banks being listed in order here.
+ */
+static struct s3c_gpio_chip s5pc100_gpio_chips[] = {
+ {
+ .chip = {
+ .base = S5PC100_GPA0(0),
+ .ngpio = S5PC100_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPA1(0),
+ .ngpio = S5PC100_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPB(0),
+ .ngpio = S5PC100_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPC(0),
+ .ngpio = S5PC100_GPIO_C_NR,
+ .label = "GPC",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPD(0),
+ .ngpio = S5PC100_GPIO_D_NR,
+ .label = "GPD",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPE0(0),
+ .ngpio = S5PC100_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPE1(0),
+ .ngpio = S5PC100_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF0(0),
+ .ngpio = S5PC100_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF1(0),
+ .ngpio = S5PC100_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF2(0),
+ .ngpio = S5PC100_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPF3(0),
+ .ngpio = S5PC100_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG0(0),
+ .ngpio = S5PC100_GPIO_G0_NR,
+ .label = "GPG0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG1(0),
+ .ngpio = S5PC100_GPIO_G1_NR,
+ .label = "GPG1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG2(0),
+ .ngpio = S5PC100_GPIO_G2_NR,
+ .label = "GPG2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPG3(0),
+ .ngpio = S5PC100_GPIO_G3_NR,
+ .label = "GPG3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPI(0),
+ .ngpio = S5PC100_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ0(0),
+ .ngpio = S5PC100_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ1(0),
+ .ngpio = S5PC100_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ2(0),
+ .ngpio = S5PC100_GPIO_J2_NR,
+ .label = "GPJ2",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ3(0),
+ .ngpio = S5PC100_GPIO_J3_NR,
+ .label = "GPJ3",
+ },
+ }, {
+ .chip = {
+ .base = S5PC100_GPJ4(0),
+ .ngpio = S5PC100_GPIO_J4_NR,
+ .label = "GPJ4",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPK0(0),
+ .ngpio = S5PC100_GPIO_K0_NR,
+ .label = "GPK0",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPK1(0),
+ .ngpio = S5PC100_GPIO_K1_NR,
+ .label = "GPK1",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPK2(0),
+ .ngpio = S5PC100_GPIO_K2_NR,
+ .label = "GPK2",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPK3(0),
+ .ngpio = S5PC100_GPIO_K3_NR,
+ .label = "GPK3",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL0(0),
+ .ngpio = S5PC100_GPIO_L0_NR,
+ .label = "GPL0",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL1(0),
+ .ngpio = S5PC100_GPIO_L1_NR,
+ .label = "GPL1",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL2(0),
+ .ngpio = S5PC100_GPIO_L2_NR,
+ .label = "GPL2",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL3(0),
+ .ngpio = S5PC100_GPIO_L3_NR,
+ .label = "GPL3",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PC100_GPL4(0),
+ .ngpio = S5PC100_GPIO_L4_NR,
+ .label = "GPL4",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC00),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = S5PC100_GPH0(0),
+ .ngpio = S5PC100_GPIO_H0_NR,
+ .label = "GPH0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC20),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = S5PC100_GPH1(0),
+ .ngpio = S5PC100_GPIO_H1_NR,
+ .label = "GPH1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC40),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = S5PC100_GPH2(0),
+ .ngpio = S5PC100_GPIO_H2_NR,
+ .label = "GPH2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC60),
+ .config = &gpio_cfg_eint,
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = S5PC100_GPH3(0),
+ .ngpio = S5PC100_GPIO_H3_NR,
+ .label = "GPH3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+};
+
+static __init int s5pc100_gpiolib_init(void)
+{
+ struct s3c_gpio_chip *chip = s5pc100_gpio_chips;
+ int nr_chips = ARRAY_SIZE(s5pc100_gpio_chips);
+ int gpioint_group = 0;
+ int i;
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ chip->group = gpioint_group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5PC100_BANK_BASE(i);
+ }
+
+ samsung_gpiolib_add_4bit_chips(s5pc100_gpio_chips, nr_chips);
+ s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
+
+ return 0;
+}
+core_initcall(s5pc100_gpiolib_init);
diff --git a/drivers/gpio/gpio-s5pv210.c b/drivers/gpio/gpio-s5pv210.c
new file mode 100644
index 000000000000..1ba20a703e05
--- /dev/null
+++ b/drivers/gpio/gpio-s5pv210.c
@@ -0,0 +1,288 @@
+/* linux/arch/arm/mach-s5pv210/gpiolib.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * S5PV210 - GPIOlib support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
+#include <mach/map.h>
+
+static struct s3c_gpio_cfg gpio_cfg = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+static struct s3c_gpio_cfg gpio_cfg_noint = {
+ .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
+ .set_pull = s3c_gpio_setpull_updown,
+ .get_pull = s3c_gpio_getpull_updown,
+};
+
+/* GPIO bank's base address given the index of the bank in the
+ * list of all gpio banks.
+ */
+#define S5PV210_BANK_BASE(bank_nr) (S5P_VA_GPIO + ((bank_nr) * 0x20))
+
+/*
+ * Following are the gpio banks in S5PV210.
+ *
+ * The 'config' member, when left NULL, is initialized to the default
+ * structure gpio_cfg in the init function below.
+ *
+ * The 'base' member is also initialized in the init function below.
+ * Note: The initialization of 'base' member of s3c_gpio_chip structure
+ * uses the above macro and depends on the banks being listed in order here.
+ */
+static struct s3c_gpio_chip s5pv210_gpio_4bit[] = {
+ {
+ .chip = {
+ .base = S5PV210_GPA0(0),
+ .ngpio = S5PV210_GPIO_A0_NR,
+ .label = "GPA0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPA1(0),
+ .ngpio = S5PV210_GPIO_A1_NR,
+ .label = "GPA1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPB(0),
+ .ngpio = S5PV210_GPIO_B_NR,
+ .label = "GPB",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPC0(0),
+ .ngpio = S5PV210_GPIO_C0_NR,
+ .label = "GPC0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPC1(0),
+ .ngpio = S5PV210_GPIO_C1_NR,
+ .label = "GPC1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPD0(0),
+ .ngpio = S5PV210_GPIO_D0_NR,
+ .label = "GPD0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPD1(0),
+ .ngpio = S5PV210_GPIO_D1_NR,
+ .label = "GPD1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPE0(0),
+ .ngpio = S5PV210_GPIO_E0_NR,
+ .label = "GPE0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPE1(0),
+ .ngpio = S5PV210_GPIO_E1_NR,
+ .label = "GPE1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF0(0),
+ .ngpio = S5PV210_GPIO_F0_NR,
+ .label = "GPF0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF1(0),
+ .ngpio = S5PV210_GPIO_F1_NR,
+ .label = "GPF1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF2(0),
+ .ngpio = S5PV210_GPIO_F2_NR,
+ .label = "GPF2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPF3(0),
+ .ngpio = S5PV210_GPIO_F3_NR,
+ .label = "GPF3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG0(0),
+ .ngpio = S5PV210_GPIO_G0_NR,
+ .label = "GPG0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG1(0),
+ .ngpio = S5PV210_GPIO_G1_NR,
+ .label = "GPG1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG2(0),
+ .ngpio = S5PV210_GPIO_G2_NR,
+ .label = "GPG2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPG3(0),
+ .ngpio = S5PV210_GPIO_G3_NR,
+ .label = "GPG3",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_GPI(0),
+ .ngpio = S5PV210_GPIO_I_NR,
+ .label = "GPI",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ0(0),
+ .ngpio = S5PV210_GPIO_J0_NR,
+ .label = "GPJ0",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ1(0),
+ .ngpio = S5PV210_GPIO_J1_NR,
+ .label = "GPJ1",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ2(0),
+ .ngpio = S5PV210_GPIO_J2_NR,
+ .label = "GPJ2",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ3(0),
+ .ngpio = S5PV210_GPIO_J3_NR,
+ .label = "GPJ3",
+ },
+ }, {
+ .chip = {
+ .base = S5PV210_GPJ4(0),
+ .ngpio = S5PV210_GPIO_J4_NR,
+ .label = "GPJ4",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP01(0),
+ .ngpio = S5PV210_GPIO_MP01_NR,
+ .label = "MP01",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP02(0),
+ .ngpio = S5PV210_GPIO_MP02_NR,
+ .label = "MP02",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP03(0),
+ .ngpio = S5PV210_GPIO_MP03_NR,
+ .label = "MP03",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP04(0),
+ .ngpio = S5PV210_GPIO_MP04_NR,
+ .label = "MP04",
+ },
+ }, {
+ .config = &gpio_cfg_noint,
+ .chip = {
+ .base = S5PV210_MP05(0),
+ .ngpio = S5PV210_GPIO_MP05_NR,
+ .label = "MP05",
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC00),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(0),
+ .chip = {
+ .base = S5PV210_GPH0(0),
+ .ngpio = S5PV210_GPIO_H0_NR,
+ .label = "GPH0",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC20),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(8),
+ .chip = {
+ .base = S5PV210_GPH1(0),
+ .ngpio = S5PV210_GPIO_H1_NR,
+ .label = "GPH1",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC40),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(16),
+ .chip = {
+ .base = S5PV210_GPH2(0),
+ .ngpio = S5PV210_GPIO_H2_NR,
+ .label = "GPH2",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ }, {
+ .base = (S5P_VA_GPIO + 0xC60),
+ .config = &gpio_cfg_noint,
+ .irq_base = IRQ_EINT(24),
+ .chip = {
+ .base = S5PV210_GPH3(0),
+ .ngpio = S5PV210_GPIO_H3_NR,
+ .label = "GPH3",
+ .to_irq = samsung_gpiolib_to_irq,
+ },
+ },
+};
+
+static __init int s5pv210_gpiolib_init(void)
+{
+ struct s3c_gpio_chip *chip = s5pv210_gpio_4bit;
+ int nr_chips = ARRAY_SIZE(s5pv210_gpio_4bit);
+ int gpioint_group = 0;
+ int i = 0;
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (chip->config == NULL) {
+ chip->config = &gpio_cfg;
+ chip->group = gpioint_group++;
+ }
+ if (chip->base == NULL)
+ chip->base = S5PV210_BANK_BASE(i);
+ }
+
+ samsung_gpiolib_add_4bit_chips(s5pv210_gpio_4bit, nr_chips);
+ s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
+
+ return 0;
+}
+core_initcall(s5pv210_gpiolib_init);
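For context, a minimal sketch of how board code would typically consume one of the banks registered above through the generic gpiolib API. The pin (S5PV210_GPA0(3)), the label and the initcall are illustrative assumptions, not part of this patch.

#include <linux/init.h>
#include <linux/gpio.h>	/* on ARM this pulls in mach/gpio.h, which defines S5PV210_GPA0() */

static int __init example_board_pin_init(void)
{
	int err;

	/* fourth pin of the GPA0 bank declared above */
	err = gpio_request(S5PV210_GPA0(3), "example-pin");
	if (err)
		return err;

	/* configure it as an output, initially driven high */
	return gpio_direction_output(S5PV210_GPA0(3), 1);
}
device_initcall(example_board_pin_init);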
diff --git a/drivers/gpio/gpio-u300.c b/drivers/gpio/gpio-u300.c
new file mode 100644
index 000000000000..d92790140fe5
--- /dev/null
+++ b/drivers/gpio/gpio-u300.c
@@ -0,0 +1,700 @@
+/*
+ *
+ * arch/arm/mach-u300/gpio.c
+ *
+ *
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * U300 GPIO module.
+ * This driver can drive either of the two basic GPIO cores
+ * available in the U300 platforms:
+ * COH 901 335 - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0)
+ * COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0)
+ * Notice that you also have inline macros in <asm-arch/gpio.h>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+/* Reference to GPIO block clock */
+static struct clk *clk;
+
+/* Memory resource */
+static struct resource *memres;
+static void __iomem *virtbase;
+static struct device *gpiodev;
+
+struct u300_gpio_port {
+ const char *name;
+ int irq;
+ int number;
+};
+
+
+static struct u300_gpio_port gpio_ports[] = {
+ {
+ .name = "gpio0",
+ .number = 0,
+ },
+ {
+ .name = "gpio1",
+ .number = 1,
+ },
+ {
+ .name = "gpio2",
+ .number = 2,
+ },
+#ifdef U300_COH901571_3
+ {
+ .name = "gpio3",
+ .number = 3,
+ },
+ {
+ .name = "gpio4",
+ .number = 4,
+ },
+#ifdef CONFIG_MACH_U300_BS335
+ {
+ .name = "gpio5",
+ .number = 5,
+ },
+ {
+ .name = "gpio6",
+ .number = 6,
+ },
+#endif
+#endif
+
+};
+
+
+#ifdef U300_COH901571_3
+
+/* Default output values */
+#define DEFAULT_OUTPUT_LOW 0
+#define DEFAULT_OUTPUT_HIGH 1
+
+/* GPIO Pull-Up status */
+#define DISABLE_PULL_UP 0
+#define ENABLE_PULL_UP 1
+
+#define GPIO_NOT_USED 0
+#define GPIO_IN 1
+#define GPIO_OUT 2
+
+struct u300_gpio_configuration_data {
+ unsigned char pin_usage;
+ unsigned char default_output_value;
+ unsigned char pull_up;
+};
+
+/* Initial configuration */
+const struct u300_gpio_configuration_data
+u300_gpio_config[U300_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
+#ifdef CONFIG_MACH_U300_BS335
+ /* Port 0, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 1, pins 0-7 */
+ {
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 2, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ },
+ /* Port 3, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 4, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 5, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 6, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ }
+#endif
+
+#ifdef CONFIG_MACH_U300_BS365
+ /* Port 0, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 1, pins 0-7 */
+ {
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_HIGH, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP}
+ },
+ /* Port 2, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, DISABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ },
+ /* Port 3, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ },
+ /* Port 4, pins 0-7 */
+ {
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_IN, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ /* These 4 pins don't exist on DB3210 */
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP},
+ {GPIO_OUT, DEFAULT_OUTPUT_LOW, ENABLE_PULL_UP}
+ }
+#endif
+};
+#endif
+
+
+/* No users == we can power down GPIO */
+static int gpio_users;
+
+struct gpio_struct {
+ int (*callback)(void *);
+ void *data;
+ int users;
+};
+
+static struct gpio_struct gpio_pin[U300_GPIO_MAX];
+
+/*
+ * Let drivers register a callback in order to be notified when there is
+ * an interrupt on the GPIO pin.
+ */
+int gpio_register_callback(unsigned gpio, int (*func)(void *arg), void *data)
+{
+ if (gpio_pin[gpio].callback)
+ dev_warn(gpiodev, "%s: WARNING: callback already "
+ "registered for gpio pin#%d\n", __func__, gpio);
+ gpio_pin[gpio].callback = func;
+ gpio_pin[gpio].data = data;
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_register_callback);
+
+int gpio_unregister_callback(unsigned gpio)
+{
+ if (!gpio_pin[gpio].callback)
+ dev_warn(gpiodev, "%s: WARNING: callback already "
+ "unregistered for gpio pin#%d\n", __func__, gpio);
+ gpio_pin[gpio].callback = NULL;
+ gpio_pin[gpio].data = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_unregister_callback);
+
+/* Non-zero means valid */
+int gpio_is_valid(int number)
+{
+ if (number >= 0 &&
+ number < (U300_GPIO_NUM_PORTS * U300_GPIO_PINS_PER_PORT))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(gpio_is_valid);
+
+int gpio_request(unsigned gpio, const char *label)
+{
+ if (gpio_pin[gpio].users)
+ return -EINVAL;
+ else
+ gpio_pin[gpio].users++;
+
+ gpio_users++;
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_request);
+
+void gpio_free(unsigned gpio)
+{
+ gpio_users--;
+ gpio_pin[gpio].users--;
+ if (unlikely(gpio_pin[gpio].users < 0)) {
+ dev_warn(gpiodev, "warning: gpio#%d release mismatch\n",
+ gpio);
+ gpio_pin[gpio].users = 0;
+ }
+
+ return;
+}
+EXPORT_SYMBOL(gpio_free);
+
+/* This returns zero or nonzero */
+int gpio_get_value(unsigned gpio)
+{
+ return readl(virtbase + U300_GPIO_PXPDIR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING) & (1 << (gpio & 0x07));
+}
+EXPORT_SYMBOL(gpio_get_value);
+
+/*
+ * We hope that the compiler will optimize away the unused branch
+ * in case "value" is a constant
+ */
+void gpio_set_value(unsigned gpio, int value)
+{
+ u32 val;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (value) {
+ /* set */
+ val = readl(virtbase + U300_GPIO_PXPDOR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING)
+ & (1 << (gpio & 0x07));
+ writel(val | (1 << (gpio & 0x07)), virtbase +
+ U300_GPIO_PXPDOR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
+ } else {
+ /* clear */
+ val = readl(virtbase + U300_GPIO_PXPDOR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING)
+ & (1 << (gpio & 0x07));
+ writel(val & ~(1 << (gpio & 0x07)), virtbase +
+ U300_GPIO_PXPDOR +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(gpio_set_value);
+
+int gpio_direction_input(unsigned gpio)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (gpio > U300_GPIO_MAX)
+ return -EINVAL;
+
+ local_irq_save(flags);
+ val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ /* Mask out this pin */
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1));
+ /* This is not needed since it sets the bits to zero. */
+ /* val |= (U300_GPIO_PXPCR_PIN_MODE_INPUT << (gpio*2)); */
+ writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+ return 0;
+}
+EXPORT_SYMBOL(gpio_direction_input);
+
+int gpio_direction_output(unsigned gpio, int value)
+{
+ unsigned long flags;
+ u32 val;
+
+ if (gpio > U300_GPIO_MAX)
+ return -EINVAL;
+
+ local_irq_save(flags);
+ val = readl(virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ /* Mask out this pin */
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((gpio & 0x07) << 1));
+ /*
+ * FIXME: configure for push/pull, open drain or open source per pin
+ * in setup. The current driver will only support push/pull.
+ */
+ val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
+ << ((gpio & 0x07) << 1));
+ writel(val, virtbase + U300_GPIO_PXPCR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ gpio_set_value(gpio, value);
+ local_irq_restore(flags);
+ return 0;
+}
+EXPORT_SYMBOL(gpio_direction_output);
+
+/*
+ * Enable an IRQ, edge is rising edge (!= 0) or falling edge (==0).
+ */
+void enable_irq_on_gpio_pin(unsigned gpio, int edge)
+{
+ u32 val;
+ unsigned long flags;
+ local_irq_save(flags);
+
+ val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ val |= (1 << (gpio & 0x07));
+ writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ val = readl(virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ if (edge)
+ val |= (1 << (gpio & 0x07));
+ else
+ val &= ~(1 << (gpio & 0x07));
+ writel(val, virtbase + U300_GPIO_PXICR + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(enable_irq_on_gpio_pin);
+
+void disable_irq_on_gpio_pin(unsigned gpio)
+{
+ u32 val;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ val = readl(virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ val &= ~(1 << (gpio & 0x07));
+ writel(val, virtbase + U300_GPIO_PXIEN + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(disable_irq_on_gpio_pin);
+
+/* Enable (value == 0) or disable (value == 1) internal pullup */
+void gpio_pullup(unsigned gpio, int value)
+{
+ u32 val;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (value) {
+ val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ writel(val | (1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
+ } else {
+ val = readl(virtbase + U300_GPIO_PXPER + PIN_TO_PORT(gpio) *
+ U300_GPIO_PORTX_SPACING);
+ writel(val & ~(1 << (gpio & 0x07)), virtbase + U300_GPIO_PXPER +
+ PIN_TO_PORT(gpio) * U300_GPIO_PORTX_SPACING);
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(gpio_pullup);
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ struct u300_gpio_port *port = dev_id;
+ u32 val;
+ int pin;
+
+ /* Read event register */
+ val = readl(virtbase + U300_GPIO_PXIEV + port->number *
+ U300_GPIO_PORTX_SPACING);
+ /* Mask with enable register */
+ val &= readl(virtbase + U300_GPIO_PXIEV + port->number *
+ U300_GPIO_PORTX_SPACING);
+ /* Mask relevant bits */
+ val &= U300_GPIO_PXIEV_ALL_IRQ_EVENT_MASK;
+ /* ACK IRQ (clear event) */
+ writel(val, virtbase + U300_GPIO_PXIEV + port->number *
+ U300_GPIO_PORTX_SPACING);
+ /* Dispatch the registered callback for each pin that raised an event */
+ while (val != 0) {
+ unsigned gpio;
+
+ pin = __ffs(val);
+ /* mask off this pin */
+ val &= ~(1 << pin);
+ gpio = (port->number << 3) + pin;
+
+ if (gpio_pin[gpio].callback)
+ (void)gpio_pin[gpio].callback(gpio_pin[gpio].data);
+ else
+ dev_dbg(gpiodev, "stray GPIO IRQ on line %d\n",
+ gpio);
+ }
+ return IRQ_HANDLED;
+}
+
+static void gpio_set_initial_values(void)
+{
+#ifdef U300_COH901571_3
+ int i, j;
+ unsigned long flags;
+ u32 val;
+
+ /* Write default values to all pins */
+ for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
+ val = 0;
+ for (j = 0; j < 8; j++)
+ val |= (u32) (u300_gpio_config[i][j].default_output_value != DEFAULT_OUTPUT_LOW) << j;
+ local_irq_save(flags);
+ writel(val, virtbase + U300_GPIO_PXPDOR + i * U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+ }
+
+ /*
+ * For each port, put all pins that are set to either 'GPIO_OUT' or
+ * 'GPIO_NOT_USED' into output mode and 'GPIO_IN' into input mode, and
+ * initialize the default value on the outputs.
+ */
+ for (i = 0; i < U300_GPIO_NUM_PORTS; i++) {
+ for (j = 0; j < U300_GPIO_PINS_PER_PORT; j++) {
+ local_irq_save(flags);
+ val = readl(virtbase + U300_GPIO_PXPCR +
+ i * U300_GPIO_PORTX_SPACING);
+ /* Mask out this pin */
+ val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << (j << 1));
+
+ if (u300_gpio_config[i][j].pin_usage != GPIO_IN)
+ val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL << (j << 1));
+ writel(val, virtbase + U300_GPIO_PXPCR +
+ i * U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+ }
+ }
+
+ /* Enable or disable the internal pull-ups in the GPIO ASIC block */
+ for (i = 0; i < U300_GPIO_MAX; i++) {
+ val = 0;
+ for (j = 0; j < 8; j++)
+ val |= (u32)((u300_gpio_config[i][j].pull_up == DISABLE_PULL_UP) << j);
+ local_irq_save(flags);
+ writel(val, virtbase + U300_GPIO_PXPER + i * U300_GPIO_PORTX_SPACING);
+ local_irq_restore(flags);
+ }
+#endif
+}
+
+static int __init gpio_probe(struct platform_device *pdev)
+{
+ u32 val;
+ int err = 0;
+ int i;
+ int num_irqs;
+
+ gpiodev = &pdev->dev;
+ memset(gpio_pin, 0, sizeof(gpio_pin));
+
+ /* Get GPIO clock */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ dev_err(gpiodev, "could not get GPIO clock\n");
+ goto err_no_clk;
+ }
+ err = clk_enable(clk);
+ if (err) {
+ dev_err(gpiodev, "could not enable GPIO clock\n");
+ goto err_no_clk_enable;
+ }
+
+ memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!memres)
+ goto err_no_resource;
+
+ if (request_mem_region(memres->start, memres->end - memres->start, "GPIO Controller")
+ == NULL) {
+ err = -ENODEV;
+ goto err_no_ioregion;
+ }
+
+ virtbase = ioremap(memres->start, resource_size(memres));
+ if (!virtbase) {
+ err = -ENOMEM;
+ goto err_no_ioremap;
+ }
+ dev_info(gpiodev, "remapped 0x%08x to %p\n",
+ memres->start, virtbase);
+
+#ifdef U300_COH901335
+ dev_info(gpiodev, "initializing GPIO Controller COH 901 335\n");
+ /* Turn on the GPIO block */
+ writel(U300_GPIO_CR_BLOCK_CLOCK_ENABLE, virtbase + U300_GPIO_CR);
+#endif
+
+#ifdef U300_COH901571_3
+ dev_info(gpiodev, "initializing GPIO Controller COH 901 571/3\n");
+ val = readl(virtbase + U300_GPIO_CR);
+ dev_info(gpiodev, "COH901571/3 block version: %d, " \
+ "number of cores: %d\n",
+ ((val & 0x0000FE00) >> 9),
+ ((val & 0x000001FC) >> 2));
+ writel(U300_GPIO_CR_BLOCK_CLKRQ_ENABLE, virtbase + U300_GPIO_CR);
+#endif
+
+ gpio_set_initial_values();
+
+ for (num_irqs = 0 ; num_irqs < U300_GPIO_NUM_PORTS; num_irqs++) {
+
+ gpio_ports[num_irqs].irq =
+ platform_get_irq_byname(pdev,
+ gpio_ports[num_irqs].name);
+
+ err = request_irq(gpio_ports[num_irqs].irq,
+ gpio_irq_handler, IRQF_DISABLED,
+ gpio_ports[num_irqs].name,
+ &gpio_ports[num_irqs]);
+ if (err) {
+ dev_err(gpiodev, "cannot allocate IRQ for %s!\n",
+ gpio_ports[num_irqs].name);
+ goto err_no_irq;
+ }
+ /* Turns off PortX_irq_force */
+ writel(0x0, virtbase + U300_GPIO_PXIFR +
+ num_irqs * U300_GPIO_PORTX_SPACING);
+ }
+
+ return 0;
+
+ err_no_irq:
+ for (i = 0; i < num_irqs; i++)
+ free_irq(gpio_ports[i].irq, &gpio_ports[i]);
+ iounmap(virtbase);
+ err_no_ioremap:
+ release_mem_region(memres->start, memres->end - memres->start);
+ err_no_ioregion:
+ err_no_resource:
+ clk_disable(clk);
+ err_no_clk_enable:
+ clk_put(clk);
+ err_no_clk:
+ dev_info(gpiodev, "module ERROR:%d\n", err);
+ return err;
+}
+
+static int __exit gpio_remove(struct platform_device *pdev)
+{
+ int i;
+
+ /* Turn off the GPIO block */
+ writel(0x00000000U, virtbase + U300_GPIO_CR);
+ for (i = 0 ; i < U300_GPIO_NUM_PORTS; i++)
+ free_irq(gpio_ports[i].irq, &gpio_ports[i]);
+ iounmap(virtbase);
+ release_mem_region(memres->start, memres->end - memres->start);
+ clk_disable(clk);
+ clk_put(clk);
+ return 0;
+}
+
+static struct platform_driver gpio_driver = {
+ .driver = {
+ .name = "u300-gpio",
+ },
+ .remove = __exit_p(gpio_remove),
+};
+
+
+static int __init u300_gpio_init(void)
+{
+ return platform_driver_probe(&gpio_driver, gpio_probe);
+}
+
+static void __exit u300_gpio_exit(void)
+{
+ platform_driver_unregister(&gpio_driver);
+}
+
+arch_initcall(u300_gpio_init);
+module_exit(u300_gpio_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+
+#ifdef U300_COH901571_3
+MODULE_DESCRIPTION("ST-Ericsson AB COH 901 571/3 GPIO driver");
+#endif
+
+#ifdef U300_COH901335
+MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335 GPIO driver");
+#endif
+
+MODULE_LICENSE("GPL");
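A short, hypothetical consumer of the U300-specific callback interface exported above (gpio_register_callback() together with enable_irq_on_gpio_pin()). The pin number, label and handler are made up for illustration, and the prototypes are assumed to come from the platform's <mach/gpio.h>.

#include <linux/kernel.h>
#include <linux/gpio.h>
#include <mach/gpio.h>	/* assumed location of the U300-specific prototypes */

static int example_pin = 10;	/* port 1, pin 2 -- arbitrary choice */

static int example_pin_event(void *data)
{
	/* invoked from gpio_irq_handler() when this pin's event bit is set */
	pr_info("u300 gpio event on pin %d\n", *(int *)data);
	return 0;
}

static int example_setup(void)
{
	int err = gpio_request(example_pin, "example");

	if (err)
		return err;
	gpio_direction_input(example_pin);
	gpio_register_callback(example_pin, example_pin_event, &example_pin);
	enable_irq_on_gpio_pin(example_pin, 1);	/* != 0 selects rising edge */
	return 0;
}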
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 137a8ca67822..a971e3d043ba 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1296,7 +1296,7 @@ EXPORT_SYMBOL_GPL(gpio_request_one);
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
-int gpio_request_array(struct gpio *array, size_t num)
+int gpio_request_array(const struct gpio *array, size_t num)
{
int i, err;
@@ -1319,7 +1319,7 @@ EXPORT_SYMBOL_GPL(gpio_request_array);
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
-void gpio_free_array(struct gpio *array, size_t num)
+void gpio_free_array(const struct gpio *array, size_t num)
{
while (num--)
gpio_free((array++)->gpio);
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 1b06f67e1f69..bd6571e0097a 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
/*
* Langwell chip has 64 pins and thus there are 2 32bit registers to control
@@ -63,6 +64,7 @@ struct lnw_gpio {
void *reg_base;
spinlock_t lock;
unsigned irq_base;
+ struct pci_dev *pdev;
};
static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
@@ -104,11 +106,18 @@ static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u32 value;
unsigned long flags;
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value &= ~BIT(offset % 32);
writel(value, gpdr);
spin_unlock_irqrestore(&lnw->lock, flags);
+
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -120,11 +129,19 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
unsigned long flags;
lnw_gpio_set(chip, offset, value);
+
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
value = readl(gpdr);
value |= BIT(offset % 32);
writel(value, gpdr);
spin_unlock_irqrestore(&lnw->lock, flags);
+
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -145,6 +162,10 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
if (gpio >= lnw->chip.ngpio)
return -EINVAL;
+
+ if (lnw->pdev)
+ pm_runtime_get(&lnw->pdev->dev);
+
spin_lock_irqsave(&lnw->lock, flags);
if (type & IRQ_TYPE_EDGE_RISING)
value = readl(grer) | BIT(gpio % 32);
@@ -159,6 +180,9 @@ static int lnw_irq_type(struct irq_data *d, unsigned type)
writel(value, gfer);
spin_unlock_irqrestore(&lnw->lock, flags);
+ if (lnw->pdev)
+ pm_runtime_put(&lnw->pdev->dev);
+
return 0;
}
@@ -211,6 +235,39 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
chip->irq_eoi(data);
}
+#ifdef CONFIG_PM
+static int lnw_gpio_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int lnw_gpio_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int lnw_gpio_runtime_idle(struct device *dev)
+{
+ int err = pm_schedule_suspend(dev, 500);
+
+ if (!err)
+ return 0;
+
+ return -EBUSY;
+}
+
+#else
+#define lnw_gpio_runtime_suspend NULL
+#define lnw_gpio_runtime_resume NULL
+#define lnw_gpio_runtime_idle NULL
+#endif
+
+static const struct dev_pm_ops lnw_gpio_pm_ops = {
+ .runtime_suspend = lnw_gpio_runtime_suspend,
+ .runtime_resume = lnw_gpio_runtime_resume,
+ .runtime_idle = lnw_gpio_runtime_idle,
+};
+
static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -270,6 +327,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->chip.base = gpio_base;
lnw->chip.ngpio = id->driver_data;
lnw->chip.can_sleep = 0;
+ lnw->pdev = pdev;
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
if (retval) {
@@ -285,6 +343,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
}
spin_lock_init(&lnw->lock);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
goto done;
err5:
kfree(lnw);
@@ -302,6 +364,9 @@ static struct pci_driver lnw_gpio_driver = {
.name = "langwell_gpio",
.id_table = lnw_gpio_ids,
.probe = lnw_gpio_probe,
+ .driver = {
+ .pm = &lnw_gpio_pm_ops,
+ },
};
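The langwell hunks above wrap every register access in a pm_runtime_get()/pm_runtime_put() pair and let the runtime_idle callback schedule a delayed suspend. A condensed, hypothetical sketch of that pattern (the function and names are not from the patch):

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

/* In probe, once the hardware is set up, the patch hands the device over to
 * runtime PM with pm_runtime_put_noidle() and pm_runtime_allow().  Each
 * register accessor then brackets the MMIO access:
 */
static void example_touch_register(struct pci_dev *pdev, void __iomem *reg)
{
	pm_runtime_get(&pdev->dev);	/* take a runtime PM reference */
	writel(0, reg);			/* the real register work goes here */
	pm_runtime_put(&pdev->dev);	/* drop it; runtime_idle may then
					 * schedule the delayed suspend */
}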
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 78a843947d82..0451d7ac94ac 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -24,33 +24,46 @@
#include <linux/of_gpio.h>
#endif
-#define PCA953X_INPUT 0
-#define PCA953X_OUTPUT 1
-#define PCA953X_INVERT 2
-#define PCA953X_DIRECTION 3
-
-#define PCA953X_GPIOS 0x00FF
-#define PCA953X_INT 0x0100
+#define PCA953X_INPUT 0
+#define PCA953X_OUTPUT 1
+#define PCA953X_INVERT 2
+#define PCA953X_DIRECTION 3
+
+#define PCA957X_IN 0
+#define PCA957X_INVRT 1
+#define PCA957X_BKEN 2
+#define PCA957X_PUPD 3
+#define PCA957X_CFG 4
+#define PCA957X_OUT 5
+#define PCA957X_MSK 6
+#define PCA957X_INTS 7
+
+#define PCA_GPIO_MASK 0x00FF
+#define PCA_INT 0x0100
+#define PCA953X_TYPE 0x1000
+#define PCA957X_TYPE 0x2000
static const struct i2c_device_id pca953x_id[] = {
- { "pca9534", 8 | PCA953X_INT, },
- { "pca9535", 16 | PCA953X_INT, },
- { "pca9536", 4, },
- { "pca9537", 4 | PCA953X_INT, },
- { "pca9538", 8 | PCA953X_INT, },
- { "pca9539", 16 | PCA953X_INT, },
- { "pca9554", 8 | PCA953X_INT, },
- { "pca9555", 16 | PCA953X_INT, },
- { "pca9556", 8, },
- { "pca9557", 8, },
-
- { "max7310", 8, },
- { "max7312", 16 | PCA953X_INT, },
- { "max7313", 16 | PCA953X_INT, },
- { "max7315", 8 | PCA953X_INT, },
- { "pca6107", 8 | PCA953X_INT, },
- { "tca6408", 8 | PCA953X_INT, },
- { "tca6416", 16 | PCA953X_INT, },
+ { "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9536", 4 | PCA953X_TYPE, },
+ { "pca9537", 4 | PCA953X_TYPE | PCA_INT, },
+ { "pca9538", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9539", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9554", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca9555", 16 | PCA953X_TYPE | PCA_INT, },
+ { "pca9556", 8 | PCA953X_TYPE, },
+ { "pca9557", 8 | PCA953X_TYPE, },
+ { "pca9574", 8 | PCA957X_TYPE | PCA_INT, },
+ { "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
+
+ { "max7310", 8 | PCA953X_TYPE, },
+ { "max7312", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7313", 16 | PCA953X_TYPE | PCA_INT, },
+ { "max7315", 8 | PCA953X_TYPE | PCA_INT, },
+ { "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
+ { "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
/* NYET: { "tca6424", 24, }, */
{ }
};
@@ -75,16 +88,32 @@ struct pca953x_chip {
struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
const char *const *names;
+ int chip_type;
};
static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
{
- int ret;
+ int ret = 0;
if (chip->gpio_chip.ngpio <= 8)
ret = i2c_smbus_write_byte_data(chip->client, reg, val);
- else
- ret = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+ else {
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ ret = i2c_smbus_write_word_data(chip->client,
+ reg << 1, val);
+ break;
+ case PCA957X_TYPE:
+ ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
+ val & 0xff);
+ if (ret < 0)
+ break;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ (reg << 1) + 1,
+ (val & 0xff00) >> 8);
+ break;
+ }
+ }
if (ret < 0) {
dev_err(&chip->client->dev, "failed writing register\n");
@@ -116,13 +145,22 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
reg_val = chip->reg_direction | (1u << off);
- ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_DIRECTION;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_CFG;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -138,7 +176,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -149,7 +187,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
else
reg_val = chip->reg_output & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_OUTPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_OUT;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -157,7 +203,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
/* then direction */
reg_val = chip->reg_direction & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_DIRECTION;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_CFG;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -172,12 +226,20 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
mutex_lock(&chip->i2c_lock);
- ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &reg_val);
mutex_unlock(&chip->i2c_lock);
if (ret < 0) {
/* NOTE: diagnostic already emitted; that's all we should
@@ -194,7 +256,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip;
uint16_t reg_val;
- int ret;
+ int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -204,7 +266,15 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
else
reg_val = chip->reg_output & ~(1u << off);
- ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_OUTPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_OUT;
+ break;
+ }
+ ret = pca953x_write_reg(chip, offset, reg_val);
if (ret)
goto exit;
@@ -322,9 +392,17 @@ static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
uint16_t old_stat;
uint16_t pending;
uint16_t trigger;
- int ret;
-
- ret = pca953x_read_reg(chip, PCA953X_INPUT, &cur_stat);
+ int ret, offset = 0;
+
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &cur_stat);
if (ret)
return 0;
@@ -372,14 +450,21 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- int ret;
+ int ret, offset = 0;
if (pdata->irq_base != -1
- && (id->driver_data & PCA953X_INT)) {
+ && (id->driver_data & PCA_INT)) {
int lvl;
- ret = pca953x_read_reg(chip, PCA953X_INPUT,
- &chip->irq_stat);
+ switch (chip->chip_type) {
+ case PCA953X_TYPE:
+ offset = PCA953X_INPUT;
+ break;
+ case PCA957X_TYPE:
+ offset = PCA957X_IN;
+ break;
+ }
+ ret = pca953x_read_reg(chip, offset, &chip->irq_stat);
if (ret)
goto out_failed;
@@ -439,7 +524,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
struct i2c_client *client = chip->client;
struct pca953x_platform_data *pdata = client->dev.platform_data;
- if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT))
+ if (pdata->irq_base != -1 && (id->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
@@ -499,12 +584,65 @@ pca953x_get_alt_pdata(struct i2c_client *client)
}
#endif
+static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
+{
+ int ret;
+
+ ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
+ if (ret)
+ goto out;
+
+ ret = pca953x_read_reg(chip, PCA953X_DIRECTION,
+ &chip->reg_direction);
+ if (ret)
+ goto out;
+
+ /* set platform specific polarity inversion */
+ ret = pca953x_write_reg(chip, PCA953X_INVERT, invert);
+ if (ret)
+ goto out;
+ return 0;
+out:
+ return ret;
+}
+
+static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert)
+{
+ int ret;
+ uint16_t val = 0;
+
+ /* Put every port into a proper state, which may save power */
+ pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
+ pca953x_write_reg(chip, PCA957X_CFG, 0xffff);
+ pca953x_write_reg(chip, PCA957X_OUT, 0x0);
+
+ ret = pca953x_read_reg(chip, PCA957X_IN, &val);
+ if (ret)
+ goto out;
+ ret = pca953x_read_reg(chip, PCA957X_OUT, &chip->reg_output);
+ if (ret)
+ goto out;
+ ret = pca953x_read_reg(chip, PCA957X_CFG, &chip->reg_direction);
+ if (ret)
+ goto out;
+
+ /* set platform specific polarity inversion */
+ pca953x_write_reg(chip, PCA957X_INVRT, invert);
+
+ /* Enable registers 6 and 7 to control pull-up and pull-down */
+ pca953x_write_reg(chip, PCA957X_BKEN, 0x202);
+
+ return 0;
+out:
+ return ret;
+}
+
static int __devinit pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
- int ret;
+ int ret = 0;
chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
if (chip == NULL)
@@ -531,25 +669,20 @@ static int __devinit pca953x_probe(struct i2c_client *client,
chip->gpio_start = pdata->gpio_base;
chip->names = pdata->names;
+ chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
mutex_init(&chip->i2c_lock);
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- pca953x_setup_gpio(chip, id->driver_data & PCA953X_GPIOS);
+ pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
- ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
- if (ret)
- goto out_failed;
-
- ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &chip->reg_direction);
- if (ret)
- goto out_failed;
-
- /* set platform specific polarity inversion */
- ret = pca953x_write_reg(chip, PCA953X_INVERT, pdata->invert);
- if (ret)
+ if (chip->chip_type == PCA953X_TYPE)
+ device_pca953x_init(chip, pdata->invert);
+ else if (chip->chip_type == PCA957X_TYPE)
+ device_pca957x_init(chip, pdata->invert);
+ else
goto out_failed;
ret = pca953x_irq_setup(chip, id);
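The pca953x hunks above repeat the same chip_type switch at every register access. One possible way to factor that out, sketched here as it would sit inside pca953x.c and using only the PCA953X_*/PCA957X_* constants introduced above, is hypothetical and not part of the patch:

/* Map a logical register to the offset used by the probed chip flavour. */
static int pca95xx_reg_offset(struct pca953x_chip *chip,
			      int pca953x_reg, int pca957x_reg)
{
	switch (chip->chip_type) {
	case PCA953X_TYPE:
		return pca953x_reg;
	case PCA957X_TYPE:
		return pca957x_reg;
	default:
		return -EINVAL;
	}
}

/* Example call site, equivalent to the open-coded switch in
 * pca953x_gpio_get_value():
 *
 *	offset = pca95xx_reg_offset(chip, PCA953X_INPUT, PCA957X_IN);
 *	ret = pca953x_read_reg(chip, offset, &reg_val);
 */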
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index f970a5f3585e..36919e77c495 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -283,8 +283,10 @@ static int pch_gpio_resume(struct pci_dev *pdev)
#define pch_gpio_resume NULL
#endif
+#define PCI_VENDOR_ID_ROHM 0x10DB
static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e5123b1d341..144d27261e43 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index f3698967edf6..8755f5f3ad37 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -120,21 +120,17 @@ static void serport_ldisc_close(struct tty_struct *tty)
* 'interrupt' routine.
*/
-static unsigned int serport_ldisc_receive(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
{
struct serport *serport = (struct serport*) tty->disc_data;
unsigned long flags;
unsigned int ch_flags;
- int ret = 0;
int i;
spin_lock_irqsave(&serport->lock, flags);
- if (!test_bit(SERPORT_ACTIVE, &serport->flags)) {
- ret = -EINVAL;
+ if (!test_bit(SERPORT_ACTIVE, &serport->flags))
goto out;
- }
for (i = 0; i < count; i++) {
switch (fp[i]) {
@@ -156,8 +152,6 @@ static unsigned int serport_ldisc_receive(struct tty_struct *tty,
out:
spin_unlock_irqrestore(&serport->lock, flags);
-
- return ret == 0 ? count : ret;
}
/*
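The serport change above is one instance of a tree-wide update: the tty line-discipline ->receive_buf() hook now returns void, so a driver must consume all count bytes and can no longer report a partial count or an error back to the tty core. A minimal sketch of the new shape, with a hypothetical line discipline, looks like this:

#include <linux/module.h>
#include <linux/tty.h>

static void example_ldisc_receive(struct tty_struct *tty,
				  const unsigned char *cp, char *fp, int count)
{
	/* process cp[0..count-1]; fp carries the per-character flags */
}

static struct tty_ldisc_ops example_ldisc_ops = {
	.owner		= THIS_MODULE,
	.magic		= TTY_LDISC_MAGIC,
	.name		= "example",
	.receive_buf	= example_ldisc_receive,
};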
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 1d44d470897c..86a5c4f7775e 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -674,7 +674,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
* cflags buffer containing error flags for received characters (ignored)
* count number of received characters
*/
-static unsigned int
+static void
gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -683,12 +683,12 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
struct inbuf_t *inbuf;
if (!cs)
- return -ENODEV;
+ return;
inbuf = cs->inbuf;
if (!inbuf) {
dev_err(cs->dev, "%s: no inbuf\n", __func__);
cs_put(cs);
- return -EINVAL;
+ return;
}
tail = inbuf->tail;
@@ -725,8 +725,6 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
gigaset_schedule_event(cs);
cs_put(cs);
-
- return count;
}
/*
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 76a5af00a26b..2067288f61f9 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -19,6 +19,8 @@
#define DM_MSG_PREFIX "io"
#define DM_IO_MAX_REGIONS BITS_PER_LONG
+#define MIN_IOS 16
+#define MIN_BIOS 16
struct dm_io_client {
mempool_t *pool;
@@ -41,33 +43,21 @@ struct io {
static struct kmem_cache *_dm_io_cache;
/*
- * io contexts are only dynamically allocated for asynchronous
- * io. Since async io is likely to be the majority of io we'll
- * have the same number of io contexts as bios! (FIXME: must reduce this).
- */
-
-static unsigned int pages_to_ios(unsigned int pages)
-{
- return 4 * pages; /* too many ? */
-}
-
-/*
* Create a client with mempool and bioset.
*/
-struct dm_io_client *dm_io_client_create(unsigned num_pages)
+struct dm_io_client *dm_io_client_create(void)
{
- unsigned ios = pages_to_ios(num_pages);
struct dm_io_client *client;
client = kmalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
- client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
+ client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
if (!client->pool)
goto bad;
- client->bios = bioset_create(16, 0);
+ client->bios = bioset_create(MIN_BIOS, 0);
if (!client->bios)
goto bad;
@@ -81,13 +71,6 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
}
EXPORT_SYMBOL(dm_io_client_create);
-int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
-{
- return mempool_resize(client->pool, pages_to_ios(num_pages),
- GFP_KERNEL);
-}
-EXPORT_SYMBOL(dm_io_client_resize);
-
void dm_io_client_destroy(struct dm_io_client *client)
{
mempool_destroy(client->pool);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1bb73a13ca40..819e37eaaeba 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -27,15 +27,19 @@
#include "dm.h"
+#define SUB_JOB_SIZE 128
+#define SPLIT_COUNT 8
+#define MIN_JOBS 8
+#define RESERVE_PAGES (DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
+
/*-----------------------------------------------------------------
* Each kcopyd client has its own little pool of preallocated
* pages for kcopyd io.
*---------------------------------------------------------------*/
struct dm_kcopyd_client {
- spinlock_t lock;
struct page_list *pages;
- unsigned int nr_pages;
- unsigned int nr_free_pages;
+ unsigned nr_reserved_pages;
+ unsigned nr_free_pages;
struct dm_io_client *io_client;
@@ -67,15 +71,18 @@ static void wake(struct dm_kcopyd_client *kc)
queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}
-static struct page_list *alloc_pl(void)
+/*
+ * Obtain one page for the use of kcopyd.
+ */
+static struct page_list *alloc_pl(gfp_t gfp)
{
struct page_list *pl;
- pl = kmalloc(sizeof(*pl), GFP_KERNEL);
+ pl = kmalloc(sizeof(*pl), gfp);
if (!pl)
return NULL;
- pl->page = alloc_page(GFP_KERNEL);
+ pl->page = alloc_page(gfp);
if (!pl->page) {
kfree(pl);
return NULL;
@@ -90,41 +97,56 @@ static void free_pl(struct page_list *pl)
kfree(pl);
}
-static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
- unsigned int nr, struct page_list **pages)
+/*
+ * Add the provided pages to a client's free page list, releasing
+ * back to the system any beyond the reserved_pages limit.
+ */
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
- struct page_list *pl;
-
- spin_lock(&kc->lock);
- if (kc->nr_free_pages < nr) {
- spin_unlock(&kc->lock);
- return -ENOMEM;
- }
-
- kc->nr_free_pages -= nr;
- for (*pages = pl = kc->pages; --nr; pl = pl->next)
- ;
+ struct page_list *next;
- kc->pages = pl->next;
- pl->next = NULL;
+ do {
+ next = pl->next;
- spin_unlock(&kc->lock);
+ if (kc->nr_free_pages >= kc->nr_reserved_pages)
+ free_pl(pl);
+ else {
+ pl->next = kc->pages;
+ kc->pages = pl;
+ kc->nr_free_pages++;
+ }
- return 0;
+ pl = next;
+ } while (pl);
}
-static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
+ unsigned int nr, struct page_list **pages)
{
- struct page_list *cursor;
+ struct page_list *pl;
+
+ *pages = NULL;
+
+ do {
+ pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
+ if (unlikely(!pl)) {
+ /* Use reserved pages */
+ pl = kc->pages;
+ if (unlikely(!pl))
+ goto out_of_memory;
+ kc->pages = pl->next;
+ kc->nr_free_pages--;
+ }
+ pl->next = *pages;
+ *pages = pl;
+ } while (--nr);
- spin_lock(&kc->lock);
- for (cursor = pl; cursor->next; cursor = cursor->next)
- kc->nr_free_pages++;
+ return 0;
- kc->nr_free_pages++;
- cursor->next = kc->pages;
- kc->pages = pl;
- spin_unlock(&kc->lock);
+out_of_memory:
+ if (*pages)
+ kcopyd_put_pages(kc, *pages);
+ return -ENOMEM;
}
/*
@@ -141,13 +163,16 @@ static void drop_pages(struct page_list *pl)
}
}
-static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
+/*
+ * Allocate and reserve nr_pages for the use of a specific client.
+ */
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
- unsigned int i;
+ unsigned i;
struct page_list *pl = NULL, *next;
- for (i = 0; i < nr; i++) {
- next = alloc_pl();
+ for (i = 0; i < nr_pages; i++) {
+ next = alloc_pl(GFP_KERNEL);
if (!next) {
if (pl)
drop_pages(pl);
@@ -157,17 +182,18 @@ static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
pl = next;
}
+ kc->nr_reserved_pages += nr_pages;
kcopyd_put_pages(kc, pl);
- kc->nr_pages += nr;
+
return 0;
}
static void client_free_pages(struct dm_kcopyd_client *kc)
{
- BUG_ON(kc->nr_free_pages != kc->nr_pages);
+ BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
drop_pages(kc->pages);
kc->pages = NULL;
- kc->nr_free_pages = kc->nr_pages = 0;
+ kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
/*-----------------------------------------------------------------
@@ -216,16 +242,17 @@ struct kcopyd_job {
struct mutex lock;
atomic_t sub_jobs;
sector_t progress;
-};
-/* FIXME: this should scale with the number of pages */
-#define MIN_JOBS 512
+ struct kcopyd_job *master_job;
+};
static struct kmem_cache *_job_cache;
int __init dm_kcopyd_init(void)
{
- _job_cache = KMEM_CACHE(kcopyd_job, 0);
+ _job_cache = kmem_cache_create("kcopyd_job",
+ sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
+ __alignof__(struct kcopyd_job), 0, NULL);
if (!_job_cache)
return -ENOMEM;
@@ -299,7 +326,12 @@ static int run_complete_job(struct kcopyd_job *job)
if (job->pages)
kcopyd_put_pages(kc, job->pages);
- mempool_free(job, kc->job_pool);
+ /*
+ * If this is the master job, the sub jobs have already
+ * completed so we can free everything.
+ */
+ if (job->master_job == job)
+ mempool_free(job, kc->job_pool);
fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
@@ -460,14 +492,14 @@ static void dispatch_job(struct kcopyd_job *job)
wake(kc);
}
-#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
void *context)
{
/* FIXME: tidy this function */
sector_t progress = 0;
sector_t count = 0;
- struct kcopyd_job *job = (struct kcopyd_job *) context;
+ struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
+ struct kcopyd_job *job = sub_job->master_job;
struct dm_kcopyd_client *kc = job->kc;
mutex_lock(&job->lock);
@@ -498,8 +530,6 @@ static void segment_complete(int read_err, unsigned long write_err,
if (count) {
int i;
- struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
- GFP_NOIO);
*sub_job = *job;
sub_job->source.sector += progress;
@@ -511,7 +541,7 @@ static void segment_complete(int read_err, unsigned long write_err,
}
sub_job->fn = segment_complete;
- sub_job->context = job;
+ sub_job->context = sub_job;
dispatch_job(sub_job);
} else if (atomic_dec_and_test(&job->sub_jobs)) {
@@ -531,19 +561,19 @@ static void segment_complete(int read_err, unsigned long write_err,
}
/*
- * Create some little jobs that will do the move between
- * them.
+ * Create some sub jobs to share the work between them.
*/
-#define SPLIT_COUNT 8
-static void split_job(struct kcopyd_job *job)
+static void split_job(struct kcopyd_job *master_job)
{
int i;
- atomic_inc(&job->kc->nr_jobs);
+ atomic_inc(&master_job->kc->nr_jobs);
- atomic_set(&job->sub_jobs, SPLIT_COUNT);
- for (i = 0; i < SPLIT_COUNT; i++)
- segment_complete(0, 0u, job);
+ atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
+ for (i = 0; i < SPLIT_COUNT; i++) {
+ master_job[i + 1].master_job = master_job;
+ segment_complete(0, 0u, &master_job[i + 1]);
+ }
}
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
@@ -553,7 +583,8 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
struct kcopyd_job *job;
/*
- * Allocate a new job.
+ * Allocate an array of jobs consisting of one master job
+ * followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
@@ -577,10 +608,10 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->fn = fn;
job->context = context;
+ job->master_job = job;
- if (job->source.count < SUB_JOB_SIZE)
+ if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
-
else {
mutex_init(&job->lock);
job->progress = 0;
@@ -606,17 +637,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
/*-----------------------------------------------------------------
* Client setup
*---------------------------------------------------------------*/
-int dm_kcopyd_client_create(unsigned int nr_pages,
- struct dm_kcopyd_client **result)
+struct dm_kcopyd_client *dm_kcopyd_client_create(void)
{
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
kc = kmalloc(sizeof(*kc), GFP_KERNEL);
if (!kc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- spin_lock_init(&kc->lock);
spin_lock_init(&kc->job_lock);
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
@@ -633,12 +662,12 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
goto bad_workqueue;
kc->pages = NULL;
- kc->nr_pages = kc->nr_free_pages = 0;
- r = client_alloc_pages(kc, nr_pages);
+ kc->nr_reserved_pages = kc->nr_free_pages = 0;
+ r = client_reserve_pages(kc, RESERVE_PAGES);
if (r)
goto bad_client_pages;
- kc->io_client = dm_io_client_create(nr_pages);
+ kc->io_client = dm_io_client_create();
if (IS_ERR(kc->io_client)) {
r = PTR_ERR(kc->io_client);
goto bad_io_client;
@@ -647,8 +676,7 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
init_waitqueue_head(&kc->destroyq);
atomic_set(&kc->nr_jobs, 0);
- *result = kc;
- return 0;
+ return kc;
bad_io_client:
client_free_pages(kc);
@@ -659,7 +687,7 @@ bad_workqueue:
bad_slab:
kfree(kc);
- return r;
+ return ERR_PTR(r);
}
EXPORT_SYMBOL(dm_kcopyd_client_create);
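dm_kcopyd_client_create() now takes no size hint and returns an ERR_PTR-encoded pointer instead of filling in a result argument (dm_io_client_create() changed the same way earlier in the patch). Callers therefore follow the pattern below; the surrounding context structure is an illustrative assumption, mirroring the dm-raid1 and dm-snap updates later in this diff.

#include <linux/err.h>
#include <linux/dm-kcopyd.h>

struct example_ctx {
	struct dm_kcopyd_client *kcopyd_client;
};

static int example_attach_kcopyd(struct example_ctx *ctx)
{
	ctx->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(ctx->kcopyd_client)) {
		int r = PTR_ERR(ctx->kcopyd_client);

		ctx->kcopyd_client = NULL;
		return r;
	}
	return 0;
}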
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a1f321889676..948e3f4925bf 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -449,8 +449,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
lc->io_req.mem.type = DM_IO_VMA;
lc->io_req.notify.fn = NULL;
- lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
- PAGE_SIZE));
+ lc->io_req.client = dm_io_client_create();
if (IS_ERR(lc->io_req.client)) {
r = PTR_ERR(lc->io_req.client);
DMWARN("couldn't allocate disk io client");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index a550a057d991..aa4e570c2cb5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1290,7 +1290,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (error == -EOPNOTSUPP || error == -EREMOTEIO)
+ if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
return error;
if (mpio->pgpath)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 976ad4688afc..9bfd057be686 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -22,8 +22,6 @@
#define DM_MSG_PREFIX "raid1"
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
-#define DM_IO_PAGES 64
-#define DM_KCOPYD_PAGES 64
#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -887,7 +885,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
return NULL;
}
- ms->io_client = dm_io_client_create(DM_IO_PAGES);
+ ms->io_client = dm_io_client_create();
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
mempool_destroy(ms->read_record_pool);
@@ -1117,9 +1115,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_destroy_wq;
}
- r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
- if (r)
+ ms->kcopyd_client = dm_kcopyd_client_create();
+ if (IS_ERR(ms->kcopyd_client)) {
+ r = PTR_ERR(ms->kcopyd_client);
goto err_destroy_wq;
+ }
wakeup_mirrord(ms);
return 0;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 95891dfcbca0..135c2f1fdbfc 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -154,11 +154,6 @@ struct pstore {
struct workqueue_struct *metadata_wq;
};
-static unsigned sectors_to_pages(unsigned sectors)
-{
- return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
-}
-
static int alloc_area(struct pstore *ps)
{
int r = -ENOMEM;
@@ -318,8 +313,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
chunk_size_supplied = 0;
}
- ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
- chunk_size));
+ ps->io_client = dm_io_client_create();
if (IS_ERR(ps->io_client))
return PTR_ERR(ps->io_client);
@@ -368,11 +362,6 @@ static int read_header(struct pstore *ps, int *new_snapshot)
return r;
}
- r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
- ps->io_client);
- if (r)
- return r;
-
r = alloc_area(ps);
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2d330942cb2..9ecff5f3023a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,11 +40,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
#define SNAPSHOT_COPY_PRIORITY 2
/*
- * Reserve 1MB for each snapshot initially (with minimum of 1 page).
- */
-#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
-
-/*
* The size of the mempool used to track chunks in use.
*/
#define MIN_IOS 256
@@ -1116,8 +1111,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
- if (r) {
+ s->kcopyd_client = dm_kcopyd_client_create();
+ if (IS_ERR(s->kcopyd_client)) {
+ r = PTR_ERR(s->kcopyd_client);
ti->error = "Could not create kcopyd client";
goto bad_kcopyd;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cb8380c9767f..451c3bb176d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -362,6 +362,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
+ struct request_queue *q;
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
sector_t dev_size =
@@ -370,6 +371,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
+ /*
+ * Some devices exist without request functions,
+ * such as loop devices not yet bound to backing files.
+ * Forbid the use of such devices.
+ */
+ q = bdev_get_queue(bdev);
+ if (!q || !q->make_request_fn) {
+ DMWARN("%s: %s is not yet initialised: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdevname(bdev, b),
+ (unsigned long long)start,
+ (unsigned long long)len,
+ (unsigned long long)dev_size);
+ return 1;
+ }
+
if (!dev_size)
return 0;
@@ -1346,7 +1363,8 @@ bool dm_table_supports_discards(struct dm_table *t)
return 0;
/*
- * Ensure that at least one underlying device supports discards.
+ * Unless any target used by the table sets discards_supported,
+ * require at least one underlying device to support discards.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
* supporting discard must provide.
@@ -1354,6 +1372,9 @@ bool dm_table_supports_discards(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
+ if (ti->discards_supported)
+ return 1;
+
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_discard_capable, NULL))
return 1;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index b6c267724e14..0f09c057e796 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -721,7 +721,7 @@ config MFD_PM8XXX_IRQ
config MFD_TPS65910
bool "TPS65910 Power Management chip"
- depends on I2C=y
+ depends on I2C=y && GPIOLIB
select MFD_CORE
select GPIO_TPS65910
help
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e63782107e2f..02a15d7cb3b0 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2005,7 +2005,8 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
static struct mfd_cell db8500_prcmu_devs[] = {
{
.name = "db8500-prcmu-regulators",
- .mfd_data = &db8500_regulators,
+ .platform_data = &db8500_regulators,
+ .pdata_size = sizeof(db8500_regulators),
},
{
.name = "cpufreq-u8500",
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index b0c56313dbbb..8cebec5e85ee 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -304,7 +304,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
return 1;
}
/* Readjust the instruction pointer if needed */
- instruction_pointer_set(&kgdbts_regs, ip + offset);
+ ip += offset;
+#ifdef GDB_ADJUSTS_BREAK_OFFSET
+ instruction_pointer_set(&kgdbts_regs, ip);
+#endif
return 0;
}
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 1a05fe08e2cb..f91f82eabda7 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -747,8 +747,8 @@ static void st_tty_close(struct tty_struct *tty)
pr_debug("%s: done ", __func__);
}
-static unsigned int st_tty_receive(struct tty_struct *tty,
- const unsigned char *data, char *tty_flags, int count)
+static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
+ char *tty_flags, int count)
{
#ifdef VERBOSE
print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
@@ -761,8 +761,6 @@ static unsigned int st_tty_receive(struct tty_struct *tty,
*/
st_recv(tty->disc_data, data, count);
pr_debug("done %s", __func__);
-
- return count;
}
/* wake-up function called in from the TTY layer
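This st_core.c hunk is the first of a series in this diff converting tty line-discipline ->receive_buf() handlers from returning a byte count (or errno) to returning void; the same mechanical change repeats below for caif_serial, slcan, 6pack, mkiss, irtty-sir, ppp_async, ppp_synctty, slip and x25_asy. A self-contained sketch of the before/after handler shape, using a stub context structure rather than the real struct tty_struct:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Stub standing in for struct tty_struct; only disc_data is modelled. */
struct stub_tty { void *disc_data; };

/* Old shape: the handler reported how many bytes it consumed, or an errno. */
static int old_receive(struct stub_tty *tty, const unsigned char *cp,
                       char *fp, int count)
{
        (void)cp; (void)fp;
        if (!tty->disc_data)
                return -ENODEV;
        /* ... decapsulate count bytes ... */
        return count;
}

/* New shape: the handler simply consumes what it is given and returns. */
static void new_receive(struct stub_tty *tty, const unsigned char *cp,
                        char *fp, int count)
{
        (void)cp; (void)fp; (void)count;
        if (!tty->disc_data)
                return;
        /* ... decapsulate count bytes ... */
}

int main(void)
{
        struct stub_tty t = { .disc_data = NULL };
        unsigned char buf[4] = { 0 };

        printf("old style returned %d\n", old_receive(&t, buf, NULL, 4));
        new_receive(&t, buf, NULL, 4);  /* nothing to check any more */
        return 0;
}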
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 73c7e03617ec..3df0c0f8b8bf 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -167,8 +167,8 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
#endif
-static unsigned int ldisc_receive(struct tty_struct *tty,
- const u8 *data, char *flags, int count)
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+ char *flags, int count)
{
struct sk_buff *skb = NULL;
struct ser_device *ser;
@@ -215,8 +215,6 @@ static unsigned int ldisc_receive(struct tty_struct *tty,
} else
++ser->dev->stats.rx_dropped;
update_tty_status(ser);
-
- return count;
}
static int handle_tx(struct ser_device *ser)
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 75622d54581f..1b49df6b2470 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -425,17 +425,16 @@ static void slc_setup(struct net_device *dev)
* in parallel
*/
-static unsigned int slcan_receive_buf(struct tty_struct *tty,
+static void slcan_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
- int bytes = count;
if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
- return -ENODEV;
+ return;
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -444,8 +443,6 @@ static unsigned int slcan_receive_buf(struct tty_struct *tty,
}
slcan_unesc(sl, *cp++);
}
-
- return count;
}
/************************************
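The slcan receive loop above (and the matching loops in mkiss, slip and x25_asy further down) walks the data buffer together with an optional parallel flag buffer: a non-zero flag byte marks a character the serial driver flagged as bad, which is treated as an rx error and skipped instead of being fed to the protocol unescaper. The stand-alone model below redoes that walk with a trivial "unescape" that just stores good bytes; unlike the real drivers, which rate-limit the error counter through an SLF_ERROR bit, this sketch simply counts every flagged byte:

#include <stdio.h>

#define BUF_MAX 16

struct toy_link {
        unsigned char rbuf[BUF_MAX];
        int rcount;
        unsigned long rx_errors;
};

/* Stand-in for slcan_unesc()/slip_unesc(): just accumulate good bytes. */
static void toy_unesc(struct toy_link *l, unsigned char c)
{
        if (l->rcount < BUF_MAX)
                l->rbuf[l->rcount++] = c;
}

static void toy_receive_buf(struct toy_link *l, const unsigned char *cp,
                            const char *fp, int count)
{
        while (count--) {
                if (fp && *fp++) {      /* driver-flagged bad character */
                        l->rx_errors++;
                        cp++;           /* skip it */
                        continue;
                }
                toy_unesc(l, *cp++);
        }
}

int main(void)
{
        struct toy_link l = { .rcount = 0, .rx_errors = 0 };
        const unsigned char data[] = { 'a', 'b', 'c', 'd' };
        const char flags[]         = {  0,   1,   0,   0  };   /* 'b' is bad */

        toy_receive_buf(&l, data, flags, 4);
        printf("kept %d bytes, %lu rx errors\n", l.rcount, l.rx_errors);
        return 0;
}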
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 992089639ea4..3e5d0b6b6516 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -456,7 +456,7 @@ out:
* a block of 6pack data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing.
*/
-static unsigned int sixpack_receive_buf(struct tty_struct *tty,
+static void sixpack_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct sixpack *sp;
@@ -464,11 +464,11 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
int count1;
if (!count)
- return 0;
+ return;
sp = sp_get(tty);
if (!sp)
- return -ENODEV;
+ return;
memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
@@ -487,8 +487,6 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
sp_put(sp);
tty_unthrottle(tty);
-
- return count1;
}
/*
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 0e4f23531140..4c628393c8b1 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -923,14 +923,13 @@ static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
* a block of data has been received, which can now be decapsulated
* and sent on to the AX.25 layer for further processing.
*/
-static unsigned int mkiss_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct mkiss *ax = mkiss_get(tty);
- int bytes = count;
if (!ax)
- return -ENODEV;
+ return;
/*
* Argh! mtu change time! - costs us the packet part received
@@ -940,7 +939,7 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
ax_changedmtu(ax);
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp != NULL && *fp++) {
if (!test_and_set_bit(AXF_ERROR, &ax->flags))
ax->dev->stats.rx_errors++;
@@ -953,8 +952,6 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
mkiss_put(ax);
tty_unthrottle(tty);
-
- return count;
}
/*
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 035861d8acb1..3352b2443e58 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -216,23 +216,23 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
* usbserial: urb-complete-interrupt / softint
*/
-static unsigned int irtty_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct sir_dev *dev;
struct sirtty_cb *priv = tty->disc_data;
int i;
- IRDA_ASSERT(priv != NULL, return -ENODEV;);
- IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EINVAL;);
+ IRDA_ASSERT(priv != NULL, return;);
+ IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
if (unlikely(count==0)) /* yes, this happens */
- return 0;
+ return;
dev = priv->dev;
if (!dev) {
IRDA_WARNING("%s(), not ready yet!\n", __func__);
- return -ENODEV;
+ return;
}
for (i = 0; i < count; i++) {
@@ -242,13 +242,11 @@ static unsigned int irtty_receive_buf(struct tty_struct *tty,
if (fp && *fp++) {
IRDA_DEBUG(0, "Framing or parity error!\n");
sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
- return -EINVAL;
+ return;
}
}
sirdev_receive(dev, cp, count);
-
- return count;
}
/*
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 53872d7d7382..a1b82c9c67d2 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -348,7 +348,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
unsigned long flags;
if (!ap)
- return -ENODEV;
+ return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_async_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -356,8 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
tasklet_schedule(&ap->tsk);
ap_put(ap);
tty_unthrottle(tty);
-
- return count;
}
static void
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 0815790a5cf9..2573f525f11c 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -381,7 +381,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
-static unsigned int
+static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -389,7 +389,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
unsigned long flags;
if (!ap)
- return -ENODEV;
+ return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_sync_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -397,8 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
tasklet_schedule(&ap->tsk);
sp_put(ap);
tty_unthrottle(tty);
-
- return count;
}
static void
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 584809c656d5..8ec1a9a0bb9a 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -670,17 +670,16 @@ static void sl_setup(struct net_device *dev)
* in parallel
*/
-static unsigned int slip_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct slip *sl = tty->disc_data;
- int bytes = count;
if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
- return -ENODEV;
+ return;
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -694,8 +693,6 @@ static unsigned int slip_receive_buf(struct tty_struct *tty,
#endif
slip_unesc(sl, *cp++);
}
-
- return count;
}
/************************************
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0cb0b0632672..f6853247a620 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -609,7 +609,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
* before it gets out of hand. Naturally, this wastes entries. */
if (capacity < 2+MAX_SKB_FRAGS) {
netif_stop_queue(dev);
- if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+ if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
/* More just got used, free them then recheck. */
capacity += free_old_xmit_skbs(vi);
if (capacity >= 2+MAX_SKB_FRAGS) {
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 40398bf7d036..24297b274cd4 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -517,18 +517,17 @@ static int x25_asy_close(struct net_device *dev)
* and sent on to some IP layer for further processing.
*/
-static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
+static void x25_asy_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct x25_asy *sl = tty->disc_data;
- int bytes = count;
if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
return;
/* Read the characters out of the buffer */
- while (bytes--) {
+ while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -537,8 +536,6 @@ static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
}
x25_asy_unesc(sl, *cp++);
}
-
- return count;
}
/*
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 4e70749f8d16..a8d5bb3cba89 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -11,7 +11,7 @@
#define EVENT_BUFFER_H
#include <linux/types.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
int alloc_event_buffer(void);
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index f9bda64fcd1b..dccd8636095c 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -14,7 +14,7 @@
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/time.h>
-#include <asm/mutex.h>
+#include <linux/mutex.h>
#include "oprof.h"
#include "event_buffer.h"
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 12e02bf92c4a..3dc9befa5aec 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -698,12 +698,7 @@ int __init detect_intel_iommu(void)
{
#ifdef CONFIG_INTR_REMAP
struct acpi_table_dmar *dmar;
- /*
- * for now we will disable dma-remapping when interrupt
- * remapping is enabled.
- * When support for queued invalidation for IOTLB invalidation
- * is added, we will not need this any more.
- */
+
dmar = (struct acpi_table_dmar *) dmar_tbl;
if (ret && cpu_has_x2apic && dmar->flags & 0x1)
printk(KERN_INFO
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 6af6b628175b..59f17acf7f68 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -47,6 +47,8 @@
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
+#define IS_BRIDGE_HOST_DEVICE(pdev) \
+ ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
@@ -116,6 +118,11 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
return (pfn + level_size(level) - 1) & level_mask(level);
}
+static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+{
+ return 1 << ((lvl - 1) * LEVEL_STRIDE);
+}
+
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
@@ -143,6 +150,12 @@ static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
/*
+ * set to 1 to panic kernel if can't successfully enable VT-d
+ * (used when kernel is launched w/ TXT)
+ */
+static int force_on = 0;
+
+/*
* 0: Present
* 1-11: Reserved
* 12-63: Context Ptr (12 - (haw-1))
@@ -338,6 +351,9 @@ struct dmar_domain {
int iommu_coherency;/* indicate coherency of iommu access */
int iommu_snooping; /* indicate snooping control feature*/
int iommu_count; /* reference count of iommu */
+ int iommu_superpage;/* Level of superpages supported:
+ 0 == 4KiB (no superpages), 1 == 2MiB,
+ 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
};
@@ -387,6 +403,7 @@ int dmar_disabled = 1;
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
+static int intel_iommu_superpage = 1;
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
@@ -417,6 +434,10 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable batched IOTLB flush\n");
intel_iommu_strict = 1;
+ } else if (!strncmp(str, "sp_off", 6)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: disable supported super page\n");
+ intel_iommu_superpage = 0;
}
str += strcspn(str, ",");
@@ -555,11 +576,32 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
}
}
+static void domain_update_iommu_superpage(struct dmar_domain *domain)
+{
+ int i, mask = 0xf;
+
+ if (!intel_iommu_superpage) {
+ domain->iommu_superpage = 0;
+ return;
+ }
+
+ domain->iommu_superpage = 4; /* 1TiB */
+
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+ mask |= cap_super_page_val(g_iommus[i]->cap);
+ if (!mask) {
+ break;
+ }
+ }
+ domain->iommu_superpage = fls(mask);
+}
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
domain_update_iommu_snooping(domain);
+ domain_update_iommu_superpage(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
@@ -689,23 +731,31 @@ out:
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn)
+ unsigned long pfn, int large_level)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
- int offset;
+ int offset, target_level;
BUG_ON(!domain->pgd);
BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
parent = domain->pgd;
+ /* Search pte */
+ if (!large_level)
+ target_level = 1;
+ else
+ target_level = large_level;
+
while (level > 0) {
void *tmp_page;
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
- if (level == 1)
+ if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+ break;
+ if (level == target_level)
break;
if (!dma_pte_present(pte)) {
@@ -733,10 +783,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return pte;
}
+
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
- int level)
+ int level, int *large_page)
{
struct dma_pte *parent, *pte = NULL;
int total = agaw_to_level(domain->agaw);
@@ -749,8 +800,16 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
if (level == total)
return pte;
- if (!dma_pte_present(pte))
+ if (!dma_pte_present(pte)) {
+ *large_page = total;
break;
+ }
+
+ if (pte->val & DMA_PTE_LARGE_PAGE) {
+ *large_page = total;
+ return pte;
+ }
+
parent = phys_to_virt(dma_pte_addr(pte));
total--;
}
@@ -763,6 +822,7 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long last_pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+ unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
@@ -771,14 +831,15 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
/* we don't need lock here; nobody else touches the iova range */
do {
- first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
+ large_page = 1;
+ first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
if (!pte) {
- start_pfn = align_to_level(start_pfn + 1, 2);
+ start_pfn = align_to_level(start_pfn + 1, large_page + 1);
continue;
}
- do {
+ do {
dma_clear_pte(pte);
- start_pfn++;
+ start_pfn += lvl_to_nr_pages(large_page);
pte++;
} while (start_pfn <= last_pfn && !first_pte_in_page(pte));
@@ -798,6 +859,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
int total = agaw_to_level(domain->agaw);
int level;
unsigned long tmp;
+ int large_page = 2;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -813,7 +875,10 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
return;
do {
- first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
+ large_page = level;
+ first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
+ if (large_page > level)
+ level = large_page + 1;
if (!pte) {
tmp = align_to_level(tmp + 1, level + 1);
continue;
@@ -1397,6 +1462,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
else
domain->iommu_snooping = 0;
+ domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
domain->iommu_count = 1;
domain->nid = iommu->node;
@@ -1417,6 +1483,10 @@ static void domain_exit(struct dmar_domain *domain)
if (!domain)
return;
+ /* Flush any lazy unmaps that may reference this domain */
+ if (!intel_iommu_strict)
+ flush_unmaps_timeout(0);
+
domain_remove_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
@@ -1648,6 +1718,34 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr,
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
+/* Return largest possible superpage level for a given mapping */
+static inline int hardware_largepage_caps(struct dmar_domain *domain,
+ unsigned long iov_pfn,
+ unsigned long phy_pfn,
+ unsigned long pages)
+{
+ int support, level = 1;
+ unsigned long pfnmerge;
+
+ support = domain->iommu_superpage;
+
+ /* To use a large page, the virtual *and* physical addresses
+ must be aligned to 2MiB/1GiB/etc. Lower bits set in either
+ of them will mean we have to use smaller pages. So just
+ merge them and check both at once. */
+ pfnmerge = iov_pfn | phy_pfn;
+
+ while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
+ pages >>= VTD_STRIDE_SHIFT;
+ if (!pages)
+ break;
+ pfnmerge >>= VTD_STRIDE_SHIFT;
+ level++;
+ support--;
+ }
+ return level;
+}
+
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
@@ -1656,6 +1754,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
phys_addr_t uninitialized_var(pteval);
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned long sg_res;
+ unsigned int largepage_lvl = 0;
+ unsigned long lvl_pages = 0;
BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
@@ -1671,7 +1771,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
- while (nr_pages--) {
+ while (nr_pages > 0) {
uint64_t tmp;
if (!sg_res) {
@@ -1679,11 +1779,21 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length;
pteval = page_to_phys(sg_page(sg)) | prot;
+ phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
+
if (!pte) {
- first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
+ largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
+
+ first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
if (!pte)
return -ENOMEM;
+ /* It is a large page */
+ if (largepage_lvl > 1)
+ pteval |= DMA_PTE_LARGE_PAGE;
+ else
+ pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+
}
/* We don't need lock here, nobody else
* touches the iova range
@@ -1699,16 +1809,38 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
WARN_ON(1);
}
+
+ lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
+ BUG_ON(nr_pages < lvl_pages);
+ BUG_ON(sg_res < lvl_pages);
+
+ nr_pages -= lvl_pages;
+ iov_pfn += lvl_pages;
+ phys_pfn += lvl_pages;
+ pteval += lvl_pages * VTD_PAGE_SIZE;
+ sg_res -= lvl_pages;
+
+ /* If the next PTE would be the first in a new page, then we
+ need to flush the cache on the entries we've just written.
+ And then we'll need to recalculate 'pte', so clear it and
+ let it get set again in the if (!pte) block above.
+
+ If we're done (!nr_pages) we need to flush the cache too.
+
+ Also if we've been setting superpages, we may need to
+ recalculate 'pte' and switch back to smaller pages for the
+ end of the mapping, if the trailing size is not enough to
+ use another superpage (i.e. sg_res < lvl_pages). */
pte++;
- if (!nr_pages || first_pte_in_page(pte)) {
+ if (!nr_pages || first_pte_in_page(pte) ||
+ (largepage_lvl > 1 && sg_res < lvl_pages)) {
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
pte = NULL;
}
- iov_pfn++;
- pteval += VTD_PAGE_SIZE;
- sg_res--;
- if (!sg_res)
+
+ if (!sg_res && nr_pages)
sg = sg_next(sg);
}
return 0;
@@ -2016,7 +2148,7 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
return 0;
return iommu_prepare_identity_map(pdev, rmrr->base_address,
- rmrr->end_address + 1);
+ rmrr->end_address);
}
#ifdef CONFIG_DMAR_FLOPPY_WA
@@ -2030,7 +2162,7 @@ static inline void iommu_prepare_isa(void)
return;
printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
+ ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
if (ret)
printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
@@ -2106,10 +2238,10 @@ static int identity_mapping(struct pci_dev *pdev)
if (likely(!iommu_identity_mapping))
return 0;
+ info = pdev->dev.archdata.iommu;
+ if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
+ return (info->domain == si_domain);
- list_for_each_entry(info, &si_domain->devices, link)
- if (info->dev == pdev)
- return 1;
return 0;
}
@@ -2187,8 +2319,19 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
* Assume that they will -- if they turn out not to be, then we can
* take them out of the 1:1 domain later.
*/
- if (!startup)
- return pdev->dma_mask > DMA_BIT_MASK(32);
+ if (!startup) {
+ /*
+ * If the device's dma_mask is less than the system's memory
+ * size then this is not a candidate for identity mapping.
+ */
+ u64 dma_mask = pdev->dma_mask;
+
+ if (pdev->dev.coherent_dma_mask &&
+ pdev->dev.coherent_dma_mask < dma_mask)
+ dma_mask = pdev->dev.coherent_dma_mask;
+
+ return dma_mask >= dma_get_required_mask(&pdev->dev);
+ }
return 1;
}
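The iommu_should_identity_map() change in the hunk above stops treating a device as a passthrough (1:1 mapping) candidate when its usable DMA mask (dma_mask, narrowed by coherent_dma_mask when that is set and smaller) cannot cover the mask the platform reports as required. A small arithmetic sketch of that comparison, with an invented 36-bit "required" mask standing in for dma_get_required_mask():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Pick the effective mask: coherent_dma_mask only narrows it when set. */
static uint64_t effective_mask(uint64_t dma_mask, uint64_t coherent_mask)
{
        if (coherent_mask && coherent_mask < dma_mask)
                return coherent_mask;
        return dma_mask;
}

static bool candidate_for_identity_map(uint64_t dma_mask,
                                       uint64_t coherent_mask,
                                       uint64_t required_mask)
{
        return effective_mask(dma_mask, coherent_mask) >= required_mask;
}

int main(void)
{
        /* e.g. a box whose RAM needs 36 address bits (invented example) */
        uint64_t required = DMA_BIT_MASK(36);

        printf("32-bit device: %d\n",
               candidate_for_identity_map(DMA_BIT_MASK(32), 0, required));
        printf("64-bit device: %d\n",
               candidate_for_identity_map(DMA_BIT_MASK(64), 0, required));
        return 0;
}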
@@ -2203,6 +2346,9 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return -EFAULT;
for_each_pci_dev(pdev) {
+ /* Skip Host/PCI Bridge devices */
+ if (IS_BRIDGE_HOST_DEVICE(pdev))
+ continue;
if (iommu_should_identity_map(pdev, 1)) {
printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
hw ? "hardware" : "software", pci_name(pdev));
@@ -2218,7 +2364,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
-static int __init init_dmars(int force_on)
+static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
@@ -2592,8 +2738,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
- iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
- pdev->dma_mask);
+ iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
if (!iova)
goto error;
@@ -3118,7 +3263,17 @@ static int init_iommu_hw(void)
if (iommu->qi)
dmar_reenable_qi(iommu);
- for_each_active_iommu(iommu, drhd) {
+ for_each_iommu(iommu, drhd) {
+ if (drhd->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on
+ * this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(iommu);
+ continue;
+ }
+
iommu_flush_write_buffer(iommu);
iommu_set_root_entry(iommu);
@@ -3127,7 +3282,8 @@ static int init_iommu_hw(void)
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
- iommu_enable_translation(iommu);
+ if (iommu_enable_translation(iommu))
+ return 1;
iommu_disable_protect_mem_regions(iommu);
}
@@ -3194,7 +3350,10 @@ static void iommu_resume(void)
unsigned long flag;
if (init_iommu_hw()) {
- WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
+ if (force_on)
+ panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
+ else
+ WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
return;
}
@@ -3271,7 +3430,6 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
int ret = 0;
- int force_on = 0;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@@ -3309,7 +3467,7 @@ int __init intel_iommu_init(void)
init_no_remapping_devices();
- ret = init_dmars(force_on);
+ ret = init_dmars();
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
@@ -3380,8 +3538,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_safe(entry, tmp, &domain->devices) {
info = list_entry(entry, struct device_domain_info, link);
- /* No need to compare PCI domain; it has to be the same */
- if (info->bus == pdev->bus->number &&
+ if (info->segment == pci_domain_nr(pdev->bus) &&
+ info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
list_del(&info->link);
list_del(&info->global);
@@ -3419,10 +3577,13 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
- spin_lock_irqsave(&iommu->lock, tmp_flags);
- clear_bit(domain->id, iommu->domain_ids);
- iommu->domains[domain->id] = NULL;
- spin_unlock_irqrestore(&iommu->lock, tmp_flags);
+ if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
+ spin_lock_irqsave(&iommu->lock, tmp_flags);
+ clear_bit(domain->id, iommu->domain_ids);
+ iommu->domains[domain->id] = NULL;
+ spin_unlock_irqrestore(&iommu->lock, tmp_flags);
+ }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3505,6 +3666,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_count = 0;
domain->iommu_coherency = 0;
domain->iommu_snooping = 0;
+ domain->iommu_superpage = 0;
domain->max_addr = 0;
domain->nid = -1;
@@ -3720,7 +3882,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
struct dma_pte *pte;
u64 phys = 0;
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
if (pte)
phys = dma_pte_addr(pte);
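Much of the intel-iommu diff above adds superpage support; the key arithmetic is in hardware_largepage_caps(), which ORs the IOVA and physical pfns (both must be aligned) and climbs one page-table level for each 9-bit stride of the merged value that is clear, bounded by the remaining page count and by the level the hardware advertises. The stand-alone sketch below redoes that calculation under the assumption of a 9-bit stride and 4KiB base pages; the pfn values in main() are invented for illustration:

#include <stdio.h>

#define VTD_STRIDE_SHIFT 9                      /* 512 entries per level */
#define VTD_STRIDE_MASK  (~0UL << VTD_STRIDE_SHIFT)

/* Pages covered by one PTE at a given level: 1, 512, 512*512, ... */
static unsigned long lvl_to_nr_pages(unsigned int lvl)
{
        return 1UL << ((lvl - 1) * VTD_STRIDE_SHIFT);
}

/* Mirror of the level search: stay at level 1 (4KiB) unless the merged pfn
 * has its low 9 bits clear, enough pages remain, and the hardware still
 * advertises a bigger page size ("support" counts levels above 4KiB). */
static int largepage_level(int support, unsigned long iov_pfn,
                           unsigned long phys_pfn, unsigned long pages)
{
        unsigned long pfnmerge = iov_pfn | phys_pfn;
        int level = 1;

        while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
                pages >>= VTD_STRIDE_SHIFT;
                if (!pages)
                        break;
                pfnmerge >>= VTD_STRIDE_SHIFT;
                level++;
                support--;
        }
        return level;
}

int main(void)
{
        /* 2MiB-aligned pfns (low 9 bits clear) and 1024 pages: level 2 */
        int lvl = largepage_level(1, 0x200, 0x400, 1024);

        printf("level %d -> %lu pages per PTE\n", lvl, lvl_to_nr_pages(lvl));
        return 0;
}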
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 9606e599a475..c5c274ab5c5a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -63,8 +63,16 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
curr = iovad->cached32_node;
cached_iova = container_of(curr, struct iova, node);
- if (free->pfn_lo >= cached_iova->pfn_lo)
- iovad->cached32_node = rb_next(&free->node);
+ if (free->pfn_lo >= cached_iova->pfn_lo) {
+ struct rb_node *node = rb_next(&free->node);
+ struct iova *iova = container_of(node, struct iova, node);
+
+ /* only cache if it's below 32bit pfn */
+ if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
+ iovad->cached32_node = node;
+ else
+ iovad->cached32_node = NULL;
+ }
}
/* Computes the padding size required, to make the
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7c3b18e78cee..d36f41ea8cbf 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -195,6 +195,8 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_D2;
case ACPI_STATE_D3:
return PCI_D3hot;
+ case ACPI_STATE_D3_COLD:
+ return PCI_D3cold;
}
return PCI_POWER_ERROR;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5cb999b50f95..45e0191c35dd 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -39,7 +39,7 @@ config ACER_WMI
config ACERHDF
tristate "Acer Aspire One temperature and fan driver"
- depends on THERMAL && THERMAL_HWMON && ACPI
+ depends on THERMAL && ACPI
---help---
This is a driver for Acer Aspire One netbooks. It allows to access
the temperature sensor and to control the fan.
@@ -760,4 +760,13 @@ config MXM_WMI
MXM is a standard for laptop graphics cards, the WMI interface
is required for switchable nvidia graphics machines
+config INTEL_OAKTRAIL
+ tristate "Intel Oaktrail Platform Extras"
+ depends on ACPI
+ depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI
+ ---help---
+ Intel Oaktrail platforms need this driver to provide interfaces to
+ enable/disable the Camera, WiFi, BT, etc. devices. If in doubt, say Y
+ here; it will only load on supported platforms.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index a7ab3bc7b3a1..afc1f832aa67 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -41,5 +41,6 @@ obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
obj-$(CONFIG_XO15_EBOOK) += xo15-ebook.o
obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
-obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
+obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
+obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ac4e7f83ce6c..005417bd429e 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -98,13 +98,26 @@ enum acer_wmi_event_ids {
static const struct key_entry acer_wmi_keymap[] = {
{KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
{KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
{KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
{KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_IGNORE, 0x41, {KEY_MUTE} },
+ {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
+ {KE_IGNORE, 0x43, {KEY_NEXTSONG} },
+ {KE_IGNORE, 0x44, {KEY_PLAYPAUSE} },
+ {KE_IGNORE, 0x45, {KEY_STOP} },
+ {KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
+ {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
+ {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+ {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
+ {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
+ {KE_IGNORE, 0x81, {KEY_SLEEP} },
{KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */
+ {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
{KE_END, 0}
};
@@ -122,6 +135,7 @@ struct event_return_value {
*/
#define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */
#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */
+#define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */
#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */
struct lm_input_params {
@@ -737,8 +751,11 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
obj = (union acpi_object *) result.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER &&
- obj->buffer.length == sizeof(u32)) {
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
tmp = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ tmp = (u32) obj->integer.value;
} else {
tmp = 0;
}
@@ -866,8 +883,11 @@ static acpi_status WMID_set_capabilities(void)
obj = (union acpi_object *) out.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER &&
- obj->buffer.length == sizeof(u32)) {
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
} else {
kfree(out.pointer);
return AE_ERROR;
@@ -876,7 +896,8 @@ static acpi_status WMID_set_capabilities(void)
dmi_walk(type_aa_dmi_decode, NULL);
if (!has_type_aa) {
interface->capability |= ACER_CAP_WIRELESS;
- interface->capability |= ACER_CAP_THREEG;
+ if (devices & 0x40)
+ interface->capability |= ACER_CAP_THREEG;
if (devices & 0x10)
interface->capability |= ACER_CAP_BLUETOOTH;
}
@@ -961,10 +982,12 @@ static void __init acer_commandline_init(void)
* These will all fail silently if the value given is invalid, or the
* capability isn't available on the given interface
*/
- set_u32(mailled, ACER_CAP_MAILLED);
- if (!has_type_aa)
+ if (mailled >= 0)
+ set_u32(mailled, ACER_CAP_MAILLED);
+ if (!has_type_aa && threeg >= 0)
set_u32(threeg, ACER_CAP_THREEG);
- set_u32(brightness, ACER_CAP_BRIGHTNESS);
+ if (brightness >= 0)
+ set_u32(brightness, ACER_CAP_BRIGHTNESS);
}
/*
@@ -1081,7 +1104,7 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
return AE_ERROR;
}
if (obj->buffer.length != 8) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return AE_ERROR;
}
@@ -1090,8 +1113,8 @@ static acpi_status wmid3_get_device_status(u32 *value, u16 device)
kfree(obj);
if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Get Device Status failed: "
- "0x%x - 0x%x\n", return_value.error_code,
+ pr_warn("Get Device Status failed: 0x%x - 0x%x\n",
+ return_value.error_code,
return_value.ec_return_value);
else
*value = !!(return_value.devices & device);
@@ -1124,6 +1147,114 @@ static acpi_status get_device_status(u32 *value, u32 cap)
}
}
+static acpi_status wmid3_set_device_status(u32 value, u16 device)
+{
+ struct wmid3_gds_return_value return_value;
+ acpi_status status;
+ union acpi_object *obj;
+ u16 devices;
+ struct wmid3_gds_input_param params = {
+ .function_num = 0x1,
+ .hotkey_number = 0x01,
+ .devices = ACER_WMID3_GDS_WIRELESS &
+ ACER_WMID3_GDS_THREEG &
+ ACER_WMID3_GDS_WIMAX &
+ ACER_WMID3_GDS_BLUETOOTH,
+ };
+ struct acpi_buffer input = {
+ sizeof(struct wmid3_gds_input_param),
+ &params
+ };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 8) {
+ pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value) {
+ pr_warning("Get Current Device Status failed: "
+ "0x%x - 0x%x\n", return_value.error_code,
+ return_value.ec_return_value);
+ return status;
+ }
+
+ devices = return_value.devices;
+ params.function_num = 0x2;
+ params.hotkey_number = 0x01;
+ params.devices = (value) ? (devices | device) : (devices & ~device);
+
+ status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output2);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = output2.pointer;
+
+ if (!obj)
+ return AE_ERROR;
+ else if (obj->type != ACPI_TYPE_BUFFER) {
+ kfree(obj);
+ return AE_ERROR;
+ }
+ if (obj->buffer.length != 4) {
+ pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ kfree(obj);
+ return AE_ERROR;
+ }
+
+ return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+ kfree(obj);
+
+ if (return_value.error_code || return_value.ec_return_value)
+ pr_warning("Set Device Status failed: "
+ "0x%x - 0x%x\n", return_value.error_code,
+ return_value.ec_return_value);
+
+ return status;
+}
+
+static acpi_status set_device_status(u32 value, u32 cap)
+{
+ if (wmi_has_guid(WMID_GUID3)) {
+ u16 device;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ device = ACER_WMID3_GDS_WIRELESS;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ device = ACER_WMID3_GDS_BLUETOOTH;
+ break;
+ case ACER_CAP_THREEG:
+ device = ACER_WMID3_GDS_THREEG;
+ break;
+ default:
+ return AE_ERROR;
+ }
+ return wmid3_set_device_status(value, device);
+
+ } else {
+ return set_u32(value, cap);
+ }
+}
+
/*
* Rfkill devices
*/
@@ -1160,7 +1291,7 @@ static int acer_rfkill_set(void *data, bool blocked)
u32 cap = (unsigned long)data;
if (rfkill_inited) {
- status = set_u32(!blocked, cap);
+ status = set_device_status(!blocked, cap);
if (ACPI_FAILURE(status))
return -ENODEV;
}
@@ -1317,7 +1448,7 @@ static void acer_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- pr_warning("bad event status 0x%x\n", status);
+ pr_warn("bad event status 0x%x\n", status);
return;
}
@@ -1326,12 +1457,12 @@ static void acer_wmi_notify(u32 value, void *context)
if (!obj)
return;
if (obj->type != ACPI_TYPE_BUFFER) {
- pr_warning("Unknown response received %d\n", obj->type);
+ pr_warn("Unknown response received %d\n", obj->type);
kfree(obj);
return;
}
if (obj->buffer.length != 8) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return;
}
@@ -1343,7 +1474,7 @@ static void acer_wmi_notify(u32 value, void *context)
case WMID_HOTKEY_EVENT:
if (return_value.device_state) {
u16 device_state = return_value.device_state;
- pr_debug("deivces states: 0x%x\n", device_state);
+ pr_debug("device state: 0x%x\n", device_state);
if (has_cap(ACER_CAP_WIRELESS))
rfkill_set_sw_state(wireless_rfkill,
!(device_state & ACER_WMID3_GDS_WIRELESS));
@@ -1356,11 +1487,11 @@ static void acer_wmi_notify(u32 value, void *context)
}
if (!sparse_keymap_report_event(acer_wmi_input_dev,
return_value.key_num, 1, true))
- pr_warning("Unknown key number - 0x%x\n",
+ pr_warn("Unknown key number - 0x%x\n",
return_value.key_num);
break;
default:
- pr_warning("Unknown function number - %d - %d\n",
+ pr_warn("Unknown function number - %d - %d\n",
return_value.function, return_value.key_num);
break;
}
@@ -1389,7 +1520,7 @@ wmid3_set_lm_mode(struct lm_input_params *params,
return AE_ERROR;
}
if (obj->buffer.length != 4) {
- pr_warning("Unknown buffer length %d\n", obj->buffer.length);
+ pr_warn("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return AE_ERROR;
}
@@ -1414,11 +1545,11 @@ static int acer_wmi_enable_ec_raw(void)
status = wmid3_set_lm_mode(&params, &return_value);
if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Enabling EC raw mode failed: "
- "0x%x - 0x%x\n", return_value.error_code,
- return_value.ec_return_value);
+ pr_warn("Enabling EC raw mode failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
else
- pr_info("Enabled EC raw mode");
+ pr_info("Enabled EC raw mode\n");
return status;
}
@@ -1437,9 +1568,9 @@ static int acer_wmi_enable_lm(void)
status = wmid3_set_lm_mode(&params, &return_value);
if (return_value.error_code || return_value.ec_return_value)
- pr_warning("Enabling Launch Manager failed: "
- "0x%x - 0x%x\n", return_value.error_code,
- return_value.ec_return_value);
+ pr_warn("Enabling Launch Manager failed: 0x%x - 0x%x\n",
+ return_value.error_code,
+ return_value.ec_return_value);
return status;
}
@@ -1506,8 +1637,11 @@ static u32 get_wmid_devices(void)
obj = (union acpi_object *) out.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER &&
- obj->buffer.length == sizeof(u32)) {
+ (obj->buffer.length == sizeof(u32) ||
+ obj->buffer.length == sizeof(u64))) {
devices = *((u32 *) obj->buffer.pointer);
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ devices = (u32) obj->integer.value;
}
kfree(out.pointer);
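Several acer-wmi hunks above relax the same check: a WMI method result is now accepted either as an ACPI buffer of 4 or 8 bytes (only the first four bytes are used) or as a plain ACPI integer. A stand-alone model of that extraction, with a toy tagged union in place of the real union acpi_object:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for union acpi_object: either a byte buffer or an integer. */
enum toy_type { TOY_BUFFER, TOY_INTEGER };

struct toy_obj {
        enum toy_type type;
        const uint8_t *buf;
        size_t len;
        uint64_t integer;
};

/* Accept a 4- or 8-byte buffer or an integer; return non-zero on success.
 * On the little-endian x86 machines this driver targets, the first four
 * buffer bytes are the low 32 bits. */
static int extract_u32(const struct toy_obj *obj, uint32_t *out)
{
        if (obj->type == TOY_BUFFER &&
            (obj->len == sizeof(uint32_t) || obj->len == sizeof(uint64_t))) {
                memcpy(out, obj->buf, sizeof(*out));
                return 1;
        }
        if (obj->type == TOY_INTEGER) {
                *out = (uint32_t)obj->integer;
                return 1;
        }
        return 0;
}

int main(void)
{
        uint8_t raw[8] = { 0x40, 0x00, 0x00, 0x00, 0, 0, 0, 0 };
        struct toy_obj as_buf = { .type = TOY_BUFFER, .buf = raw, .len = 8 };
        struct toy_obj as_int = { .type = TOY_INTEGER, .integer = 0x40 };
        uint32_t v = 0;

        if (extract_u32(&as_buf, &v))
                printf("buffer form:  0x%x\n", v);
        if (extract_u32(&as_int, &v))
                printf("integer form: 0x%x\n", v);
        return 0;
}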
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 60f9cfcac93f..fca3489218b7 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -35,10 +35,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/fs.h>
#include <linux/dmi.h>
-#include <acpi/acpi_drivers.h>
-#include <linux/sched.h>
+#include <linux/acpi.h>
#include <linux/thermal.h>
#include <linux/platform_device.h>
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index c53b3ff7978a..d65df92e2acc 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -318,7 +318,7 @@ static int acpi_check_handle(acpi_handle handle, const char *method,
if (status != AE_OK) {
if (ret)
- pr_warning("Error finding %s\n", method);
+ pr_warn("Error finding %s\n", method);
return -ENODEV;
}
return 0;
@@ -383,7 +383,7 @@ static int asus_kled_lvl(struct asus_laptop *asus)
rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
&params, &kblv);
if (ACPI_FAILURE(rv)) {
- pr_warning("Error reading kled level\n");
+ pr_warn("Error reading kled level\n");
return -ENODEV;
}
return kblv;
@@ -397,7 +397,7 @@ static int asus_kled_set(struct asus_laptop *asus, int kblv)
kblv = 0;
if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
- pr_warning("Keyboard LED display write failed\n");
+ pr_warn("Keyboard LED display write failed\n");
return -EINVAL;
}
return 0;
@@ -531,7 +531,7 @@ static int asus_read_brightness(struct backlight_device *bd)
rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
NULL, &value);
if (ACPI_FAILURE(rv))
- pr_warning("Error reading brightness\n");
+ pr_warn("Error reading brightness\n");
return value;
}
@@ -541,7 +541,7 @@ static int asus_set_brightness(struct backlight_device *bd, int value)
struct asus_laptop *asus = bl_get_data(bd);
if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
- pr_warning("Error changing brightness\n");
+ pr_warn("Error changing brightness\n");
return -EIO;
}
return 0;
@@ -730,7 +730,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
rv = parse_arg(buf, count, &value);
if (rv > 0) {
if (write_acpi_int(asus->handle, METHOD_LEDD, value)) {
- pr_warning("LED display write failed\n");
+ pr_warn("LED display write failed\n");
return -ENODEV;
}
asus->ledd_status = (u32) value;
@@ -752,7 +752,7 @@ static int asus_wireless_status(struct asus_laptop *asus, int mask)
rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
NULL, &status);
if (ACPI_FAILURE(rv)) {
- pr_warning("Error reading Wireless status\n");
+ pr_warn("Error reading Wireless status\n");
return -EINVAL;
}
return !!(status & mask);
@@ -764,7 +764,7 @@ static int asus_wireless_status(struct asus_laptop *asus, int mask)
static int asus_wlan_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
- pr_warning("Error setting wlan status to %d", status);
+ pr_warn("Error setting wlan status to %d\n", status);
return -EIO;
}
return 0;
@@ -792,7 +792,7 @@ static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
static int asus_bluetooth_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
- pr_warning("Error setting bluetooth status to %d", status);
+ pr_warn("Error setting bluetooth status to %d\n", status);
return -EIO;
}
return 0;
@@ -821,7 +821,7 @@ static ssize_t store_bluetooth(struct device *dev,
static int asus_wimax_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) {
- pr_warning("Error setting wimax status to %d", status);
+ pr_warn("Error setting wimax status to %d\n", status);
return -EIO;
}
return 0;
@@ -850,7 +850,7 @@ static ssize_t store_wimax(struct device *dev,
static int asus_wwan_set(struct asus_laptop *asus, int status)
{
if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) {
- pr_warning("Error setting wwan status to %d", status);
+ pr_warn("Error setting wwan status to %d\n", status);
return -EIO;
}
return 0;
@@ -880,7 +880,7 @@ static void asus_set_display(struct asus_laptop *asus, int value)
{
/* no sanity check needed for now */
if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
- pr_warning("Error setting display\n");
+ pr_warn("Error setting display\n");
return;
}
@@ -909,7 +909,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
static void asus_als_switch(struct asus_laptop *asus, int value)
{
if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
- pr_warning("Error setting light sensor switch\n");
+ pr_warn("Error setting light sensor switch\n");
asus->light_switch = value;
}
@@ -937,7 +937,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
static void asus_als_level(struct asus_laptop *asus, int value)
{
if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
- pr_warning("Error setting light sensor level\n");
+ pr_warn("Error setting light sensor level\n");
asus->light_level = value;
}
@@ -976,7 +976,7 @@ static int asus_gps_status(struct asus_laptop *asus)
rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
NULL, &status);
if (ACPI_FAILURE(rv)) {
- pr_warning("Error reading GPS status\n");
+ pr_warn("Error reading GPS status\n");
return -ENODEV;
}
return !!status;
@@ -1284,7 +1284,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
if (ACPI_FAILURE(status))
- pr_warning("Couldn't get the DSDT table header\n");
+ pr_warn("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
@@ -1296,7 +1296,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
status =
acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
- pr_warning("Error calling BSTS\n");
+ pr_warn("Error calling BSTS\n");
else if (bsts_result)
pr_notice("BSTS called, 0x%02x returned\n",
(uint) bsts_result);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 832a3fd7c1c8..00460cb9587b 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -425,7 +425,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
if (asus->hotplug_slot) {
bus = pci_find_bus(0, 1);
if (!bus) {
- pr_warning("Unable to find PCI bus 1?\n");
+ pr_warn("Unable to find PCI bus 1?\n");
goto out_unlock;
}
@@ -436,12 +436,12 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
absent = (l == 0xffffffff);
if (blocked != absent) {
- pr_warning("BIOS says wireless lan is %s, "
- "but the pci device is %s\n",
- blocked ? "blocked" : "unblocked",
- absent ? "absent" : "present");
- pr_warning("skipped wireless hotplug as probably "
- "inappropriate for this model\n");
+ pr_warn("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warn("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
goto out_unlock;
}
@@ -500,7 +500,7 @@ static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
ACPI_SYSTEM_NOTIFY,
asus_rfkill_notify, asus);
if (ACPI_FAILURE(status))
- pr_warning("Failed to register notify on %s\n", node);
+ pr_warn("Failed to register notify on %s\n", node);
} else
return -ENODEV;
@@ -1223,7 +1223,7 @@ static int asus_wmi_sysfs_init(struct platform_device *device)
/*
* Platform device
*/
-static int __init asus_wmi_platform_init(struct asus_wmi *asus)
+static int asus_wmi_platform_init(struct asus_wmi *asus)
{
int rv;
@@ -1583,12 +1583,12 @@ static int asus_wmi_probe(struct platform_device *pdev)
int ret;
if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
- pr_warning("Management GUID not found\n");
+ pr_warn("Management GUID not found\n");
return -ENODEV;
}
if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) {
- pr_warning("Event GUID not found\n");
+ pr_warn("Event GUID not found\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index f503607c0645..d9312b3073e5 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -30,6 +30,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -581,8 +583,7 @@ static int read_led(const char *ledname, int ledmask)
if (read_acpi_int(NULL, ledname, &led_status))
return led_status;
else
- printk(KERN_WARNING "Asus ACPI: Error reading LED "
- "status\n");
+ pr_warn("Error reading LED status\n");
}
return (hotk->status & ledmask) ? 1 : 0;
}
@@ -621,8 +622,7 @@ write_led(const char __user *buffer, unsigned long count,
led_out = !led_out;
if (!write_acpi_int(hotk->handle, ledname, led_out, NULL))
- printk(KERN_WARNING "Asus ACPI: LED (%s) write failed\n",
- ledname);
+ pr_warn("LED (%s) write failed\n", ledname);
return rv;
}
@@ -679,8 +679,7 @@ static ssize_t ledd_proc_write(struct file *file, const char __user *buffer,
if (rv > 0) {
if (!write_acpi_int
(hotk->handle, hotk->methods->mt_ledd, value, NULL))
- printk(KERN_WARNING
- "Asus ACPI: LED display write failed\n");
+ pr_warn("LED display write failed\n");
else
hotk->ledd_status = (u32) value;
}
@@ -838,8 +837,7 @@ static int get_lcd_state(void)
} else {
/* We don't have to check anything if we are here */
if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd))
- printk(KERN_WARNING
- "Asus ACPI: Error reading LCD status\n");
+ pr_warn("Error reading LCD status\n");
if (hotk->model == L2D)
lcd = ~lcd;
@@ -871,7 +869,7 @@ static int set_lcd_state(int value)
the exact behaviour is simulated here */
}
if (ACPI_FAILURE(status))
- printk(KERN_WARNING "Asus ACPI: Error switching LCD\n");
+ pr_warn("Error switching LCD\n");
}
return 0;
@@ -915,13 +913,11 @@ static int read_brightness(struct backlight_device *bd)
if (hotk->methods->brightness_get) { /* SPLV/GPLV laptop */
if (!read_acpi_int(hotk->handle, hotk->methods->brightness_get,
&value))
- printk(KERN_WARNING
- "Asus ACPI: Error reading brightness\n");
+ pr_warn("Error reading brightness\n");
} else if (hotk->methods->brightness_status) { /* For D1 for example */
if (!read_acpi_int(NULL, hotk->methods->brightness_status,
&value))
- printk(KERN_WARNING
- "Asus ACPI: Error reading brightness\n");
+ pr_warn("Error reading brightness\n");
} else /* No GPLV method */
value = hotk->brightness;
return value;
@@ -939,8 +935,7 @@ static int set_brightness(int value)
if (hotk->methods->brightness_set) {
if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set,
value, NULL)) {
- printk(KERN_WARNING
- "Asus ACPI: Error changing brightness\n");
+ pr_warn("Error changing brightness\n");
ret = -EIO;
}
goto out;
@@ -955,8 +950,7 @@ static int set_brightness(int value)
NULL, NULL);
(value > 0) ? value-- : value++;
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING
- "Asus ACPI: Error changing brightness\n");
+ pr_warn("Error changing brightness\n");
ret = -EIO;
}
}
@@ -1008,7 +1002,7 @@ static void set_display(int value)
/* no sanity check needed for now */
if (!write_acpi_int(hotk->handle, hotk->methods->display_set,
value, NULL))
- printk(KERN_WARNING "Asus ACPI: Error setting display\n");
+ pr_warn("Error setting display\n");
return;
}
@@ -1021,8 +1015,7 @@ static int disp_proc_show(struct seq_file *m, void *v)
int value = 0;
if (!read_acpi_int(hotk->handle, hotk->methods->display_get, &value))
- printk(KERN_WARNING
- "Asus ACPI: Error reading display status\n");
+ pr_warn("Error reading display status\n");
value &= 0x07; /* needed for some models, shouldn't hurt others */
seq_printf(m, "%d\n", value);
return 0;
@@ -1068,7 +1061,7 @@ asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
proc = proc_create_data(name, mode, acpi_device_dir(device),
proc_fops, acpi_driver_data(device));
if (!proc) {
- printk(KERN_WARNING " Unable to create %s fs entry\n", name);
+ pr_warn(" Unable to create %s fs entry\n", name);
return -1;
}
proc->uid = asus_uid;
@@ -1085,8 +1078,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
- printk(KERN_WARNING " asus_uid and asus_gid parameters are "
- "deprecated, use chown and chmod instead!\n");
+ pr_warn(" asus_uid and asus_gid parameters are "
+ "deprecated, use chown and chmod instead!\n");
}
acpi_device_dir(device) = asus_proc_dir;
@@ -1099,8 +1092,7 @@ static int asus_hotk_add_fs(struct acpi_device *device)
proc->uid = asus_uid;
proc->gid = asus_gid;
} else {
- printk(KERN_WARNING " Unable to create " PROC_INFO
- " fs entry\n");
+ pr_warn(" Unable to create " PROC_INFO " fs entry\n");
}
if (hotk->methods->mt_wled) {
@@ -1283,20 +1275,19 @@ static int asus_hotk_get_info(void)
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
if (ACPI_FAILURE(status))
- printk(KERN_WARNING " Couldn't get the DSDT table header\n");
+ pr_warn(" Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
- printk(KERN_ERR " Hotkey initialization failed\n");
+ pr_err(" Hotkey initialization failed\n");
return -ENODEV;
}
/* This needs to be called for some laptops to init properly */
if (!read_acpi_int(hotk->handle, "BSTS", &bsts_result))
- printk(KERN_WARNING " Error calling BSTS\n");
+ pr_warn(" Error calling BSTS\n");
else if (bsts_result)
- printk(KERN_NOTICE " BSTS called, 0x%02x returned\n",
- bsts_result);
+ pr_notice(" BSTS called, 0x%02x returned\n", bsts_result);
/*
* Try to match the object returned by INIT to the specific model.
@@ -1324,23 +1315,21 @@ static int asus_hotk_get_info(void)
if (asus_info &&
strncmp(asus_info->oem_table_id, "ODEM", 4) == 0) {
hotk->model = P30;
- printk(KERN_NOTICE
- " Samsung P30 detected, supported\n");
+ pr_notice(" Samsung P30 detected, supported\n");
hotk->methods = &model_conf[hotk->model];
kfree(model);
return 0;
} else {
hotk->model = M2E;
- printk(KERN_NOTICE " unsupported model %s, trying "
- "default values\n", string);
- printk(KERN_NOTICE
- " send /proc/acpi/dsdt to the developers\n");
+ pr_notice(" unsupported model %s, trying default values\n",
+ string);
+ pr_notice(" send /proc/acpi/dsdt to the developers\n");
kfree(model);
return -ENODEV;
}
}
hotk->methods = &model_conf[hotk->model];
- printk(KERN_NOTICE " %s model detected, supported\n", string);
+ pr_notice(" %s model detected, supported\n", string);
/* Sort of per-model blacklist */
if (strncmp(string, "L2B", 3) == 0)
@@ -1385,7 +1374,7 @@ static int asus_hotk_check(void)
if (hotk->device->status.present) {
result = asus_hotk_get_info();
} else {
- printk(KERN_ERR " Hotkey device not present, aborting\n");
+ pr_err(" Hotkey device not present, aborting\n");
return -EINVAL;
}
@@ -1399,8 +1388,7 @@ static int asus_hotk_add(struct acpi_device *device)
acpi_status status = AE_OK;
int result;
- printk(KERN_NOTICE "Asus Laptop ACPI Extras version %s\n",
- ASUS_ACPI_VERSION);
+ pr_notice("Asus Laptop ACPI Extras version %s\n", ASUS_ACPI_VERSION);
hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
if (!hotk)
@@ -1428,15 +1416,14 @@ static int asus_hotk_add(struct acpi_device *device)
acpi_evaluate_object(NULL, hotk->methods->brightness_down,
NULL, NULL);
if (ACPI_FAILURE(status))
- printk(KERN_WARNING " Error changing brightness\n");
+ pr_warn(" Error changing brightness\n");
else {
status =
acpi_evaluate_object(NULL,
hotk->methods->brightness_up,
NULL, NULL);
if (ACPI_FAILURE(status))
- printk(KERN_WARNING " Strange, error changing"
- " brightness\n");
+ pr_warn(" Strange, error changing brightness\n");
}
}
@@ -1488,7 +1475,7 @@ static int __init asus_acpi_init(void)
asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir);
if (!asus_proc_dir) {
- printk(KERN_ERR "Asus ACPI: Unable to create /proc entry\n");
+ pr_err("Unable to create /proc entry\n");
acpi_bus_unregister_driver(&asus_hotk_driver);
return -ENODEV;
}
@@ -1513,7 +1500,7 @@ static int __init asus_acpi_init(void)
&asus_backlight_data,
&props);
if (IS_ERR(asus_backlight_device)) {
- printk(KERN_ERR "Could not register asus backlight device\n");
+ pr_err("Could not register asus backlight device\n");
asus_backlight_device = NULL;
asus_acpi_exit();
return -ENODEV;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index c16a27641ced..3f204fde1b02 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -68,6 +68,8 @@
* only enabled on a JHL90 board until it is verified that they work on the
* other boards too. See the extra_features variable. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -200,8 +202,8 @@ static bool extra_features;
* watching the output of address 0x4F (do an ec_transaction writing 0x33
* into 0x4F and read a few bytes from the output, like so:
* u8 writeData = 0x33;
- * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0);
- * That address is labelled "fan1 table information" in the service manual.
+ * ec_transaction(0x4F, &writeData, 1, buffer, 32);
+ * That address is labeled "fan1 table information" in the service manual.
* It should be clear which value in 'buffer' changes). This seems to be
* related to fan speed. It isn't a proper 'realtime' fan speed value
* though, because physically stopping or speeding up the fan doesn't
@@ -286,7 +288,7 @@ static int get_backlight_level(void)
static void set_backlight_state(bool on)
{
u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA;
- ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0, 0);
+ ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0);
}
@@ -294,24 +296,24 @@ static void set_backlight_state(bool on)
static void pwm_enable_control(void)
{
unsigned char writeData = PWM_ENABLE_DATA;
- ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0, 0);
+ ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0);
}
static void pwm_disable_control(void)
{
unsigned char writeData = PWM_DISABLE_DATA;
- ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0, 0);
+ ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0);
}
static void set_pwm(int pwm)
{
- ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0, 0);
+ ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0);
}
static int get_fan_rpm(void)
{
u8 value, data = FAN_DATA;
- ec_transaction(FAN_ADDRESS, &data, 1, &value, 1, 0);
+ ec_transaction(FAN_ADDRESS, &data, 1, &value, 1);
return 100 * (int)value;
}
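
The ec_transaction() calls in these hunks drop the old trailing force_poll argument, leaving the five-parameter form. As a hedged illustration of the fan-table probe that this file's comment describes (write 0x33 to command 0x4F, read 32 bytes back), using that five-argument signature; the function name and the pr_debug() dump are illustrative only and not part of the driver:

#include <linux/acpi.h>
#include <linux/kernel.h>

/* Sketch: dump the JHL90 "fan1 table information" block described in the
 * comment above. Only the 0x4F/0x33 values come from that comment. */
static void compal_dump_fan_table(void)
{
	u8 write_data = 0x33;
	u8 buffer[32];
	int i;

	if (ec_transaction(0x4F, &write_data, 1, buffer, sizeof(buffer)) < 0)
		return;

	for (i = 0; i < (int)sizeof(buffer); i++)
		pr_debug("fan1 table[%d] = 0x%02x\n", i, buffer[i]);
}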
@@ -760,16 +762,14 @@ static struct rfkill *bt_rfkill;
static int dmi_check_cb(const struct dmi_system_id *id)
{
- printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s'\n",
- id->ident);
+ pr_info("Identified laptop model '%s'\n", id->ident);
extra_features = false;
return 1;
}
static int dmi_check_cb_extra(const struct dmi_system_id *id)
{
- printk(KERN_INFO DRIVER_NAME": Identified laptop model '%s', "
- "enabling extra features\n",
+ pr_info("Identified laptop model '%s', enabling extra features\n",
id->ident);
extra_features = true;
return 1;
@@ -956,14 +956,12 @@ static int __init compal_init(void)
int ret;
if (acpi_disabled) {
- printk(KERN_ERR DRIVER_NAME": ACPI needs to be enabled for "
- "this driver to work!\n");
+ pr_err("ACPI needs to be enabled for this driver to work!\n");
return -ENODEV;
}
if (!force && !dmi_check_system(compal_dmi_table)) {
- printk(KERN_ERR DRIVER_NAME": Motherboard not recognized (You "
- "could try the module's force-parameter)");
+ pr_err("Motherboard not recognized (You could try the module's force-parameter)\n");
return -ENODEV;
}
@@ -998,8 +996,7 @@ static int __init compal_init(void)
if (ret)
goto err_rfkill;
- printk(KERN_INFO DRIVER_NAME": Driver "DRIVER_VERSION
- " successfully loaded\n");
+ pr_info("Driver " DRIVER_VERSION " successfully loaded\n");
return 0;
err_rfkill:
@@ -1064,7 +1061,7 @@ static void __exit compal_cleanup(void)
rfkill_destroy(wifi_rfkill);
rfkill_destroy(bt_rfkill);
- printk(KERN_INFO DRIVER_NAME": Driver unloaded\n");
+ pr_info("Driver unloaded\n");
}
static int __devexit compal_remove(struct platform_device *pdev)
@@ -1074,8 +1071,7 @@ static int __devexit compal_remove(struct platform_device *pdev)
if (!extra_features)
return 0;
- printk(KERN_INFO DRIVER_NAME": Unloading: resetting fan control "
- "to motherboard\n");
+ pr_info("Unloading: resetting fan control to motherboard\n");
pwm_disable_control();
data = platform_get_drvdata(pdev);
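
These conversions rely on each file defining pr_fmt() before the includes, so the pr_*() helpers prepend the module name and the hand-written "DRIVER_NAME": " prefixes can go away. A minimal userspace sketch of the same prefixing idea (the macro names mirror the kernel's, but this is not kernel code and stderr stands in for the kernel log):

#include <stdio.h>

#define KBUILD_MODNAME "compal-laptop"        /* normally set by Kbuild */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt   /* per-file message prefix */

/* Userspace stand-ins for the kernel's pr_err()/pr_warn() helpers. */
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints: compal-laptop: ACPI needs to be enabled for this driver to work! */
	pr_err("ACPI needs to be enabled for this driver to work!\n");
	return 0;
}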
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index de301aa8e5c3..d3841de6a8cf 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -434,8 +436,7 @@ static int __init dell_setup_rfkill(void)
int ret;
if (dmi_check_system(dell_blacklist)) {
- printk(KERN_INFO "dell-laptop: Blacklisted hardware detected - "
- "not enabling rfkill\n");
+ pr_info("Blacklisted hardware detected - not enabling rfkill\n");
return 0;
}
@@ -606,7 +607,7 @@ static int __init dell_init(void)
dmi_walk(find_tokens, NULL);
if (!da_tokens) {
- printk(KERN_INFO "dell-laptop: Unable to find dmi tokens\n");
+ pr_info("Unable to find dmi tokens\n");
return -ENODEV;
}
@@ -636,14 +637,13 @@ static int __init dell_init(void)
ret = dell_setup_rfkill();
if (ret) {
- printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n");
+ pr_warn("Unable to setup rfkill\n");
goto fail_rfkill;
}
ret = i8042_install_filter(dell_laptop_i8042_filter);
if (ret) {
- printk(KERN_WARNING
- "dell-laptop: Unable to install key filter\n");
+ pr_warn("Unable to install key filter\n");
goto fail_filter;
}
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
index 0ed84573ae1f..3f945457f71c 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -15,6 +15,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
@@ -138,7 +139,7 @@ static int __init dell_wmi_aio_init(void)
guid = dell_wmi_aio_find();
if (!guid) {
- pr_warning("No known WMI GUID found\n");
+ pr_warn("No known WMI GUID found\n");
return -ENXIO;
}
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 77f1d55414c6..ce790827e199 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -23,6 +23,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -141,7 +143,7 @@ static void dell_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -153,8 +155,8 @@ static void dell_wmi_notify(u32 value, void *context)
u16 *buffer_entry = (u16 *)obj->buffer.pointer;
if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
- printk(KERN_INFO "dell-wmi: Received unknown WMI event"
- " (0x%x)\n", buffer_entry[1]);
+ pr_info("Received unknown WMI event (0x%x)\n",
+ buffer_entry[1]);
kfree(obj);
return;
}
@@ -167,8 +169,7 @@ static void dell_wmi_notify(u32 value, void *context)
key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
reported_key);
if (!key) {
- printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
- reported_key);
+ pr_info("Unknown key %x pressed\n", reported_key);
} else if ((key->keycode == KEY_BRIGHTNESSUP ||
key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
/* Don't report brightness notifications that will also
@@ -275,7 +276,7 @@ static int __init dell_wmi_init(void)
acpi_status status;
if (!wmi_has_guid(DELL_EVENT_GUID)) {
- printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
+ pr_warn("No known WMI GUID found\n");
return -ENODEV;
}
@@ -290,9 +291,7 @@ static int __init dell_wmi_init(void)
dell_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
dell_wmi_input_destroy();
- printk(KERN_ERR
- "dell-wmi: Unable to register notify handler - %d\n",
- status);
+ pr_err("Unable to register notify handler - %d\n", status);
return -ENODEV;
}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 2c1abf63957f..1c45d92e2163 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -228,7 +228,7 @@ static int set_acpi(struct eeepc_laptop *eeepc, int cm, int value)
return -ENODEV;
if (write_acpi_int(eeepc->handle, method, value))
- pr_warning("Error writing %s\n", method);
+ pr_warn("Error writing %s\n", method);
return 0;
}
@@ -243,7 +243,7 @@ static int get_acpi(struct eeepc_laptop *eeepc, int cm)
return -ENODEV;
if (read_acpi_int(eeepc->handle, method, &value))
- pr_warning("Error reading %s\n", method);
+ pr_warn("Error reading %s\n", method);
return value;
}
@@ -261,7 +261,7 @@ static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
status = acpi_get_handle(eeepc->handle, (char *)method,
handle);
if (status != AE_OK) {
- pr_warning("Error finding %s\n", method);
+ pr_warn("Error finding %s\n", method);
return -ENODEV;
}
return 0;
@@ -417,7 +417,7 @@ static ssize_t store_cpufv_disabled(struct device *dev,
switch (value) {
case 0:
if (eeepc->cpufv_disabled)
- pr_warning("cpufv enabled (not officially supported "
+ pr_warn("cpufv enabled (not officially supported "
"on this model)\n");
eeepc->cpufv_disabled = false;
return rv;
@@ -609,7 +609,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
bus = port->subordinate;
if (!bus) {
- pr_warning("Unable to find PCI bus?\n");
+ pr_warn("Unable to find PCI bus 1?\n");
goto out_unlock;
}
@@ -621,12 +621,12 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
absent = (l == 0xffffffff);
if (blocked != absent) {
- pr_warning("BIOS says wireless lan is %s, "
- "but the pci device is %s\n",
+ pr_warn("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
blocked ? "blocked" : "unblocked",
absent ? "absent" : "present");
- pr_warning("skipped wireless hotplug as probably "
- "inappropriate for this model\n");
+ pr_warn("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
goto out_unlock;
}
@@ -691,7 +691,8 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
eeepc_rfkill_notify,
eeepc);
if (ACPI_FAILURE(status))
- pr_warning("Failed to register notify on %s\n", node);
+ pr_warn("Failed to register notify on %s\n", node);
+
/*
* Refresh pci hotplug in case the rfkill state was
* changed during setup.
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 649dcadd8ea3..4aa867a9b88b 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -84,7 +84,7 @@ static const struct key_entry eeepc_wmi_keymap[] = {
static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
void *context, void **retval)
{
- pr_warning("Found legacy ATKD device (%s)", EEEPC_ACPI_HID);
+ pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
*(bool *)context = true;
return AE_CTRL_TERMINATE;
}
@@ -105,12 +105,12 @@ static int eeepc_wmi_check_atkd(void)
static int eeepc_wmi_probe(struct platform_device *pdev)
{
if (eeepc_wmi_check_atkd()) {
- pr_warning("WMI device present, but legacy ATKD device is also "
- "present and enabled.");
- pr_warning("You probably booted with acpi_osi=\"Linux\" or "
- "acpi_osi=\"!Windows 2009\"");
- pr_warning("Can't load eeepc-wmi, use default acpi_osi "
- "(preferred) or eeepc-laptop");
+ pr_warn("WMI device present, but legacy ATKD device is also "
+ "present and enabled\n");
+ pr_warn("You probably booted with acpi_osi=\"Linux\" or "
+ "acpi_osi=\"!Windows 2009\"\n");
+ pr_warn("Can't load eeepc-wmi, use default acpi_osi "
+ "(preferred) or eeepc-laptop\n");
return -EBUSY;
}
return 0;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 493054c2dbe1..6b26666b37f2 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -56,6 +56,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -585,8 +587,7 @@ static struct platform_driver fujitsupf_driver = {
static void dmi_check_cb_common(const struct dmi_system_id *id)
{
acpi_handle handle;
- printk(KERN_INFO "fujitsu-laptop: Identified laptop model '%s'.\n",
- id->ident);
+ pr_info("Identified laptop model '%s'\n", id->ident);
if (use_alt_lcd_levels == -1) {
if (ACPI_SUCCESS(acpi_get_handle(NULL,
"\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
@@ -691,11 +692,11 @@ static int acpi_fujitsu_add(struct acpi_device *device)
result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
if (result) {
- printk(KERN_ERR "Error reading power state\n");
+ pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
- printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
+ pr_info("ACPI: %s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
@@ -707,7 +708,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (ACPI_FAILURE
(acpi_evaluate_object
(device->handle, METHOD_NAME__INI, NULL, NULL)))
- printk(KERN_ERR "_INI Method failed\n");
+ pr_err("_INI Method failed\n");
}
/* do config (detect defaults) */
@@ -827,7 +828,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
GFP_KERNEL);
if (error) {
- printk(KERN_ERR "kfifo_alloc failed\n");
+ pr_err("kfifo_alloc failed\n");
goto err_stop;
}
@@ -859,13 +860,13 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
if (result) {
- printk(KERN_ERR "Error reading power state\n");
+ pr_err("Error reading power state\n");
goto err_unregister_input_dev;
}
- printk(KERN_INFO "ACPI: %s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
- !device->power.state ? "on" : "off");
+ pr_info("ACPI: %s [%s] (%s)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+ !device->power.state ? "on" : "off");
fujitsu_hotkey->dev = device;
@@ -875,7 +876,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (ACPI_FAILURE
(acpi_evaluate_object
(device->handle, METHOD_NAME__INI, NULL, NULL)))
- printk(KERN_ERR "_INI Method failed\n");
+ pr_err("_INI Method failed\n");
}
i = 0;
@@ -897,8 +898,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
/* Suspect this is a keymap of the application panel, print it */
- printk(KERN_INFO "fujitsu-laptop: BTNI: [0x%x]\n",
- call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
+ pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
@@ -907,8 +907,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (result == 0) {
fujitsu_hotkey->logolamp_registered = 1;
} else {
- printk(KERN_ERR "fujitsu-laptop: Could not register "
- "LED handler for logo lamp, error %i\n", result);
+ pr_err("Could not register LED handler for logo lamp, error %i\n",
+ result);
}
}
@@ -919,8 +919,8 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (result == 0) {
fujitsu_hotkey->kblamps_registered = 1;
} else {
- printk(KERN_ERR "fujitsu-laptop: Could not register "
- "LED handler for keyboard lamps, error %i\n", result);
+ pr_err("Could not register LED handler for keyboard lamps, error %i\n",
+ result);
}
}
#endif
@@ -1169,8 +1169,7 @@ static int __init fujitsu_init(void)
fujitsu->bl_device->props.power = 0;
}
- printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION
- " successfully loaded.\n");
+ pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
return 0;
@@ -1216,7 +1215,7 @@ static void __exit fujitsu_cleanup(void)
kfree(fujitsu);
- printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n");
+ pr_info("driver unloaded\n");
}
module_init(fujitsu_init);
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 067bf36d32f3..5a34973dc164 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -26,6 +26,8 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
@@ -238,7 +240,7 @@ static int hdaps_device_init(void)
__check_latch(0x1611, 0x01))
goto out;
- printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x).\n",
+ printk(KERN_DEBUG "hdaps: initial latch check good (0x%02x)\n",
__get_latch(0x1611));
outb(0x17, 0x1610);
@@ -299,7 +301,7 @@ static int hdaps_probe(struct platform_device *dev)
if (ret)
return ret;
- printk(KERN_INFO "hdaps: device successfully initialized.\n");
+ pr_info("device successfully initialized\n");
return 0;
}
@@ -480,7 +482,7 @@ static struct attribute_group hdaps_attribute_group = {
/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
static int __init hdaps_dmi_match(const struct dmi_system_id *id)
{
- printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
+ pr_info("%s detected\n", id->ident);
return 1;
}
@@ -488,8 +490,7 @@ static int __init hdaps_dmi_match(const struct dmi_system_id *id)
static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
{
hdaps_invert = (unsigned long)id->driver_data;
- printk(KERN_INFO "hdaps: inverting axis (%u) readings.\n",
- hdaps_invert);
+ pr_info("inverting axis (%u) readings\n", hdaps_invert);
return hdaps_dmi_match(id);
}
@@ -543,7 +544,7 @@ static int __init hdaps_init(void)
int ret;
if (!dmi_check_system(hdaps_whitelist)) {
- printk(KERN_WARNING "hdaps: supported laptop not found!\n");
+ pr_warn("supported laptop not found!\n");
ret = -ENODEV;
goto out;
}
@@ -595,7 +596,7 @@ static int __init hdaps_init(void)
if (ret)
goto out_idev;
- printk(KERN_INFO "hdaps: driver successfully loaded.\n");
+ pr_info("driver successfully loaded\n");
return 0;
out_idev:
@@ -609,7 +610,7 @@ out_driver:
out_region:
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
out:
- printk(KERN_WARNING "hdaps: driver init failed (ret=%d)!\n", ret);
+ pr_warn("driver init failed (ret=%d)!\n", ret);
return ret;
}
@@ -622,7 +623,7 @@ static void __exit hdaps_exit(void)
platform_driver_unregister(&hdaps_driver);
release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
- printk(KERN_INFO "hdaps: driver unloaded.\n");
+ pr_info("driver unloaded\n");
}
module_init(hdaps_init);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1bc4a7539ba9..f94017bcdd6e 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -24,6 +24,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -54,9 +56,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_WIRELESS2_QUERY 0x1b
-#define PREFIX "HP WMI: "
-#define UNIMP "Unimplemented "
-
enum hp_wmi_radio {
HPWMI_WIFI = 0,
HPWMI_BLUETOOTH = 1,
@@ -228,9 +227,8 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
if (bios_return->return_code) {
if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE)
- printk(KERN_WARNING PREFIX "query 0x%x returned "
- "error 0x%x\n",
- query, bios_return->return_code);
+ pr_warn("query 0x%x returned error 0x%x\n",
+ query, bios_return->return_code);
kfree(obj);
return bios_return->return_code;
}
@@ -384,8 +382,7 @@ static int hp_wmi_rfkill2_refresh(void)
if (num >= state.count ||
devstate->rfkill_id != rfkill2[i].id) {
- printk(KERN_WARNING PREFIX "power configuration of "
- "the wireless devices unexpectedly changed\n");
+ pr_warn("power configuration of the wireless devices unexpectedly changed\n");
continue;
}
@@ -471,7 +468,7 @@ static void hp_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO PREFIX "bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -480,8 +477,7 @@ static void hp_wmi_notify(u32 value, void *context)
if (!obj)
return;
if (obj->type != ACPI_TYPE_BUFFER) {
- printk(KERN_INFO "hp-wmi: Unknown response received %d\n",
- obj->type);
+ pr_info("Unknown response received %d\n", obj->type);
kfree(obj);
return;
}
@@ -498,8 +494,7 @@ static void hp_wmi_notify(u32 value, void *context)
event_id = *location;
event_data = *(location + 2);
} else {
- printk(KERN_INFO "hp-wmi: Unknown buffer length %d\n",
- obj->buffer.length);
+ pr_info("Unknown buffer length %d\n", obj->buffer.length);
kfree(obj);
return;
}
@@ -527,8 +522,7 @@ static void hp_wmi_notify(u32 value, void *context)
if (!sparse_keymap_report_event(hp_wmi_input_dev,
key_code, 1, true))
- printk(KERN_INFO PREFIX "Unknown key code - 0x%x\n",
- key_code);
+ pr_info("Unknown key code - 0x%x\n", key_code);
break;
case HPWMI_WIRELESS:
if (rfkill2_count) {
@@ -550,14 +544,12 @@ static void hp_wmi_notify(u32 value, void *context)
hp_wmi_get_hw_state(HPWMI_WWAN));
break;
case HPWMI_CPU_BATTERY_THROTTLE:
- printk(KERN_INFO PREFIX UNIMP "CPU throttle because of 3 Cell"
- " battery event detected\n");
+ pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
break;
case HPWMI_LOCK_SWITCH:
break;
default:
- printk(KERN_INFO PREFIX "Unknown event_id - %d - 0x%x\n",
- event_id, event_data);
+ pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
break;
}
}
@@ -705,7 +697,7 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
return err;
if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
- printk(KERN_WARNING PREFIX "unable to parse 0x1b query output\n");
+ pr_warn("unable to parse 0x1b query output\n");
return -EINVAL;
}
@@ -727,14 +719,14 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
name = "hp-wwan";
break;
default:
- printk(KERN_WARNING PREFIX "unknown device type 0x%x\n",
- state.device[i].radio_type);
+ pr_warn("unknown device type 0x%x\n",
+ state.device[i].radio_type);
continue;
}
if (!state.device[i].vendor_id) {
- printk(KERN_WARNING PREFIX "zero device %d while %d "
- "reported\n", i, state.count);
+ pr_warn("zero device %d while %d reported\n",
+ i, state.count);
continue;
}
@@ -755,8 +747,7 @@ static int __devinit hp_wmi_rfkill2_setup(struct platform_device *device)
IS_HWBLOCKED(state.device[i].power));
if (!(state.device[i].power & HPWMI_POWER_BIOS))
- printk(KERN_INFO PREFIX "device %s blocked by BIOS\n",
- name);
+ pr_info("device %s blocked by BIOS\n", name);
err = rfkill_register(rfkill);
if (err) {
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index b1396e5b2953..811d436cd677 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -22,6 +22,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -69,9 +71,10 @@ struct ibm_rtl_table {
#define RTL_SIGNATURE 0x0000005f4c54525fULL
#define RTL_MASK 0x000000ffffffffffULL
-#define RTL_DEBUG(A, ...) do { \
- if (debug) \
- pr_info("ibm-rtl: " A, ##__VA_ARGS__ ); \
+#define RTL_DEBUG(fmt, ...) \
+do { \
+ if (debug) \
+ pr_info(fmt, ##__VA_ARGS__); \
} while (0)
static DEFINE_MUTEX(rtl_lock);
@@ -114,7 +117,7 @@ static int ibm_rtl_write(u8 value)
int ret = 0, count = 0;
static u32 cmd_port_val;
- RTL_DEBUG("%s(%d)\n", __FUNCTION__, value);
+ RTL_DEBUG("%s(%d)\n", __func__, value);
value = value == 1 ? RTL_CMD_ENTER_PRTM : RTL_CMD_EXIT_PRTM;
@@ -144,8 +147,8 @@ static int ibm_rtl_write(u8 value)
while (ioread8(&rtl_table->command)) {
msleep(10);
if (count++ > 500) {
- pr_err("ibm-rtl: Hardware not responding to "
- "mode switch request\n");
+ pr_err("Hardware not responding to "
+ "mode switch request\n");
ret = -EIO;
break;
}
@@ -250,7 +253,7 @@ static int __init ibm_rtl_init(void) {
int ret = -ENODEV, i;
if (force)
- pr_warning("ibm-rtl: module loaded by force\n");
+ pr_warn("module loaded by force\n");
/* first ensure that we are running on IBM HW */
else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
return -ENODEV;
@@ -288,19 +291,19 @@ static int __init ibm_rtl_init(void) {
if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) {
phys_addr_t addr;
unsigned int plen;
- RTL_DEBUG("found RTL_SIGNATURE at %#llx\n", (u64)tmp);
+ RTL_DEBUG("found RTL_SIGNATURE at %p\n", tmp);
rtl_table = tmp;
/* The address, value, width and offset are platform
* dependent and found in the ibm_rtl_table */
rtl_cmd_width = ioread8(&rtl_table->cmd_granularity);
rtl_cmd_type = ioread8(&rtl_table->cmd_address_type);
RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n",
- rtl_cmd_width, rtl_cmd_type);
+ rtl_cmd_width, rtl_cmd_type);
addr = ioread32(&rtl_table->cmd_port_address);
RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr);
plen = rtl_cmd_width/sizeof(char);
rtl_cmd_addr = rtl_port_map(addr, plen);
- RTL_DEBUG("rtl_cmd_addr = %#llx\n", (u64)rtl_cmd_addr);
+ RTL_DEBUG("rtl_cmd_addr = %p\n", rtl_cmd_addr);
if (!rtl_cmd_addr) {
ret = -ENOMEM;
break;
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 21b101899bae..bfdda33feb26 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -20,6 +20,8 @@
* 02110-1301, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index eacd5da7dd24..809adea4965f 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -27,6 +27,8 @@
* to get/set bandwidth.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -135,8 +137,7 @@ static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH, &arg_list,
&temp);
- printk(KERN_INFO
- "Bandwidth value was %ld: status is %d\n", state, status);
+ pr_info("Bandwidth value was %ld: status is %d\n", state, status);
if (ACPI_FAILURE(status))
return -EFAULT;
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 213e79ba68d5..f1ae5078b7ec 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -23,58 +23,48 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/input.h>
+
#include <asm/intel_scu_ipc.h>
#define DRIVER_NAME "msic_power_btn"
-#define MSIC_IRQ_STAT 0x02
- #define MSIC_IRQ_PB (1 << 0)
-#define MSIC_PB_CONFIG 0x3e
#define MSIC_PB_STATUS 0x3f
- #define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
-
-struct mfld_pb_priv {
- struct input_dev *input;
- unsigned int irq;
-};
+#define MSIC_PB_LEVEL (1 << 3) /* 1 - release, 0 - press */
static irqreturn_t mfld_pb_isr(int irq, void *dev_id)
{
- struct mfld_pb_priv *priv = dev_id;
+ struct input_dev *input = dev_id;
int ret;
u8 pbstat;
ret = intel_scu_ipc_ioread8(MSIC_PB_STATUS, &pbstat);
- if (ret < 0)
- return IRQ_HANDLED;
-
- input_event(priv->input, EV_KEY, KEY_POWER, !(pbstat & MSIC_PB_LEVEL));
- input_sync(priv->input);
+ if (ret < 0) {
+ dev_err(input->dev.parent, "Read error %d while reading"
+ " MSIC_PB_STATUS\n", ret);
+ } else {
+ input_event(input, EV_KEY, KEY_POWER,
+ !(pbstat & MSIC_PB_LEVEL));
+ input_sync(input);
+ }
return IRQ_HANDLED;
}
static int __devinit mfld_pb_probe(struct platform_device *pdev)
{
- struct mfld_pb_priv *priv;
struct input_dev *input;
- int irq;
+ int irq = platform_get_irq(pdev, 0);
int error;
- irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
- priv = kzalloc(sizeof(struct mfld_pb_priv), GFP_KERNEL);
input = input_allocate_device();
- if (!priv || !input) {
- error = -ENOMEM;
- goto err_free_mem;
+ if (!input) {
+ dev_err(&pdev->dev, "Input device allocation error\n");
+ return -ENOMEM;
}
- priv->input = input;
- priv->irq = irq;
-
input->name = pdev->name;
input->phys = "power-button/input0";
input->id.bustype = BUS_HOST;
@@ -82,42 +72,40 @@ static int __devinit mfld_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(priv->irq, NULL, mfld_pb_isr,
- 0, DRIVER_NAME, priv);
+ error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+ DRIVER_NAME, input);
if (error) {
- dev_err(&pdev->dev,
- "unable to request irq %d for mfld power button\n",
- irq);
- goto err_free_mem;
+ dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
+ "button\n", irq);
+ goto err_free_input;
}
error = input_register_device(input);
if (error) {
- dev_err(&pdev->dev,
- "unable to register input dev, error %d\n", error);
+ dev_err(&pdev->dev, "Unable to register input dev, error "
+ "%d\n", error);
goto err_free_irq;
}
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, input);
return 0;
err_free_irq:
- free_irq(priv->irq, priv);
-err_free_mem:
+ free_irq(irq, input);
+err_free_input:
input_free_device(input);
- kfree(priv);
return error;
}
static int __devexit mfld_pb_remove(struct platform_device *pdev)
{
- struct mfld_pb_priv *priv = platform_get_drvdata(pdev);
-
- free_irq(priv->irq, priv);
- input_unregister_device(priv->input);
- kfree(priv);
+ struct input_dev *input = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+ free_irq(irq, input);
+ input_unregister_device(input);
platform_set_drvdata(pdev, NULL);
+
return 0;
}
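
The powerbtn rework above passes the input device itself as the dev_id cookie, which removes the need for the mfld_pb_priv wrapper: the ISR, free_irq() and platform_get_drvdata() all recover the same pointer. A hedged sketch of that pattern in isolation (the mydrv_* names are invented; IRQF_ONESHOT is used here as a conservative choice for a threaded handler with no primary handler, whereas the driver above passes 0):

#include <linux/device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t mydrv_isr(int irq, void *dev_id)
{
	struct input_dev *input = dev_id;	/* same pointer passed below */

	/* Assumes EV_KEY/KEY_POWER capability was set during probe. */
	input_event(input, EV_KEY, KEY_POWER, 1);
	input_sync(input);
	return IRQ_HANDLED;
}

static int mydrv_wire_irq(struct platform_device *pdev,
			  struct input_dev *input, int irq)
{
	int error;

	error = request_threaded_irq(irq, NULL, mydrv_isr, IRQF_ONESHOT,
				     dev_name(&pdev->dev), input);
	if (error)
		return error;

	/* remove() can later do free_irq(irq, platform_get_drvdata(pdev)) */
	platform_set_drvdata(pdev, input);
	return 0;
}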
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index c2f4bd8013b5..3a578323122b 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -37,49 +37,50 @@
#include <asm/intel_scu_ipc.h>
/* Number of thermal sensors */
-#define MSIC_THERMAL_SENSORS 4
+#define MSIC_THERMAL_SENSORS 4
/* ADC1 - thermal registers */
-#define MSIC_THERM_ADC1CNTL1 0x1C0
-#define MSIC_ADC_ENBL 0x10
-#define MSIC_ADC_START 0x08
+#define MSIC_THERM_ADC1CNTL1 0x1C0
+#define MSIC_ADC_ENBL 0x10
+#define MSIC_ADC_START 0x08
-#define MSIC_THERM_ADC1CNTL3 0x1C2
-#define MSIC_ADCTHERM_ENBL 0x04
-#define MSIC_ADCRRDATA_ENBL 0x05
-#define MSIC_CHANL_MASK_VAL 0x0F
+#define MSIC_THERM_ADC1CNTL3 0x1C2
+#define MSIC_ADCTHERM_ENBL 0x04
+#define MSIC_ADCRRDATA_ENBL 0x05
+#define MSIC_CHANL_MASK_VAL 0x0F
-#define MSIC_STOPBIT_MASK 16
-#define MSIC_ADCTHERM_MASK 4
-#define ADC_CHANLS_MAX 15 /* Number of ADC channels */
-#define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
+#define MSIC_STOPBIT_MASK 16
+#define MSIC_ADCTHERM_MASK 4
+/* Number of ADC channels */
+#define ADC_CHANLS_MAX 15
+#define ADC_LOOP_MAX (ADC_CHANLS_MAX - MSIC_THERMAL_SENSORS)
/* ADC channel code values */
-#define SKIN_SENSOR0_CODE 0x08
-#define SKIN_SENSOR1_CODE 0x09
-#define SYS_SENSOR_CODE 0x0A
-#define MSIC_DIE_SENSOR_CODE 0x03
+#define SKIN_SENSOR0_CODE 0x08
+#define SKIN_SENSOR1_CODE 0x09
+#define SYS_SENSOR_CODE 0x0A
+#define MSIC_DIE_SENSOR_CODE 0x03
-#define SKIN_THERM_SENSOR0 0
-#define SKIN_THERM_SENSOR1 1
-#define SYS_THERM_SENSOR2 2
-#define MSIC_DIE_THERM_SENSOR3 3
+#define SKIN_THERM_SENSOR0 0
+#define SKIN_THERM_SENSOR1 1
+#define SYS_THERM_SENSOR2 2
+#define MSIC_DIE_THERM_SENSOR3 3
/* ADC code range */
-#define ADC_MAX 977
-#define ADC_MIN 162
-#define ADC_VAL0C 887
-#define ADC_VAL20C 720
-#define ADC_VAL40C 508
-#define ADC_VAL60C 315
+#define ADC_MAX 977
+#define ADC_MIN 162
+#define ADC_VAL0C 887
+#define ADC_VAL20C 720
+#define ADC_VAL40C 508
+#define ADC_VAL60C 315
/* ADC base addresses */
-#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
-#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
+#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
+#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
/* MSIC die attributes */
-#define MSIC_DIE_ADC_MIN 488
-#define MSIC_DIE_ADC_MAX 1004
+#define MSIC_DIE_ADC_MIN 488
+#define MSIC_DIE_ADC_MAX 1004
/* This holds the address of the first free ADC channel,
* among the 15 channels
@@ -87,15 +88,15 @@
static int channel_index;
struct platform_info {
- struct platform_device *pdev;
- struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
+ struct platform_device *pdev;
+ struct thermal_zone_device *tzd[MSIC_THERMAL_SENSORS];
};
struct thermal_device_info {
- unsigned int chnl_addr;
- int direct;
- /* This holds the current temperature in millidegree celsius */
- long curr_temp;
+ unsigned int chnl_addr;
+ int direct;
+ /* This holds the current temperature in millidegree celsius */
+ long curr_temp;
};
/**
@@ -106,7 +107,7 @@ struct thermal_device_info {
*/
static int to_msic_die_temp(uint16_t adc_val)
{
- return (368 * (adc_val) / 1000) - 220;
+ return (368 * (adc_val) / 1000) - 220;
}
/**
@@ -118,7 +119,7 @@ static int to_msic_die_temp(uint16_t adc_val)
*/
static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
{
- return (adc_val >= min) && (adc_val <= max);
+ return (adc_val >= min) && (adc_val <= max);
}
/**
@@ -136,35 +137,35 @@ static int is_valid_adc(uint16_t adc_val, uint16_t min, uint16_t max)
*/
static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
{
- int temp;
-
- /* Direct conversion for die temperature */
- if (direct) {
- if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
- *tp = to_msic_die_temp(adc_val) * 1000;
- return 0;
- }
- return -ERANGE;
- }
-
- if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
- return -ERANGE;
-
- /* Linear approximation for skin temperature */
- if (adc_val > ADC_VAL0C)
- temp = 177 - (adc_val/5);
- else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
- temp = 111 - (adc_val/8);
- else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
- temp = 92 - (adc_val/10);
- else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
- temp = 91 - (adc_val/10);
- else
- temp = 112 - (adc_val/6);
-
- /* Convert temperature in celsius to milli degree celsius */
- *tp = temp * 1000;
- return 0;
+ int temp;
+
+ /* Direct conversion for die temperature */
+ if (direct) {
+ if (is_valid_adc(adc_val, MSIC_DIE_ADC_MIN, MSIC_DIE_ADC_MAX)) {
+ *tp = to_msic_die_temp(adc_val) * 1000;
+ return 0;
+ }
+ return -ERANGE;
+ }
+
+ if (!is_valid_adc(adc_val, ADC_MIN, ADC_MAX))
+ return -ERANGE;
+
+ /* Linear approximation for skin temperature */
+ if (adc_val > ADC_VAL0C)
+ temp = 177 - (adc_val/5);
+ else if ((adc_val <= ADC_VAL0C) && (adc_val > ADC_VAL20C))
+ temp = 111 - (adc_val/8);
+ else if ((adc_val <= ADC_VAL20C) && (adc_val > ADC_VAL40C))
+ temp = 92 - (adc_val/10);
+ else if ((adc_val <= ADC_VAL40C) && (adc_val > ADC_VAL60C))
+ temp = 91 - (adc_val/10);
+ else
+ temp = 112 - (adc_val/6);
+
+ /* Convert temperature in celsius to milli degree celsius */
+ *tp = temp * 1000;
+ return 0;
}
/**
@@ -178,47 +179,47 @@ static int adc_to_temp(int direct, uint16_t adc_val, unsigned long *tp)
*/
static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
{
- struct thermal_device_info *td_info = tzd->devdata;
- uint16_t adc_val, addr;
- uint8_t data = 0;
- int ret;
- unsigned long curr_temp;
-
-
- addr = td_info->chnl_addr;
-
- /* Enable the msic for conversion before reading */
- ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
- if (ret)
- return ret;
-
- /* Re-toggle the RRDATARD bit (temporary workaround) */
- ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
- if (ret)
- return ret;
-
- /* Read the higher bits of data */
- ret = intel_scu_ipc_ioread8(addr, &data);
- if (ret)
- return ret;
-
- /* Shift bits to accommodate the lower two data bits */
- adc_val = (data << 2);
- addr++;
-
- ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */
- if (ret)
- return ret;
-
- /* Adding lower two bits to the higher bits */
- data &= 03;
- adc_val += data;
-
- /* Convert ADC value to temperature */
- ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
- if (ret == 0)
- *temp = td_info->curr_temp = curr_temp;
- return ret;
+ struct thermal_device_info *td_info = tzd->devdata;
+ uint16_t adc_val, addr;
+ uint8_t data = 0;
+ int ret;
+ unsigned long curr_temp;
+
+
+ addr = td_info->chnl_addr;
+
+ /* Enable the msic for conversion before reading */
+ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCRRDATA_ENBL);
+ if (ret)
+ return ret;
+
+ /* Re-toggle the RRDATARD bit (temporary workaround) */
+ ret = intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL3, MSIC_ADCTHERM_ENBL);
+ if (ret)
+ return ret;
+
+ /* Read the higher bits of data */
+ ret = intel_scu_ipc_ioread8(addr, &data);
+ if (ret)
+ return ret;
+
+ /* Shift bits to accommodate the lower two data bits */
+ adc_val = (data << 2);
+ addr++;
+
+ ret = intel_scu_ipc_ioread8(addr, &data);/* Read lower bits */
+ if (ret)
+ return ret;
+
+ /* Adding lower two bits to the higher bits */
+ data &= 03;
+ adc_val += data;
+
+ /* Convert ADC value to temperature */
+ ret = adc_to_temp(td_info->direct, adc_val, &curr_temp);
+ if (ret == 0)
+ *temp = td_info->curr_temp = curr_temp;
+ return ret;
}
/**
@@ -231,22 +232,21 @@ static int mid_read_temp(struct thermal_zone_device *tzd, unsigned long *temp)
*/
static int configure_adc(int val)
{
- int ret;
- uint8_t data;
-
- ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if (val) {
- /* Enable and start the ADC */
- data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
- } else {
- /* Just stop the ADC */
- data &= (~MSIC_ADC_START);
- }
-
- return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data);
+ int ret;
+ uint8_t data;
+
+ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
+ if (ret)
+ return ret;
+
+ if (val) {
+ /* Enable and start the ADC */
+ data |= (MSIC_ADC_ENBL | MSIC_ADC_START);
+ } else {
+ /* Just stop the ADC */
+ data &= (~MSIC_ADC_START);
+ }
+ return intel_scu_ipc_iowrite8(MSIC_THERM_ADC1CNTL1, data);
}
/**
@@ -259,30 +259,30 @@ static int configure_adc(int val)
*/
static int set_up_therm_channel(u16 base_addr)
{
- int ret;
-
- /* Enable all the sensor channels */
- ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE);
- if (ret)
- return ret;
-
- ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE);
- if (ret)
- return ret;
-
- ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE);
- if (ret)
- return ret;
-
- /* Since this is the last channel, set the stop bit
- to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
- ret = intel_scu_ipc_iowrite8(base_addr + 3,
- (MSIC_DIE_SENSOR_CODE | 0x10));
- if (ret)
- return ret;
-
- /* Enable ADC and start it */
- return configure_adc(1);
+ int ret;
+
+ /* Enable all the sensor channels */
+ ret = intel_scu_ipc_iowrite8(base_addr, SKIN_SENSOR0_CODE);
+ if (ret)
+ return ret;
+
+ ret = intel_scu_ipc_iowrite8(base_addr + 1, SKIN_SENSOR1_CODE);
+ if (ret)
+ return ret;
+
+ ret = intel_scu_ipc_iowrite8(base_addr + 2, SYS_SENSOR_CODE);
+ if (ret)
+ return ret;
+
+ /* Since this is the last channel, set the stop bit
+ * to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
+ ret = intel_scu_ipc_iowrite8(base_addr + 3,
+ (MSIC_DIE_SENSOR_CODE | 0x10));
+ if (ret)
+ return ret;
+
+ /* Enable ADC and start it */
+ return configure_adc(1);
}
/**
@@ -293,13 +293,13 @@ static int set_up_therm_channel(u16 base_addr)
*/
static int reset_stopbit(uint16_t addr)
{
- int ret;
- uint8_t data;
- ret = intel_scu_ipc_ioread8(addr, &data);
- if (ret)
- return ret;
- /* Set the stop bit to zero */
- return intel_scu_ipc_iowrite8(addr, (data & 0xEF));
+ int ret;
+ uint8_t data;
+ ret = intel_scu_ipc_ioread8(addr, &data);
+ if (ret)
+ return ret;
+ /* Set the stop bit to zero */
+ return intel_scu_ipc_iowrite8(addr, (data & 0xEF));
}
/**
@@ -317,30 +317,30 @@ static int reset_stopbit(uint16_t addr)
*/
static int find_free_channel(void)
{
- int ret;
- int i;
- uint8_t data;
-
- /* check whether ADC is enabled */
- ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
- if (ret)
- return ret;
-
- if ((data & MSIC_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < ADC_CHANLS_MAX; i++) {
- ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data);
- if (ret)
- return ret;
-
- if (data & MSIC_STOPBIT_MASK) {
- ret = i;
- break;
- }
- }
- return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
+ int ret;
+ int i;
+ uint8_t data;
+
+ /* check whether ADC is enabled */
+ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL1, &data);
+ if (ret)
+ return ret;
+
+ if ((data & MSIC_ADC_ENBL) == 0)
+ return 0;
+
+ /* ADC is already enabled; Looking for an empty channel */
+ for (i = 0; i < ADC_CHANLS_MAX; i++) {
+ ret = intel_scu_ipc_ioread8(ADC_CHNL_START_ADDR + i, &data);
+ if (ret)
+ return ret;
+
+ if (data & MSIC_STOPBIT_MASK) {
+ ret = i;
+ break;
+ }
+ }
+ return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
}
/**
@@ -351,48 +351,48 @@ static int find_free_channel(void)
*/
static int mid_initialize_adc(struct device *dev)
{
- u8 data;
- u16 base_addr;
- int ret;
-
- /*
- * Ensure that adctherm is disabled before we
- * initialize the ADC
- */
- ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data);
- if (ret)
- return ret;
-
- if (data & MSIC_ADCTHERM_MASK)
- dev_warn(dev, "ADCTHERM already set");
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel();
- if (channel_index < 0) {
- dev_err(dev, "No free ADC channels");
- return channel_index;
- }
-
- base_addr = ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- ret = reset_stopbit(base_addr);
- if (ret)
- return ret;
-
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- ret = set_up_therm_channel(base_addr);
- if (ret) {
- dev_err(dev, "unable to enable ADC");
- return ret;
- }
- dev_dbg(dev, "ADC initialization successful");
- return ret;
+ u8 data;
+ u16 base_addr;
+ int ret;
+
+ /*
+ * Ensure that adctherm is disabled before we
+ * initialize the ADC
+ */
+ ret = intel_scu_ipc_ioread8(MSIC_THERM_ADC1CNTL3, &data);
+ if (ret)
+ return ret;
+
+ if (data & MSIC_ADCTHERM_MASK)
+ dev_warn(dev, "ADCTHERM already set");
+
+ /* Index of the first channel in which the stop bit is set */
+ channel_index = find_free_channel();
+ if (channel_index < 0) {
+ dev_err(dev, "No free ADC channels");
+ return channel_index;
+ }
+
+ base_addr = ADC_CHNL_START_ADDR + channel_index;
+
+ if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
+ /* Reset stop bit for channels other than 0 and 12 */
+ ret = reset_stopbit(base_addr);
+ if (ret)
+ return ret;
+
+ /* Index of the first free channel */
+ base_addr++;
+ channel_index++;
+ }
+
+ ret = set_up_therm_channel(base_addr);
+ if (ret) {
+ dev_err(dev, "unable to enable ADC");
+ return ret;
+ }
+ dev_dbg(dev, "ADC initialization successful");
+ return ret;
}
/**
@@ -403,18 +403,18 @@ static int mid_initialize_adc(struct device *dev)
*/
static struct thermal_device_info *initialize_sensor(int index)
{
- struct thermal_device_info *td_info =
- kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
-
- if (!td_info)
- return NULL;
-
- /* Set the base addr of the channel for this sensor */
- td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
- /* Sensor 3 is direct conversion */
- if (index == 3)
- td_info->direct = 1;
- return td_info;
+ struct thermal_device_info *td_info =
+ kzalloc(sizeof(struct thermal_device_info), GFP_KERNEL);
+
+ if (!td_info)
+ return NULL;
+
+ /* Set the base addr of the channel for this sensor */
+ td_info->chnl_addr = ADC_DATA_START_ADDR + 2 * (channel_index + index);
+ /* Sensor 3 is direct conversion */
+ if (index == 3)
+ td_info->direct = 1;
+ return td_info;
}
/**
@@ -425,7 +425,7 @@ static struct thermal_device_info *initialize_sensor(int index)
*/
static int mid_thermal_resume(struct platform_device *pdev)
{
- return mid_initialize_adc(&pdev->dev);
+ return mid_initialize_adc(&pdev->dev);
}
/**
@@ -437,12 +437,12 @@ static int mid_thermal_resume(struct platform_device *pdev)
*/
static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
{
- /*
- * This just stops the ADC and does not disable it.
- * temporary workaround until we have a generic ADC driver.
- * If 0 is passed, it disables the ADC.
- */
- return configure_adc(0);
+ /*
+ * This just stops the ADC and does not disable it.
+ * temporary workaround until we have a generic ADC driver.
+ * If 0 is passed, it disables the ADC.
+ */
+ return configure_adc(0);
}
/**
@@ -453,16 +453,15 @@ static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
*/
static int read_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
{
- WARN_ON(tzd == NULL);
- return mid_read_temp(tzd, temp);
+ WARN_ON(tzd == NULL);
+ return mid_read_temp(tzd, temp);
}
/* Can't be const */
static struct thermal_zone_device_ops tzd_ops = {
- .get_temp = read_curr_temp,
+ .get_temp = read_curr_temp,
};
-
/**
* mid_thermal_probe - mfld thermal initialize
* @pdev: platform device structure
@@ -472,46 +471,45 @@ static struct thermal_zone_device_ops tzd_ops = {
*/
static int mid_thermal_probe(struct platform_device *pdev)
{
- static char *name[MSIC_THERMAL_SENSORS] = {
- "skin0", "skin1", "sys", "msicdie"
- };
-
- int ret;
- int i;
- struct platform_info *pinfo;
-
- pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
-
- /* Initializing the hardware */
- ret = mid_initialize_adc(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "ADC init failed");
- kfree(pinfo);
- return ret;
- }
-
- /* Register each sensor with the generic thermal framework*/
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
- pinfo->tzd[i] = thermal_zone_device_register(name[i],
- 0, initialize_sensor(i),
- &tzd_ops, 0, 0, 0, 0);
- if (IS_ERR(pinfo->tzd[i]))
- goto reg_fail;
- }
-
- pinfo->pdev = pdev;
- platform_set_drvdata(pdev, pinfo);
- return 0;
+ static char *name[MSIC_THERMAL_SENSORS] = {
+ "skin0", "skin1", "sys", "msicdie"
+ };
+
+ int ret;
+ int i;
+ struct platform_info *pinfo;
+
+ pinfo = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ /* Initializing the hardware */
+ ret = mid_initialize_adc(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "ADC init failed");
+ kfree(pinfo);
+ return ret;
+ }
+
+ /* Register each sensor with the generic thermal framework*/
+ for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
+ pinfo->tzd[i] = thermal_zone_device_register(name[i],
+ 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0);
+ if (IS_ERR(pinfo->tzd[i]))
+ goto reg_fail;
+ }
+
+ pinfo->pdev = pdev;
+ platform_set_drvdata(pdev, pinfo);
+ return 0;
reg_fail:
- ret = PTR_ERR(pinfo->tzd[i]);
- while (--i >= 0)
- thermal_zone_device_unregister(pinfo->tzd[i]);
- configure_adc(0);
- kfree(pinfo);
- return ret;
+ ret = PTR_ERR(pinfo->tzd[i]);
+ while (--i >= 0)
+ thermal_zone_device_unregister(pinfo->tzd[i]);
+ configure_adc(0);
+ kfree(pinfo);
+ return ret;
}
/**
@@ -523,49 +521,46 @@ reg_fail:
*/
static int mid_thermal_remove(struct platform_device *pdev)
{
- int i;
- struct platform_info *pinfo = platform_get_drvdata(pdev);
+ int i;
+ struct platform_info *pinfo = platform_get_drvdata(pdev);
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
- thermal_zone_device_unregister(pinfo->tzd[i]);
+ for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
+ thermal_zone_device_unregister(pinfo->tzd[i]);
- platform_set_drvdata(pdev, NULL);
+ kfree(pinfo);
+ platform_set_drvdata(pdev, NULL);
- /* Stop the ADC */
- return configure_adc(0);
+ /* Stop the ADC */
+ return configure_adc(0);
}
-/*********************************************************************
- * Driver initialisation and finalization
- *********************************************************************/
-
#define DRIVER_NAME "msic_sensor"
static const struct platform_device_id therm_id_table[] = {
- { DRIVER_NAME, 1 },
- { }
+ { DRIVER_NAME, 1 },
+ { }
};
static struct platform_driver mid_thermal_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
- .probe = mid_thermal_probe,
- .suspend = mid_thermal_suspend,
- .resume = mid_thermal_resume,
- .remove = __devexit_p(mid_thermal_remove),
- .id_table = therm_id_table,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = mid_thermal_probe,
+ .suspend = mid_thermal_suspend,
+ .resume = mid_thermal_resume,
+ .remove = __devexit_p(mid_thermal_remove),
+ .id_table = therm_id_table,
};
static int __init mid_thermal_module_init(void)
{
- return platform_driver_register(&mid_thermal_driver);
+ return platform_driver_register(&mid_thermal_driver);
}
static void __exit mid_thermal_module_exit(void)
{
- platform_driver_unregister(&mid_thermal_driver);
+ platform_driver_unregister(&mid_thermal_driver);
}
module_init(mid_thermal_module_init);
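
For reference, the piecewise linear mapping in adc_to_temp() above can be sanity-checked on the host with a small standalone program; the breakpoints are copied from the driver, and the sample value of 600 counts is arbitrary:

#include <stdio.h>

#define ADC_MAX    977
#define ADC_MIN    162
#define ADC_VAL0C  887
#define ADC_VAL20C 720
#define ADC_VAL40C 508
#define ADC_VAL60C 315

/* Same skin-sensor approximation as adc_to_temp(), direct mode omitted. */
static int skin_adc_to_mcelsius(int adc_val, long *mc)
{
	int temp;

	if (adc_val < ADC_MIN || adc_val > ADC_MAX)
		return -1;

	if (adc_val > ADC_VAL0C)
		temp = 177 - (adc_val / 5);
	else if (adc_val > ADC_VAL20C)
		temp = 111 - (adc_val / 8);
	else if (adc_val > ADC_VAL40C)
		temp = 92 - (adc_val / 10);
	else if (adc_val > ADC_VAL60C)
		temp = 91 - (adc_val / 10);
	else
		temp = 112 - (adc_val / 6);

	*mc = temp * 1000L;
	return 0;
}

int main(void)
{
	long mc;

	/* 600 counts falls in the 20..40 C segment: 92 - 600/10 = 32 C */
	if (!skin_adc_to_mcelsius(600, &mc))
		printf("600 counts -> %ld millidegrees C\n", mc);
	return 0;
}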
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
new file mode 100644
index 000000000000..e936364a609d
--- /dev/null
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -0,0 +1,396 @@
+/*
+ * intel_oaktrail.c - Intel OakTrail Platform support.
+ *
+ * Copyright (C) 2010-2011 Intel Corporation
+ * Author: Yin Kangkai (kangkai.yin@intel.com)
+ *
+ * based on Compal driver, Copyright (C) 2008 Cezary Jackiewicz
+ * <cezary.jackiewicz (at) gmail.com>, based on MSI driver
+ * Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ * This driver does the following:
+ * 1. registers itself in the Linux backlight control in
+ * /sys/class/backlight/intel_oaktrail/
+ *
+ * 2. registers in the rfkill subsystem here: /sys/class/rfkill/rfkillX/
+ * for these components: wifi, bluetooth, wwan (3g), gps
+ *
+ * This driver might work on other products based on Oaktrail. If you
+ * want to try it, you can pass force=1 as an argument to the module,
+ * which will force it to load even when the DMI data doesn't identify
+ * the product as compatible.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/rfkill.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+
+#define DRIVER_NAME "intel_oaktrail"
+#define DRIVER_VERSION "0.4ac1"
+
+/*
+ * This is the device status address in EC space, and the control bit
+ * definitions:
+ *
+ * (1 << 0): Camera enable/disable, RW.
+ * (1 << 1): Bluetooth enable/disable, RW.
+ * (1 << 2): GPS enable/disable, RW.
+ * (1 << 3): WiFi enable/disable, RW.
+ * (1 << 4): WWAN (3G) enable/disalbe, RW.
+ * (1 << 5): Touchscreen enable/disable, Read Only.
+ */
+#define OT_EC_DEVICE_STATE_ADDRESS 0xD6
+
+#define OT_EC_CAMERA_MASK (1 << 0)
+#define OT_EC_BT_MASK (1 << 1)
+#define OT_EC_GPS_MASK (1 << 2)
+#define OT_EC_WIFI_MASK (1 << 3)
+#define OT_EC_WWAN_MASK (1 << 4)
+#define OT_EC_TS_MASK (1 << 5)
+
+/*
+ * This is the address in EC space and commands used to control LCD backlight:
+ *
+ * Two steps needed to change the LCD backlight:
+ * 1. write the backlight percentage into OT_EC_BL_BRIGHTNESS_ADDRESS;
+ * 2. write OT_EC_BL_CONTROL_ON_DATA into OT_EC_BL_CONTROL_ADDRESS.
+ *
+ * To read the LCD backlight, just read out the value from
+ * OT_EC_BL_BRIGHTNESS_ADDRESS.
+ *
+ * LCD backlight brightness range: 0 - 100 (OT_EC_BL_BRIGHTNESS_MAX)
+ */
+#define OT_EC_BL_BRIGHTNESS_ADDRESS 0x44
+#define OT_EC_BL_BRIGHTNESS_MAX 100
+#define OT_EC_BL_CONTROL_ADDRESS 0x3A
+#define OT_EC_BL_CONTROL_ON_DATA 0x1A
+
+
+static int force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+static struct platform_device *oaktrail_device;
+static struct backlight_device *oaktrail_bl_device;
+static struct rfkill *bt_rfkill;
+static struct rfkill *gps_rfkill;
+static struct rfkill *wifi_rfkill;
+static struct rfkill *wwan_rfkill;
+
+
+/* rfkill */
+static int oaktrail_rfkill_set(void *data, bool blocked)
+{
+ u8 value;
+ u8 result;
+ unsigned long radio = (unsigned long) data;
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &result);
+
+ if (!blocked)
+ value = (u8) (result | radio);
+ else
+ value = (u8) (result & ~radio);
+
+ ec_write(OT_EC_DEVICE_STATE_ADDRESS, value);
+
+ return 0;
+}
+
+static const struct rfkill_ops oaktrail_rfkill_ops = {
+ .set_block = oaktrail_rfkill_set,
+};
+
+static struct rfkill *oaktrail_rfkill_new(char *name, enum rfkill_type type,
+ unsigned long mask)
+{
+ struct rfkill *rfkill_dev;
+ u8 value;
+ int err;
+
+ rfkill_dev = rfkill_alloc(name, &oaktrail_device->dev, type,
+ &oaktrail_rfkill_ops, (void *)mask);
+ if (!rfkill_dev)
+ return ERR_PTR(-ENOMEM);
+
+ ec_read(OT_EC_DEVICE_STATE_ADDRESS, &value);
+ rfkill_init_sw_state(rfkill_dev, (value & mask) != 1);
+
+ err = rfkill_register(rfkill_dev);
+ if (err) {
+ rfkill_destroy(rfkill_dev);
+ return ERR_PTR(err);
+ }
+
+ return rfkill_dev;
+}
+
+static inline void __oaktrail_rfkill_cleanup(struct rfkill *rf)
+{
+ if (rf) {
+ rfkill_unregister(rf);
+ rfkill_destroy(rf);
+ }
+}
+
+static void oaktrail_rfkill_cleanup(void)
+{
+ __oaktrail_rfkill_cleanup(wifi_rfkill);
+ __oaktrail_rfkill_cleanup(bt_rfkill);
+ __oaktrail_rfkill_cleanup(gps_rfkill);
+ __oaktrail_rfkill_cleanup(wwan_rfkill);
+}
+
+static int oaktrail_rfkill_init(void)
+{
+ int ret;
+
+ wifi_rfkill = oaktrail_rfkill_new("oaktrail-wifi",
+ RFKILL_TYPE_WLAN,
+ OT_EC_WIFI_MASK);
+ if (IS_ERR(wifi_rfkill)) {
+ ret = PTR_ERR(wifi_rfkill);
+ wifi_rfkill = NULL;
+ goto cleanup;
+ }
+
+ bt_rfkill = oaktrail_rfkill_new("oaktrail-bluetooth",
+ RFKILL_TYPE_BLUETOOTH,
+ OT_EC_BT_MASK);
+ if (IS_ERR(bt_rfkill)) {
+ ret = PTR_ERR(bt_rfkill);
+ bt_rfkill = NULL;
+ goto cleanup;
+ }
+
+ gps_rfkill = oaktrail_rfkill_new("oaktrail-gps",
+ RFKILL_TYPE_GPS,
+ OT_EC_GPS_MASK);
+ if (IS_ERR(gps_rfkill)) {
+ ret = PTR_ERR(gps_rfkill);
+ gps_rfkill = NULL;
+ goto cleanup;
+ }
+
+ wwan_rfkill = oaktrail_rfkill_new("oaktrail-wwan",
+ RFKILL_TYPE_WWAN,
+ OT_EC_WWAN_MASK);
+ if (IS_ERR(wwan_rfkill)) {
+ ret = PTR_ERR(wwan_rfkill);
+ wwan_rfkill = NULL;
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ oaktrail_rfkill_cleanup();
+ return ret;
+}
+
+
+/* backlight */
+static int get_backlight_brightness(struct backlight_device *b)
+{
+ u8 value;
+ ec_read(OT_EC_BL_BRIGHTNESS_ADDRESS, &value);
+
+ return value;
+}
+
+static int set_backlight_brightness(struct backlight_device *b)
+{
+ u8 percent = (u8) b->props.brightness;
+ if (percent < 0 || percent > OT_EC_BL_BRIGHTNESS_MAX)
+ return -EINVAL;
+
+ ec_write(OT_EC_BL_BRIGHTNESS_ADDRESS, percent);
+ ec_write(OT_EC_BL_CONTROL_ADDRESS, OT_EC_BL_CONTROL_ON_DATA);
+
+ return 0;
+}
+
+static const struct backlight_ops oaktrail_bl_ops = {
+ .get_brightness = get_backlight_brightness,
+ .update_status = set_backlight_brightness,
+};
+
+static int oaktrail_backlight_init(void)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
+ bd = backlight_device_register(DRIVER_NAME,
+ &oaktrail_device->dev, NULL,
+ &oaktrail_bl_ops,
+ &props);
+
+ if (IS_ERR(bd)) {
+ oaktrail_bl_device = NULL;
+ pr_warning("Unable to register backlight device\n");
+ return PTR_ERR(bd);
+ }
+
+ oaktrail_bl_device = bd;
+
+ bd->props.brightness = get_backlight_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static void oaktrail_backlight_exit(void)
+{
+ if (oaktrail_bl_device)
+ backlight_device_unregister(oaktrail_bl_device);
+}
+
+static int __devinit oaktrail_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int __devexit oaktrail_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver oaktrail_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = oaktrail_probe,
+ .remove = __devexit_p(oaktrail_remove)
+};
+
+static int dmi_check_cb(const struct dmi_system_id *id)
+{
+ pr_info("Identified model '%s'\n", id->ident);
+ return 0;
+}
+
+static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
+ {
+ .ident = "OakTrail platform",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "OakTrail platform"),
+ },
+ .callback = dmi_check_cb
+ },
+ { }
+};
+
+static int __init oaktrail_init(void)
+{
+ int ret;
+
+ if (acpi_disabled) {
+ pr_err("ACPI needs to be enabled for this driver to work!\n");
+ return -ENODEV;
+ }
+
+ if (!force && !dmi_check_system(oaktrail_dmi_table)) {
+ pr_err("Platform not recognized (You could try the module's force-parameter)");
+ return -ENODEV;
+ }
+
+ ret = platform_driver_register(&oaktrail_driver);
+ if (ret) {
+ pr_warning("Unable to register platform driver\n");
+ goto err_driver_reg;
+ }
+
+ oaktrail_device = platform_device_alloc(DRIVER_NAME, -1);
+ if (!oaktrail_device) {
+ pr_warning("Unable to allocate platform device\n");
+ ret = -ENOMEM;
+ goto err_device_alloc;
+ }
+
+ ret = platform_device_add(oaktrail_device);
+ if (ret) {
+ pr_warning("Unable to add platform device\n");
+ goto err_device_add;
+ }
+
+ if (!acpi_video_backlight_support()) {
+ ret = oaktrail_backlight_init();
+ if (ret)
+ goto err_backlight;
+
+ } else
+ pr_info("Backlight controlled by ACPI video driver\n");
+
+ ret = oaktrail_rfkill_init();
+ if (ret) {
+ pr_warning("Setup rfkill failed\n");
+ goto err_rfkill;
+ }
+
+ pr_info("Driver "DRIVER_VERSION" successfully loaded\n");
+ return 0;
+
+err_rfkill:
+ oaktrail_backlight_exit();
+err_backlight:
+ platform_device_del(oaktrail_device);
+err_device_add:
+ platform_device_put(oaktrail_device);
+err_device_alloc:
+ platform_driver_unregister(&oaktrail_driver);
+err_driver_reg:
+
+ return ret;
+}
+
+static void __exit oaktrail_cleanup(void)
+{
+ oaktrail_backlight_exit();
+ oaktrail_rfkill_cleanup();
+ platform_device_unregister(oaktrail_device);
+ platform_driver_unregister(&oaktrail_driver);
+
+ pr_info("Driver unloaded\n");
+}
+
+module_init(oaktrail_init);
+module_exit(oaktrail_cleanup);
+
+MODULE_AUTHOR("Yin Kangkai (kangkai.yin@intel.com)");
+MODULE_DESCRIPTION("Intel Oaktrail Platform ACPI Extras");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("dmi:*:svnIntelCorporation:pnOakTrailplatform:*");
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 464bb3fc4d88..1686c1e07d5d 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -19,6 +19,8 @@
* Moorestown platform PMIC chip
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -90,8 +92,7 @@ static void pmic_program_irqtype(int gpio, int type)
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
if (offset > 8) {
- printk(KERN_ERR
- "%s: only pin 0-7 support input\n", __func__);
+ pr_err("only pin 0-7 support input\n");
return -1;/* we only have 8 GPIO can use as input */
}
return intel_scu_ipc_update_register(GPIO0 + offset,
@@ -116,8 +117,7 @@ static int pmic_gpio_direction_output(struct gpio_chip *chip,
value ? 1 << (offset - 16) : 0,
1 << (offset - 16));
else {
- printk(KERN_ERR
- "%s: invalid PMIC GPIO pin %d!\n", __func__, offset);
+ pr_err("invalid PMIC GPIO pin %d!\n", offset);
WARN_ON(1);
}
@@ -260,7 +260,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
/* setting up SRAM mapping for GPIOINT register */
pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
if (!pg->gpiointr) {
- printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__);
+ pr_err("Can not map GPIOINT\n");
retval = -EINVAL;
goto err2;
}
@@ -281,13 +281,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
pg->chip.dev = dev;
retval = gpiochip_add(&pg->chip);
if (retval) {
- printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
+ pr_err("Can not add pmic gpio chip\n");
goto err;
}
retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
if (retval) {
- printk(KERN_WARNING "pmic: Interrupt request failed\n");
+ pr_warn("Interrupt request failed\n");
goto err;
}
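
The intel_pmic_gpio.c hunks above show the conversion pattern applied throughout this series: define pr_fmt() before the first include so that pr_err()/pr_warn() prefix every message automatically, instead of repeating __func__ or a driver prefix in each printk() call. A minimal sketch of the idiom follows; the "demo" module, its init function, and the message are placeholders, not taken from any driver touched here:

/*
 * Sketch of the pr_fmt() idiom used in this series; "demo" is a
 * placeholder module name.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* Expands to printk(KERN_ERR "%s: nothing to do\n", __func__) */
	pr_err("nothing to do\n");
	return -ENODEV;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
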
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 23fb2afda00b..3ff629df9f01 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -135,7 +135,7 @@ static int set_lcd_level(int level)
buf[1] = (u8) (level*31);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
- NULL, 0, 1);
+ NULL, 0);
}
static int get_lcd_level(void)
@@ -144,7 +144,7 @@ static int get_lcd_level(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -157,7 +157,7 @@ static int get_auto_brightness(void)
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -172,7 +172,7 @@ static int set_auto_brightness(int enable)
wdata[0] = 4;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
- &rdata, 1, 1);
+ &rdata, 1);
if (result < 0)
return result;
@@ -180,7 +180,7 @@ static int set_auto_brightness(int enable)
wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
- NULL, 0, 1);
+ NULL, 0);
}
static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
@@ -217,7 +217,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
u8 wdata = 0, rdata;
int result;
- result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1);
+ result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
if (result < 0)
return -1;
@@ -447,7 +447,7 @@ static struct platform_device *msipf_device;
static int dmi_check_cb(const struct dmi_system_id *id)
{
- pr_info("Identified laptop model '%s'.\n", id->ident);
+ pr_info("Identified laptop model '%s'\n", id->ident);
return 1;
}
@@ -800,7 +800,7 @@ static void msi_laptop_input_destroy(void)
input_unregister_device(msi_laptop_input_dev);
}
-static int load_scm_model_init(struct platform_device *sdev)
+static int __init load_scm_model_init(struct platform_device *sdev)
{
u8 data;
int result;
@@ -875,8 +875,7 @@ static int __init msi_init(void)
/* Register backlight stuff */
if (acpi_video_backlight_support()) {
- pr_info("Brightness ignored, must be controlled "
- "by ACPI video driver\n");
+ pr_info("Brightness ignored, must be controlled by ACPI video driver\n");
} else {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -930,7 +929,7 @@ static int __init msi_init(void)
if (auto_brightness != 2)
set_auto_brightness(auto_brightness);
- pr_info("driver "MSI_DRIVER_VERSION" successfully loaded.\n");
+ pr_info("driver " MSI_DRIVER_VERSION " successfully loaded\n");
return 0;
@@ -978,7 +977,7 @@ static void __exit msi_cleanup(void)
if (auto_brightness != 2)
set_auto_brightness(1);
- pr_info("driver unloaded.\n");
+ pr_info("driver unloaded\n");
}
module_init(msi_init);
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index d5419c9ec07a..c832e3356cd6 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/input.h>
@@ -36,13 +37,10 @@ MODULE_ALIAS("wmi:551A1F84-FBDD-4125-91DB-3EA8F44F1D45");
MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
#define DRV_NAME "msi-wmi"
-#define DRV_PFX DRV_NAME ": "
#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
-#define dprintk(msg...) pr_debug(DRV_PFX msg)
-
#define SCANCODE_BASE 0xD0
#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE
#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1)
@@ -78,7 +76,7 @@ static int msi_wmi_query_block(int instance, int *ret)
if (!obj || obj->type != ACPI_TYPE_INTEGER) {
if (obj) {
- printk(KERN_ERR DRV_PFX "query block returned object "
+ pr_err("query block returned object "
"type: %d - buffer length:%d\n", obj->type,
obj->type == ACPI_TYPE_BUFFER ?
obj->buffer.length : 0);
@@ -97,8 +95,8 @@ static int msi_wmi_set_block(int instance, int value)
struct acpi_buffer input = { sizeof(int), &value };
- dprintk("Going to set block of instance: %d - value: %d\n",
- instance, value);
+ pr_debug("Going to set block of instance: %d - value: %d\n",
+ instance, value);
status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input);
@@ -112,20 +110,19 @@ static int bl_get(struct backlight_device *bd)
/* Instance 1 is "get backlight", cmp with DSDT */
err = msi_wmi_query_block(1, &ret);
if (err) {
- printk(KERN_ERR DRV_PFX "Could not query backlight: %d\n", err);
+ pr_err("Could not query backlight: %d\n", err);
return -EINVAL;
}
- dprintk("Get: Query block returned: %d\n", ret);
+ pr_debug("Get: Query block returned: %d\n", ret);
for (level = 0; level < ARRAY_SIZE(backlight_map); level++) {
if (backlight_map[level] == ret) {
- dprintk("Current backlight level: 0x%X - index: %d\n",
- backlight_map[level], level);
+ pr_debug("Current backlight level: 0x%X - index: %d\n",
+ backlight_map[level], level);
break;
}
}
if (level == ARRAY_SIZE(backlight_map)) {
- printk(KERN_ERR DRV_PFX "get: Invalid brightness value: 0x%X\n",
- ret);
+ pr_err("get: Invalid brightness value: 0x%X\n", ret);
return -EINVAL;
}
return level;
@@ -156,7 +153,7 @@ static void msi_wmi_notify(u32 value, void *context)
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- printk(KERN_INFO DRV_PFX "bad event status 0x%x\n", status);
+ pr_info("bad event status 0x%x\n", status);
return;
}
@@ -164,7 +161,7 @@ static void msi_wmi_notify(u32 value, void *context)
if (obj && obj->type == ACPI_TYPE_INTEGER) {
int eventcode = obj->integer.value;
- dprintk("Eventcode: 0x%x\n", eventcode);
+ pr_debug("Eventcode: 0x%x\n", eventcode);
key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
eventcode);
if (key) {
@@ -175,8 +172,8 @@ static void msi_wmi_notify(u32 value, void *context)
/* Ignore event if the same event happened in a 50 ms
timeframe -> Key press may result in 10-20 GPEs */
if (ktime_to_us(diff) < 1000 * 50) {
- dprintk("Suppressed key event 0x%X - "
- "Last press was %lld us ago\n",
+ pr_debug("Suppressed key event 0x%X - "
+ "Last press was %lld us ago\n",
key->code, ktime_to_us(diff));
return;
}
@@ -187,17 +184,16 @@ static void msi_wmi_notify(u32 value, void *context)
(!acpi_video_backlight_support() ||
(key->code != MSI_WMI_BRIGHTNESSUP &&
key->code != MSI_WMI_BRIGHTNESSDOWN))) {
- dprintk("Send key: 0x%X - "
- "Input layer keycode: %d\n", key->code,
- key->keycode);
+ pr_debug("Send key: 0x%X - "
+ "Input layer keycode: %d\n",
+ key->code, key->keycode);
sparse_keymap_report_entry(msi_wmi_input_dev,
key, 1, true);
}
} else
- printk(KERN_INFO "Unknown key pressed - %x\n",
- eventcode);
+ pr_info("Unknown key pressed - %x\n", eventcode);
} else
- printk(KERN_INFO DRV_PFX "Unknown event received\n");
+ pr_info("Unknown event received\n");
kfree(response.pointer);
}
@@ -238,8 +234,7 @@ static int __init msi_wmi_init(void)
int err;
if (!wmi_has_guid(MSIWMI_EVENT_GUID)) {
- printk(KERN_ERR
- "This machine doesn't have MSI-hotkeys through WMI\n");
+ pr_err("This machine doesn't have MSI-hotkeys through WMI\n");
return -ENODEV;
}
err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
@@ -270,7 +265,7 @@ static int __init msi_wmi_init(void)
backlight->props.brightness = err;
}
- dprintk("Event handler installed\n");
+ pr_debug("Event handler installed\n");
return 0;
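
msi-wmi's private dprintk() wrapper is dropped above in favour of pr_debug(), which is compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled yet still type-checks its format arguments. A hedged sketch of the replacement; the "demo" module and the 0xd0 event code are placeholders:

/*
 * Sketch only: pr_debug() standing in for a driver-private dprintk()
 * macro.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* Compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled,
	 * but the format arguments are still type-checked either way. */
	pr_debug("eventcode: 0x%x\n", 0xd0);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
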
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 6fe8cd6e23b5..bbd182e178cb 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -42,6 +42,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -70,10 +72,10 @@
#include <linux/miscdevice.h>
#endif
-#define DRV_PFX "sony-laptop: "
-#define dprintk(msg...) do { \
- if (debug) \
- pr_warn(DRV_PFX msg); \
+#define dprintk(fmt, ...) \
+do { \
+ if (debug) \
+ pr_warn(fmt, ##__VA_ARGS__); \
} while (0)
#define SONY_LAPTOP_DRIVER_VERSION "0.6"
@@ -418,7 +420,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
error = kfifo_alloc(&sony_laptop_input.fifo,
SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
if (error) {
- pr_err(DRV_PFX "kfifo_alloc failed\n");
+ pr_err("kfifo_alloc failed\n");
goto err_dec_users;
}
@@ -702,7 +704,7 @@ static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
return 0;
}
- pr_warn(DRV_PFX "acpi_callreadfunc failed\n");
+ pr_warn("acpi_callreadfunc failed\n");
return -1;
}
@@ -728,8 +730,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
if (status == AE_OK) {
if (result != NULL) {
if (out_obj.type != ACPI_TYPE_INTEGER) {
- pr_warn(DRV_PFX "acpi_evaluate_object bad "
- "return type\n");
+ pr_warn("acpi_evaluate_object bad return type\n");
return -1;
}
*result = out_obj.integer.value;
@@ -737,7 +738,7 @@ static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
return 0;
}
- pr_warn(DRV_PFX "acpi_evaluate_object failed\n");
+ pr_warn("acpi_evaluate_object failed\n");
return -1;
}
@@ -961,7 +962,6 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
static int sony_nc_get_brightness_ng(struct backlight_device *bd)
{
int result;
- int *handle = (int *)bl_get_data(bd);
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
@@ -973,7 +973,6 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
static int sony_nc_update_status_ng(struct backlight_device *bd)
{
int value, result;
- int *handle = (int *)bl_get_data(bd);
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
@@ -1104,10 +1103,8 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
}
if (!key_event->data)
- pr_info(DRV_PFX
- "Unknown event: 0x%x 0x%x\n",
- key_handle,
- ev);
+ pr_info("Unknown event: 0x%x 0x%x\n",
+ key_handle, ev);
else
sony_laptop_report_input_event(ev);
}
@@ -1128,7 +1125,7 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
struct acpi_device_info *info;
if (ACPI_SUCCESS(acpi_get_object_info(handle, &info))) {
- pr_warn(DRV_PFX "method: name: %4.4s, args %X\n",
+ pr_warn("method: name: %4.4s, args %X\n",
(char *)&info->name, info->param_count);
kfree(info);
@@ -1169,7 +1166,7 @@ static int sony_nc_resume(struct acpi_device *device)
ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
item->value, NULL);
if (ret < 0) {
- pr_err(DRV_PFX "%s: %d\n", __func__, ret);
+ pr_err("%s: %d\n", __func__, ret);
break;
}
}
@@ -1336,12 +1333,12 @@ static void sony_nc_rfkill_setup(struct acpi_device *device)
device_enum = (union acpi_object *) buffer.pointer;
if (!device_enum) {
- pr_err(DRV_PFX "No SN06 return object.");
+ pr_err("No SN06 return object\n");
goto out_no_enum;
}
if (device_enum->type != ACPI_TYPE_BUFFER) {
- pr_err(DRV_PFX "Invalid SN06 return object 0x%.2x\n",
- device_enum->type);
+ pr_err("Invalid SN06 return object 0x%.2x\n",
+ device_enum->type);
goto out_no_enum;
}
@@ -1662,7 +1659,7 @@ static void sony_nc_backlight_setup(void)
ops, &props);
if (IS_ERR(sony_bl_props.dev)) {
- pr_warn(DRV_PFX "unable to register backlight device\n");
+ pr_warn("unable to register backlight device\n");
sony_bl_props.dev = NULL;
} else
sony_bl_props.dev->props.brightness =
@@ -1682,8 +1679,7 @@ static int sony_nc_add(struct acpi_device *device)
acpi_handle handle;
struct sony_nc_value *item;
- pr_info(DRV_PFX "%s v%s.\n", SONY_NC_DRIVER_NAME,
- SONY_LAPTOP_DRIVER_VERSION);
+ pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
sony_nc_acpi_device = device;
strcpy(acpi_device_class(device), "sony/hotkey");
@@ -1708,7 +1704,7 @@ static int sony_nc_add(struct acpi_device *device)
sony_nc_acpi_handle, 1, sony_walk_callback,
NULL, NULL, NULL);
if (ACPI_FAILURE(status)) {
- pr_warn(DRV_PFX "unable to walk acpi resources\n");
+ pr_warn("unable to walk acpi resources\n");
result = -ENODEV;
goto outpresent;
}
@@ -1736,13 +1732,12 @@ static int sony_nc_add(struct acpi_device *device)
/* setup input devices and helper fifo */
result = sony_laptop_setup_input(device);
if (result) {
- pr_err(DRV_PFX "Unable to create input devices.\n");
+ pr_err("Unable to create input devices\n");
goto outkbdbacklight;
}
if (acpi_video_backlight_support()) {
- pr_info(DRV_PFX "brightness ignored, must be "
- "controlled by ACPI video driver\n");
+ pr_info("brightness ignored, must be controlled by ACPI video driver\n");
} else {
sony_nc_backlight_setup();
}
@@ -2265,9 +2260,9 @@ out:
if (pcidev)
pci_dev_put(pcidev);
- pr_info(DRV_PFX "detected Type%d model\n",
- dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
- dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
+ pr_info("detected Type%d model\n",
+ dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
+ dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
}
/* camera tests and poweron/poweroff */
@@ -2313,7 +2308,7 @@ static int __sony_pic_camera_ready(void)
static int __sony_pic_camera_off(void)
{
if (!camera) {
- pr_warn(DRV_PFX "camera control not enabled\n");
+ pr_warn("camera control not enabled\n");
return -ENODEV;
}
@@ -2333,7 +2328,7 @@ static int __sony_pic_camera_on(void)
int i, j, x;
if (!camera) {
- pr_warn(DRV_PFX "camera control not enabled\n");
+ pr_warn("camera control not enabled\n");
return -ENODEV;
}
@@ -2356,7 +2351,7 @@ static int __sony_pic_camera_on(void)
}
if (j == 0) {
- pr_warn(DRV_PFX "failed to power on camera\n");
+ pr_warn("failed to power on camera\n");
return -ENODEV;
}
@@ -2412,8 +2407,7 @@ int sony_pic_camera_command(int command, u8 value)
ITERATIONS_SHORT);
break;
default:
- pr_err(DRV_PFX "sony_pic_camera_command invalid: %d\n",
- command);
+ pr_err("sony_pic_camera_command invalid: %d\n", command);
break;
}
mutex_unlock(&spic_dev.lock);
@@ -2819,7 +2813,7 @@ static int sonypi_compat_init(void)
error =
kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
if (error) {
- pr_err(DRV_PFX "kfifo_alloc failed\n");
+ pr_err("kfifo_alloc failed\n");
return error;
}
@@ -2829,12 +2823,12 @@ static int sonypi_compat_init(void)
sonypi_misc_device.minor = minor;
error = misc_register(&sonypi_misc_device);
if (error) {
- pr_err(DRV_PFX "misc_register failed\n");
+ pr_err("misc_register failed\n");
goto err_free_kfifo;
}
if (minor == -1)
- pr_info(DRV_PFX "device allocated minor is %d\n",
- sonypi_misc_device.minor);
+ pr_info("device allocated minor is %d\n",
+ sonypi_misc_device.minor);
return 0;
@@ -2893,8 +2887,8 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
}
for (i = 0; i < p->interrupt_count; i++) {
if (!p->interrupts[i]) {
- pr_warn(DRV_PFX "Invalid IRQ %d\n",
- p->interrupts[i]);
+ pr_warn("Invalid IRQ %d\n",
+ p->interrupts[i]);
continue;
}
interrupt = kzalloc(sizeof(*interrupt),
@@ -2932,14 +2926,14 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
ioport->io2.address_length);
}
else {
- pr_err(DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n");
+ pr_err("Unknown SPIC Type, more than 2 IO Ports\n");
return AE_ERROR;
}
return AE_OK;
}
default:
dprintk("Resource %d isn't an IRQ nor an IO port\n",
- resource->type);
+ resource->type);
case ACPI_RESOURCE_TYPE_END_TAG:
return AE_OK;
@@ -2960,7 +2954,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
dprintk("Evaluating _STA\n");
result = acpi_bus_get_status(device);
if (result) {
- pr_warn(DRV_PFX "Unable to read status\n");
+ pr_warn("Unable to read status\n");
goto end;
}
@@ -2976,8 +2970,7 @@ static int sony_pic_possible_resources(struct acpi_device *device)
status = acpi_walk_resources(device->handle, METHOD_NAME__PRS,
sony_pic_read_possible_resource, &spic_dev);
if (ACPI_FAILURE(status)) {
- pr_warn(DRV_PFX "Failure evaluating %s\n",
- METHOD_NAME__PRS);
+ pr_warn("Failure evaluating %s\n", METHOD_NAME__PRS);
result = -ENODEV;
}
end:
@@ -3090,7 +3083,7 @@ static int sony_pic_enable(struct acpi_device *device,
/* check for total failure */
if (ACPI_FAILURE(status)) {
- pr_err(DRV_PFX "Error evaluating _SRS\n");
+ pr_err("Error evaluating _SRS\n");
result = -ENODEV;
goto end;
}
@@ -3182,7 +3175,7 @@ static int sony_pic_remove(struct acpi_device *device, int type)
struct sony_pic_irq *irq, *tmp_irq;
if (sony_pic_disable(device)) {
- pr_err(DRV_PFX "Couldn't disable device.\n");
+ pr_err("Couldn't disable device\n");
return -ENXIO;
}
@@ -3222,8 +3215,7 @@ static int sony_pic_add(struct acpi_device *device)
struct sony_pic_ioport *io, *tmp_io;
struct sony_pic_irq *irq, *tmp_irq;
- pr_info(DRV_PFX "%s v%s.\n", SONY_PIC_DRIVER_NAME,
- SONY_LAPTOP_DRIVER_VERSION);
+ pr_info("%s v%s\n", SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
spic_dev.acpi_dev = device;
strcpy(acpi_device_class(device), "sony/hotkey");
@@ -3233,14 +3225,14 @@ static int sony_pic_add(struct acpi_device *device)
/* read _PRS resources */
result = sony_pic_possible_resources(device);
if (result) {
- pr_err(DRV_PFX "Unable to read possible resources.\n");
+ pr_err("Unable to read possible resources\n");
goto err_free_resources;
}
/* setup input devices and helper fifo */
result = sony_laptop_setup_input(device);
if (result) {
- pr_err(DRV_PFX "Unable to create input devices.\n");
+ pr_err("Unable to create input devices\n");
goto err_free_resources;
}
@@ -3281,7 +3273,7 @@ static int sony_pic_add(struct acpi_device *device)
}
}
if (!spic_dev.cur_ioport) {
- pr_err(DRV_PFX "Failed to request_region.\n");
+ pr_err("Failed to request_region\n");
result = -ENODEV;
goto err_remove_compat;
}
@@ -3301,7 +3293,7 @@ static int sony_pic_add(struct acpi_device *device)
}
}
if (!spic_dev.cur_irq) {
- pr_err(DRV_PFX "Failed to request_irq.\n");
+ pr_err("Failed to request_irq\n");
result = -ENODEV;
goto err_release_region;
}
@@ -3309,7 +3301,7 @@ static int sony_pic_add(struct acpi_device *device)
/* set resource status _SRS */
result = sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
if (result) {
- pr_err(DRV_PFX "Couldn't enable device.\n");
+ pr_err("Couldn't enable device\n");
goto err_free_irq;
}
@@ -3418,7 +3410,7 @@ static int __init sony_laptop_init(void)
if (!no_spic && dmi_check_system(sonypi_dmi_table)) {
result = acpi_bus_register_driver(&sony_pic_driver);
if (result) {
- pr_err(DRV_PFX "Unable to register SPIC driver.");
+ pr_err("Unable to register SPIC driver\n");
goto out;
}
spic_drv_registered = 1;
@@ -3426,7 +3418,7 @@ static int __init sony_laptop_init(void)
result = acpi_bus_register_driver(&sony_nc_driver);
if (result) {
- pr_err(DRV_PFX "Unable to register SNC driver.");
+ pr_err("Unable to register SNC driver\n");
goto out_unregister_pic;
}
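
Unlike msi-wmi, sony-laptop keeps its conditional dprintk() but rewrites it above as a proper variadic macro; the ##__VA_ARGS__ form lets callers pass a bare format string with no arguments. A small sketch of that pattern, with a placeholder debug flag, module name and messages:

/*
 * Sketch of the conditional-debug macro pattern sony-laptop keeps;
 * "demo", the debug flag and the messages are placeholders.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(fmt, ...)			\
do {						\
	if (debug)				\
		pr_warn(fmt, ##__VA_ARGS__);	\
} while (0)

static int __init demo_init(void)
{
	dprintk("loaded\n");		/* works with no extra arguments */
	dprintk("value: %d\n", 42);	/* and with arguments */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
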
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 865ef78d6f1a..e24f5ae475af 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -25,6 +25,8 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -40,9 +42,6 @@
#define TC1100_INSTANCE_WIRELESS 1
#define TC1100_INSTANCE_JOGDIAL 2
-#define TC1100_LOGPREFIX "tc1100-wmi: "
-#define TC1100_INFO KERN_INFO TC1100_LOGPREFIX
-
MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
MODULE_LICENSE("GPL");
@@ -264,7 +263,7 @@ static int __init tc1100_init(void)
if (error)
goto err_device_del;
- printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n");
+ pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n");
return 0;
err_device_del:
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 562fcf0dd2b5..77f6e707a2a9 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -21,6 +21,8 @@
* 02110-1301, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define TPACPI_VERSION "0.24"
#define TPACPI_SYSFS_VERSION 0x020700
@@ -224,17 +226,6 @@ enum tpacpi_hkey_event_t {
#define TPACPI_MAX_ACPI_ARGS 3
-/* printk headers */
-#define TPACPI_LOG TPACPI_FILE ": "
-#define TPACPI_EMERG KERN_EMERG TPACPI_LOG
-#define TPACPI_ALERT KERN_ALERT TPACPI_LOG
-#define TPACPI_CRIT KERN_CRIT TPACPI_LOG
-#define TPACPI_ERR KERN_ERR TPACPI_LOG
-#define TPACPI_WARN KERN_WARNING TPACPI_LOG
-#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG
-#define TPACPI_INFO KERN_INFO TPACPI_LOG
-#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG
-
/* Debugging printk groups */
#define TPACPI_DBG_ALL 0xffff
#define TPACPI_DBG_DISCLOSETASK 0x8000
@@ -389,34 +380,36 @@ static int tpacpi_uwb_emulstate;
* Debugging helpers
*/
-#define dbg_printk(a_dbg_level, format, arg...) \
- do { if (dbg_level & (a_dbg_level)) \
- printk(TPACPI_DEBUG "%s: " format, __func__ , ## arg); \
- } while (0)
+#define dbg_printk(a_dbg_level, format, arg...) \
+do { \
+ if (dbg_level & (a_dbg_level)) \
+ printk(KERN_DEBUG pr_fmt("%s: " format), \
+ __func__, ##arg); \
+} while (0)
#ifdef CONFIG_THINKPAD_ACPI_DEBUG
#define vdbg_printk dbg_printk
static const char *str_supported(int is_supported);
#else
-#define vdbg_printk(a_dbg_level, format, arg...) \
- do { } while (0)
+static inline const char *str_supported(int is_supported) { return ""; }
+#define vdbg_printk(a_dbg_level, format, arg...) \
+ no_printk(format, ##arg)
#endif
static void tpacpi_log_usertask(const char * const what)
{
- printk(TPACPI_DEBUG "%s: access by process with PID %d\n",
- what, task_tgid_vnr(current));
+ printk(KERN_DEBUG pr_fmt("%s: access by process with PID %d\n"),
+ what, task_tgid_vnr(current));
}
-#define tpacpi_disclose_usertask(what, format, arg...) \
- do { \
- if (unlikely( \
- (dbg_level & TPACPI_DBG_DISCLOSETASK) && \
- (tpacpi_lifecycle == TPACPI_LIFE_RUNNING))) { \
- printk(TPACPI_DEBUG "%s: PID %d: " format, \
- what, task_tgid_vnr(current), ## arg); \
- } \
- } while (0)
+#define tpacpi_disclose_usertask(what, format, arg...) \
+do { \
+ if (unlikely((dbg_level & TPACPI_DBG_DISCLOSETASK) && \
+ (tpacpi_lifecycle == TPACPI_LIFE_RUNNING))) { \
+ printk(KERN_DEBUG pr_fmt("%s: PID %d: " format), \
+ what, task_tgid_vnr(current), ## arg); \
+ } \
+} while (0)
/*
* Quirk handling helpers
@@ -535,15 +528,6 @@ TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
"HKEY", /* all others */
); /* 570 */
-TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
- "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
- "\\_SB.PCI0.VID0", /* 770e */
- "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
- "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
- "\\_SB.PCI0.AGP.VID", /* all others */
- ); /* R30, R31 */
-
-
/*************************************************************************
* ACPI helpers
*/
@@ -563,7 +547,7 @@ static int acpi_evalf(acpi_handle handle,
int quiet;
if (!*fmt) {
- printk(TPACPI_ERR "acpi_evalf() called with empty format\n");
+ pr_err("acpi_evalf() called with empty format\n");
return 0;
}
@@ -588,7 +572,7 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(TPACPI_ERR "acpi_evalf() called "
+ pr_err("acpi_evalf() called "
"with invalid format character '%c'\n", c);
va_end(ap);
return 0;
@@ -617,13 +601,13 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(TPACPI_ERR "acpi_evalf() called "
+ pr_err("acpi_evalf() called "
"with invalid format character '%c'\n", res_type);
return 0;
}
if (!success && !quiet)
- printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %s\n",
+ pr_err("acpi_evalf(%s, %s, ...) failed: %s\n",
method, fmt0, acpi_format_exception(status));
return success;
@@ -767,8 +751,7 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device);
if (rc < 0) {
- printk(TPACPI_ERR "acpi_bus_get_device(%s) failed: %d\n",
- ibm->name, rc);
+ pr_err("acpi_bus_get_device(%s) failed: %d\n", ibm->name, rc);
return -ENODEV;
}
@@ -781,12 +764,10 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
ibm->acpi->type, dispatch_acpi_notify, ibm);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
- printk(TPACPI_NOTICE
- "another device driver is already "
- "handling %s events\n", ibm->name);
+ pr_notice("another device driver is already "
+ "handling %s events\n", ibm->name);
} else {
- printk(TPACPI_ERR
- "acpi_install_notify_handler(%s) failed: %s\n",
+ pr_err("acpi_install_notify_handler(%s) failed: %s\n",
ibm->name, acpi_format_exception(status));
}
return -ENODEV;
@@ -811,8 +792,7 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
if (!ibm->acpi->driver) {
- printk(TPACPI_ERR
- "failed to allocate memory for ibm->acpi->driver\n");
+ pr_err("failed to allocate memory for ibm->acpi->driver\n");
return -ENOMEM;
}
@@ -823,7 +803,7 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
rc = acpi_bus_register_driver(ibm->acpi->driver);
if (rc < 0) {
- printk(TPACPI_ERR "acpi_bus_register_driver(%s) failed: %d\n",
+ pr_err("acpi_bus_register_driver(%s) failed: %d\n",
ibm->name, rc);
kfree(ibm->acpi->driver);
ibm->acpi->driver = NULL;
@@ -1081,15 +1061,14 @@ static int parse_strtoul(const char *buf,
static void tpacpi_disable_brightness_delay(void)
{
if (acpi_evalf(hkey_handle, NULL, "PWMS", "qvd", 0))
- printk(TPACPI_NOTICE
- "ACPI backlight control delay disabled\n");
+ pr_notice("ACPI backlight control delay disabled\n");
}
static void printk_deprecated_attribute(const char * const what,
const char * const details)
{
tpacpi_log_usertask("deprecated sysfs attribute");
- printk(TPACPI_WARN "WARNING: sysfs attribute %s is deprecated and "
+ pr_warn("WARNING: sysfs attribute %s is deprecated and "
"will be removed. %s\n",
what, details);
}
@@ -1264,8 +1243,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
&tpacpi_rfk_rfkill_ops,
atp_rfk);
if (!atp_rfk || !atp_rfk->rfkill) {
- printk(TPACPI_ERR
- "failed to allocate memory for rfkill class\n");
+ pr_err("failed to allocate memory for rfkill class\n");
kfree(atp_rfk);
return -ENOMEM;
}
@@ -1275,9 +1253,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
sw_status = (tp_rfkops->get_status)();
if (sw_status < 0) {
- printk(TPACPI_ERR
- "failed to read initial state for %s, error %d\n",
- name, sw_status);
+ pr_err("failed to read initial state for %s, error %d\n",
+ name, sw_status);
} else {
sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
if (set_default) {
@@ -1291,9 +1268,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
res = rfkill_register(atp_rfk->rfkill);
if (res < 0) {
- printk(TPACPI_ERR
- "failed to register %s rfkill switch: %d\n",
- name, res);
+ pr_err("failed to register %s rfkill switch: %d\n", name, res);
rfkill_destroy(atp_rfk->rfkill);
kfree(atp_rfk);
return res;
@@ -1301,7 +1276,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
tpacpi_rfkill_switches[id] = atp_rfk;
- printk(TPACPI_INFO "rfkill switch %s: radio is %sblocked\n",
+ pr_info("rfkill switch %s: radio is %sblocked\n",
name, (sw_state || hw_state) ? "" : "un");
return 0;
}
@@ -1825,10 +1800,8 @@ static void __init tpacpi_check_outdated_fw(void)
* broken, or really stable to begin with, so it is
* best if the user upgrades the firmware anyway.
*/
- printk(TPACPI_WARN
- "WARNING: Outdated ThinkPad BIOS/EC firmware\n");
- printk(TPACPI_WARN
- "WARNING: This firmware may be missing critical bug "
+ pr_warn("WARNING: Outdated ThinkPad BIOS/EC firmware\n");
+ pr_warn("WARNING: This firmware may be missing critical bug "
"fixes and/or important features\n");
}
}
@@ -2117,9 +2090,7 @@ void static hotkey_mask_warn_incomplete_mask(void)
(hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
if (wantedmask)
- printk(TPACPI_NOTICE
- "required events 0x%08x not enabled!\n",
- wantedmask);
+ pr_notice("required events 0x%08x not enabled!\n", wantedmask);
}
/*
@@ -2157,10 +2128,9 @@ static int hotkey_mask_set(u32 mask)
* a given event.
*/
if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
- printk(TPACPI_NOTICE
- "asked for hotkey mask 0x%08x, but "
- "firmware forced it to 0x%08x\n",
- fwmask, hotkey_acpi_mask);
+ pr_notice("asked for hotkey mask 0x%08x, but "
+ "firmware forced it to 0x%08x\n",
+ fwmask, hotkey_acpi_mask);
}
if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
@@ -2184,13 +2154,11 @@ static int hotkey_user_mask_set(const u32 mask)
(mask == 0xffff || mask == 0xffffff ||
mask == 0xffffffff)) {
tp_warned.hotkey_mask_ff = 1;
- printk(TPACPI_NOTICE
- "setting the hotkey mask to 0x%08x is likely "
- "not the best way to go about it\n", mask);
- printk(TPACPI_NOTICE
- "please consider using the driver defaults, "
- "and refer to up-to-date thinkpad-acpi "
- "documentation\n");
+ pr_notice("setting the hotkey mask to 0x%08x is likely "
+ "not the best way to go about it\n", mask);
+ pr_notice("please consider using the driver defaults, "
+ "and refer to up-to-date thinkpad-acpi "
+ "documentation\n");
}
/* Try to enable what the user asked for, plus whatever we need.
@@ -2574,8 +2542,7 @@ static void hotkey_poll_setup(const bool may_warn)
NULL, TPACPI_NVRAM_KTHREAD_NAME);
if (IS_ERR(tpacpi_hotkey_task)) {
tpacpi_hotkey_task = NULL;
- printk(TPACPI_ERR
- "could not create kernel thread "
+ pr_err("could not create kernel thread "
"for hotkey polling\n");
}
}
@@ -2583,11 +2550,10 @@ static void hotkey_poll_setup(const bool may_warn)
hotkey_poll_stop_sync();
if (may_warn && (poll_driver_mask || poll_user_mask) &&
hotkey_poll_freq == 0) {
- printk(TPACPI_NOTICE
- "hot keys 0x%08x and/or events 0x%08x "
- "require polling, which is currently "
- "disabled\n",
- poll_user_mask, poll_driver_mask);
+ pr_notice("hot keys 0x%08x and/or events 0x%08x "
+ "require polling, which is currently "
+ "disabled\n",
+ poll_user_mask, poll_driver_mask);
}
}
}
@@ -2811,13 +2777,13 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
mutex_unlock(&hotkey_mutex);
if (rc < 0)
- printk(TPACPI_ERR "hotkey_source_mask: failed to update the"
- "firmware event mask!\n");
+ pr_err("hotkey_source_mask: "
+ "failed to update the firmware event mask!\n");
if (r_ev)
- printk(TPACPI_NOTICE "hotkey_source_mask: "
- "some important events were disabled: "
- "0x%04x\n", r_ev);
+ pr_notice("hotkey_source_mask: "
+ "some important events were disabled: 0x%04x\n",
+ r_ev);
tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
@@ -3048,8 +3014,7 @@ static void hotkey_exit(void)
if (((tp_features.hotkey_mask &&
hotkey_mask_set(hotkey_orig_mask)) |
hotkey_status_set(false)) != 0)
- printk(TPACPI_ERR
- "failed to restore hot key mask "
+ pr_err("failed to restore hot key mask "
"to BIOS defaults\n");
}
@@ -3288,10 +3253,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
for HKEY interface version 0x100 */
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
if ((hkeyv >> 8) != 1) {
- printk(TPACPI_ERR "unknown version of the "
- "HKEY interface: 0x%x\n", hkeyv);
- printk(TPACPI_ERR "please report this to %s\n",
- TPACPI_MAIL);
+ pr_err("unknown version of the HKEY interface: 0x%x\n",
+ hkeyv);
+ pr_err("please report this to %s\n", TPACPI_MAIL);
} else {
/*
* MHKV 0x100 in A31, R40, R40e,
@@ -3304,8 +3268,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Paranoia check AND init hotkey_all_mask */
if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
"MHKA", "qd")) {
- printk(TPACPI_ERR
- "missing MHKA handler, "
+ pr_err("missing MHKA handler, "
"please report this to %s\n",
TPACPI_MAIL);
/* Fallback: pre-init for FN+F3,F4,F12 */
@@ -3343,16 +3306,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (dbg_wlswemul) {
tp_features.hotkey_wlsw = 1;
radiosw_state = !!tpacpi_wlsw_emulstate;
- printk(TPACPI_INFO
- "radio switch emulation enabled\n");
+ pr_info("radio switch emulation enabled\n");
} else
#endif
/* Not all thinkpads have a hardware radio switch */
if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
radiosw_state = !!status;
- printk(TPACPI_INFO
- "radio switch found; radios are %s\n",
+ pr_info("radio switch found; radios are %s\n",
enabled(status, 0));
}
if (tp_features.hotkey_wlsw)
@@ -3363,8 +3324,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
tp_features.hotkey_tablet = 1;
tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
- printk(TPACPI_INFO
- "possible tablet mode switch found; "
+ pr_info("possible tablet mode switch found; "
"ThinkPad in %s mode\n",
(tabletsw_state) ? "tablet" : "laptop");
res = add_to_attr_set(hotkey_dev_attributes,
@@ -3382,8 +3342,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
GFP_KERNEL);
if (!hotkey_keycode_map) {
- printk(TPACPI_ERR
- "failed to allocate memory for key map\n");
+ pr_err("failed to allocate memory for key map\n");
res = -ENOMEM;
goto err_exit;
}
@@ -3426,13 +3385,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
* userspace. tpacpi_detect_brightness_capabilities() must have
* been called before this point */
if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
- printk(TPACPI_INFO
- "This ThinkPad has standard ACPI backlight "
- "brightness control, supported by the ACPI "
- "video driver\n");
- printk(TPACPI_NOTICE
- "Disabling thinkpad-acpi brightness events "
- "by default...\n");
+ pr_info("This ThinkPad has standard ACPI backlight "
+ "brightness control, supported by the ACPI "
+ "video driver\n");
+ pr_notice("Disabling thinkpad-acpi brightness events "
+ "by default...\n");
/* Disable brightness up/down on Lenovo thinkpads when
* ACPI is handling them, otherwise it is plain impossible
@@ -3539,8 +3496,7 @@ static bool hotkey_notify_wakeup(const u32 hkey,
case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
- printk(TPACPI_ALERT
- "EMERGENCY WAKEUP: battery almost empty\n");
+ pr_alert("EMERGENCY WAKEUP: battery almost empty\n");
/* how to auto-heal: */
/* 2313: woke up from S3, go to S4/S5 */
/* 2413: woke up from S4, go to S5 */
@@ -3551,9 +3507,7 @@ static bool hotkey_notify_wakeup(const u32 hkey,
}
if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
- printk(TPACPI_INFO
- "woke up due to a hot-unplug "
- "request...\n");
+ pr_info("woke up due to a hot-unplug request...\n");
hotkey_wakeup_reason_notify_change();
}
return true;
@@ -3605,37 +3559,31 @@ static bool hotkey_notify_thermal(const u32 hkey,
switch (hkey) {
case TP_HKEY_EV_THM_TABLE_CHANGED:
- printk(TPACPI_INFO
- "EC reports that Thermal Table has changed\n");
+ pr_info("EC reports that Thermal Table has changed\n");
/* recommended action: do nothing, we don't have
* Lenovo ATM information */
return true;
case TP_HKEY_EV_ALARM_BAT_HOT:
- printk(TPACPI_CRIT
- "THERMAL ALARM: battery is too hot!\n");
+ pr_crit("THERMAL ALARM: battery is too hot!\n");
/* recommended action: warn user through gui */
break;
case TP_HKEY_EV_ALARM_BAT_XHOT:
- printk(TPACPI_ALERT
- "THERMAL EMERGENCY: battery is extremely hot!\n");
+ pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
case TP_HKEY_EV_ALARM_SENSOR_HOT:
- printk(TPACPI_CRIT
- "THERMAL ALARM: "
+ pr_crit("THERMAL ALARM: "
"a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
/* some internal component is too hot */
break;
case TP_HKEY_EV_ALARM_SENSOR_XHOT:
- printk(TPACPI_ALERT
- "THERMAL EMERGENCY: "
- "a sensor reports something is extremely hot!\n");
+ pr_alert("THERMAL EMERGENCY: "
+ "a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
default:
- printk(TPACPI_ALERT
- "THERMAL ALERT: unknown thermal alarm received\n");
+ pr_alert("THERMAL ALERT: unknown thermal alarm received\n");
known = false;
}
@@ -3652,8 +3600,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
bool known_ev;
if (event != 0x80) {
- printk(TPACPI_ERR
- "unknown HKEY notification event %d\n", event);
+ pr_err("unknown HKEY notification event %d\n", event);
/* forward it to userspace, maybe it knows how to handle it */
acpi_bus_generate_netlink_event(
ibm->acpi->device->pnp.device_class,
@@ -3664,7 +3611,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
while (1) {
if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
- printk(TPACPI_ERR "failed to retrieve HKEY event\n");
+ pr_err("failed to retrieve HKEY event\n");
return;
}
@@ -3692,8 +3639,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
switch (hkey) {
case TP_HKEY_EV_BAYEJ_ACK:
hotkey_autosleep_ack = 1;
- printk(TPACPI_INFO
- "bay ejected\n");
+ pr_info("bay ejected\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
break;
@@ -3709,8 +3655,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
/* 0x4000-0x4FFF: dock-related wakeups */
if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
hotkey_autosleep_ack = 1;
- printk(TPACPI_INFO
- "undocked\n");
+ pr_info("undocked\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
} else {
@@ -3741,11 +3686,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
known_ev = false;
}
if (!known_ev) {
- printk(TPACPI_NOTICE
- "unhandled HKEY event 0x%04x\n", hkey);
- printk(TPACPI_NOTICE
- "please report the conditions when this "
- "event happened to %s\n", TPACPI_MAIL);
+ pr_notice("unhandled HKEY event 0x%04x\n", hkey);
+ pr_notice("please report the conditions when this "
+ "event happened to %s\n", TPACPI_MAIL);
}
/* Legacy events */
@@ -3778,8 +3721,7 @@ static void hotkey_resume(void)
if (hotkey_status_set(true) < 0 ||
hotkey_mask_set(hotkey_acpi_mask) < 0)
- printk(TPACPI_ERR
- "error while attempting to reset the event "
+ pr_err("error while attempting to reset the event "
"firmware interface\n");
tpacpi_send_radiosw_update();
@@ -3824,14 +3766,12 @@ static void hotkey_enabledisable_warn(bool enable)
{
tpacpi_log_usertask("procfs hotkey enable/disable");
if (!WARN((tpacpi_lifecycle == TPACPI_LIFE_RUNNING || !enable),
- TPACPI_WARN
- "hotkey enable/disable functionality has been "
- "removed from the driver. Hotkeys are always "
- "enabled\n"))
- printk(TPACPI_ERR
- "Please remove the hotkey=enable module "
- "parameter, it is deprecated. Hotkeys are always "
- "enabled\n");
+ pr_fmt("hotkey enable/disable functionality has been "
+ "removed from the driver. "
+ "Hotkeys are always enabled.\n")))
+ pr_err("Please remove the hotkey=enable module "
+ "parameter, it is deprecated. "
+ "Hotkeys are always enabled.\n");
}
static int hotkey_write(char *buf)
@@ -4011,8 +3951,7 @@ static void bluetooth_shutdown(void)
/* Order firmware to save current state to NVRAM */
if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
TP_ACPI_BLTH_SAVE_STATE))
- printk(TPACPI_NOTICE
- "failed to save bluetooth state to NVRAM\n");
+ pr_notice("failed to save bluetooth state to NVRAM\n");
else
vdbg_printk(TPACPI_DBG_RFKILL,
"bluestooth state saved to NVRAM\n");
@@ -4051,8 +3990,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_bluetoothemul) {
tp_features.bluetooth = 1;
- printk(TPACPI_INFO
- "bluetooth switch emulation enabled\n");
+ pr_info("bluetooth switch emulation enabled\n");
} else
#endif
if (tp_features.bluetooth &&
@@ -4203,8 +4141,7 @@ static void wan_shutdown(void)
/* Order firmware to save current state to NVRAM */
if (!acpi_evalf(NULL, NULL, "\\WGSV", "vd",
TP_ACPI_WGSV_SAVE_STATE))
- printk(TPACPI_NOTICE
- "failed to save WWAN state to NVRAM\n");
+ pr_notice("failed to save WWAN state to NVRAM\n");
else
vdbg_printk(TPACPI_DBG_RFKILL,
"WWAN state saved to NVRAM\n");
@@ -4241,8 +4178,7 @@ static int __init wan_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wwanemul) {
tp_features.wan = 1;
- printk(TPACPI_INFO
- "wwan switch emulation enabled\n");
+ pr_info("wwan switch emulation enabled\n");
} else
#endif
if (tp_features.wan &&
@@ -4382,8 +4318,7 @@ static int __init uwb_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_uwbemul) {
tp_features.uwb = 1;
- printk(TPACPI_INFO
- "uwb switch emulation enabled\n");
+ pr_info("uwb switch emulation enabled\n");
} else
#endif
if (tp_features.uwb &&
@@ -4444,6 +4379,15 @@ static int video_orig_autosw;
static int video_autosw_get(void);
static int video_autosw_set(int enable);
+TPACPI_HANDLE(vid, root,
+ "\\_SB.PCI.AGP.VGA", /* 570 */
+ "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
+ "\\_SB.PCI0.VID0", /* 770e */
+ "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
+ "\\_SB.PCI0.AGP.VGA", /* X100e and a few others */
+ "\\_SB.PCI0.AGP.VID", /* all others */
+ ); /* R30, R31 */
+
TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
static int __init video_init(struct ibm_init_struct *iibm)
@@ -4487,7 +4431,7 @@ static void video_exit(void)
dbg_printk(TPACPI_DBG_EXIT,
"restoring original video autoswitch mode\n");
if (video_autosw_set(video_orig_autosw))
- printk(TPACPI_ERR "error while trying to restore original "
+ pr_err("error while trying to restore original "
"video autoswitch mode\n");
}
@@ -4560,8 +4504,7 @@ static int video_outputsw_set(int status)
res = acpi_evalf(vid_handle, NULL,
"ASWT", "vdd", status * 0x100, 0);
if (!autosw && video_autosw_set(autosw)) {
- printk(TPACPI_ERR
- "video auto-switch left enabled due to error\n");
+ pr_err("video auto-switch left enabled due to error\n");
return -EIO;
}
break;
@@ -4630,8 +4573,7 @@ static int video_outputsw_cycle(void)
return -ENOSYS;
}
if (!autosw && video_autosw_set(autosw)) {
- printk(TPACPI_ERR
- "video auto-switch left enabled due to error\n");
+ pr_err("video auto-switch left enabled due to error\n");
return -EIO;
}
@@ -5348,7 +5290,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
GFP_KERNEL);
if (!tpacpi_leds) {
- printk(TPACPI_ERR "Out of memory for LED data\n");
+ pr_err("Out of memory for LED data\n");
return -ENOMEM;
}
@@ -5367,9 +5309,8 @@ static int __init led_init(struct ibm_init_struct *iibm)
}
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
- printk(TPACPI_NOTICE
- "warning: userspace override of important "
- "firmware LEDs is enabled\n");
+ pr_notice("warning: userspace override of important "
+ "firmware LEDs is enabled\n");
#endif
return 0;
}
@@ -5639,17 +5580,16 @@ static void thermal_dump_all_sensors(void)
if (n <= 0)
return;
- printk(TPACPI_NOTICE
- "temperatures (Celsius):");
+ pr_notice("temperatures (Celsius):");
for (i = 0; i < n; i++) {
if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
- printk(KERN_CONT " %d", (int)(t.temp[i] / 1000));
+ pr_cont(" %d", (int)(t.temp[i] / 1000));
else
- printk(KERN_CONT " N/A");
+ pr_cont(" N/A");
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
/* sysfs temp##_input -------------------------------------------------- */
@@ -5769,14 +5709,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
if (ta1 == 0) {
/* This is sheer paranoia, but we handle it anyway */
if (acpi_tmp7) {
- printk(TPACPI_ERR
- "ThinkPad ACPI EC access misbehaving, "
+ pr_err("ThinkPad ACPI EC access misbehaving, "
"falling back to ACPI TMPx access "
"mode\n");
thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
} else {
- printk(TPACPI_ERR
- "ThinkPad ACPI EC access misbehaving, "
+ pr_err("ThinkPad ACPI EC access misbehaving, "
"disabling thermal sensors access\n");
thermal_read_mode = TPACPI_THERMAL_NONE;
}
@@ -6129,8 +6067,8 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
- printk(TPACPI_ERR "Unknown _BCL data, "
- "please report this to %s\n", TPACPI_MAIL);
+ pr_err("Unknown _BCL data, please report this to %s\n",
+ TPACPI_MAIL);
rc = 0;
} else {
rc = obj->package.count;
@@ -6214,18 +6152,15 @@ static void __init tpacpi_detect_brightness_capabilities(void)
switch (b) {
case 16:
bright_maxlvl = 15;
- printk(TPACPI_INFO
- "detected a 16-level brightness capable ThinkPad\n");
+ pr_info("detected a 16-level brightness capable ThinkPad\n");
break;
case 8:
case 0:
bright_maxlvl = 7;
- printk(TPACPI_INFO
- "detected a 8-level brightness capable ThinkPad\n");
+ pr_info("detected a 8-level brightness capable ThinkPad\n");
break;
default:
- printk(TPACPI_ERR
- "Unsupported brightness interface, "
+ pr_err("Unsupported brightness interface, "
"please contact %s\n", TPACPI_MAIL);
tp_features.bright_unkfw = 1;
bright_maxlvl = b - 1;
@@ -6260,22 +6195,19 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (acpi_video_backlight_support()) {
if (brightness_enable > 1) {
- printk(TPACPI_INFO
- "Standard ACPI backlight interface "
- "available, not loading native one.\n");
+ pr_info("Standard ACPI backlight interface "
+ "available, not loading native one\n");
return 1;
} else if (brightness_enable == 1) {
- printk(TPACPI_WARN
- "Cannot enable backlight brightness support, "
+ pr_warn("Cannot enable backlight brightness support, "
"ACPI is already handling it. Refer to the "
- "acpi_backlight kernel parameter\n");
+ "acpi_backlight kernel parameter.\n");
return 1;
}
} else if (tp_features.bright_acpimode && brightness_enable > 1) {
- printk(TPACPI_NOTICE
- "Standard ACPI backlight interface not "
- "available, thinkpad_acpi native "
- "brightness control enabled\n");
+ pr_notice("Standard ACPI backlight interface not "
+ "available, thinkpad_acpi native "
+ "brightness control enabled\n");
}
/*
@@ -6319,19 +6251,17 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (IS_ERR(ibm_backlight_device)) {
int rc = PTR_ERR(ibm_backlight_device);
ibm_backlight_device = NULL;
- printk(TPACPI_ERR "Could not register backlight device\n");
+ pr_err("Could not register backlight device\n");
return rc;
}
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
"brightness is supported\n");
if (quirks & TPACPI_BRGHT_Q_ASK) {
- printk(TPACPI_NOTICE
- "brightness: will use unverified default: "
- "brightness_mode=%d\n", brightness_mode);
- printk(TPACPI_NOTICE
- "brightness: please report to %s whether it works well "
- "or not on your ThinkPad\n", TPACPI_MAIL);
+ pr_notice("brightness: will use unverified default: "
+ "brightness_mode=%d\n", brightness_mode);
+ pr_notice("brightness: please report to %s whether it works well "
+ "or not on your ThinkPad\n", TPACPI_MAIL);
}
/* Added by mistake in early 2007. Probably useless, but it could
@@ -6804,8 +6734,7 @@ static int __init volume_create_alsa_mixer(void)
rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
sizeof(struct tpacpi_alsa_data), &card);
if (rc < 0 || !card) {
- printk(TPACPI_ERR
- "Failed to create ALSA card structures: %d\n", rc);
+ pr_err("Failed to create ALSA card structures: %d\n", rc);
return 1;
}
@@ -6839,9 +6768,8 @@ static int __init volume_create_alsa_mixer(void)
ctl_vol = snd_ctl_new1(&volume_alsa_control_vol, NULL);
rc = snd_ctl_add(card, ctl_vol);
if (rc < 0) {
- printk(TPACPI_ERR
- "Failed to create ALSA volume control: %d\n",
- rc);
+ pr_err("Failed to create ALSA volume control: %d\n",
+ rc);
goto err_exit;
}
data->ctl_vol_id = &ctl_vol->id;
@@ -6850,8 +6778,7 @@ static int __init volume_create_alsa_mixer(void)
ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL);
rc = snd_ctl_add(card, ctl_mute);
if (rc < 0) {
- printk(TPACPI_ERR "Failed to create ALSA mute control: %d\n",
- rc);
+ pr_err("Failed to create ALSA mute control: %d\n", rc);
goto err_exit;
}
data->ctl_mute_id = &ctl_mute->id;
@@ -6859,7 +6786,7 @@ static int __init volume_create_alsa_mixer(void)
snd_card_set_dev(card, &tpacpi_pdev->dev);
rc = snd_card_register(card);
if (rc < 0) {
- printk(TPACPI_ERR "Failed to register ALSA card: %d\n", rc);
+ pr_err("Failed to register ALSA card: %d\n", rc);
goto err_exit;
}
@@ -6915,9 +6842,8 @@ static int __init volume_init(struct ibm_init_struct *iibm)
return -EINVAL;
if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) {
- printk(TPACPI_ERR
- "UCMS step volume mode not implemented, "
- "please contact %s\n", TPACPI_MAIL);
+ pr_err("UCMS step volume mode not implemented, "
+ "please contact %s\n", TPACPI_MAIL);
return 1;
}
@@ -6981,13 +6907,11 @@ static int __init volume_init(struct ibm_init_struct *iibm)
rc = volume_create_alsa_mixer();
if (rc) {
- printk(TPACPI_ERR
- "Could not create the ALSA mixer interface\n");
+ pr_err("Could not create the ALSA mixer interface\n");
return rc;
}
- printk(TPACPI_INFO
- "Console audio control enabled, mode: %s\n",
+ pr_info("Console audio control enabled, mode: %s\n",
(volume_control_allowed) ?
"override (read/write)" :
"monitor (read only)");
@@ -7049,12 +6973,10 @@ static int volume_write(char *buf)
if (!volume_control_allowed && tpacpi_lifecycle != TPACPI_LIFE_INIT) {
if (unlikely(!tp_warned.volume_ctrl_forbidden)) {
tp_warned.volume_ctrl_forbidden = 1;
- printk(TPACPI_NOTICE
- "Console audio control in monitor mode, "
- "changes are not allowed.\n");
- printk(TPACPI_NOTICE
- "Use the volume_control=1 module parameter "
- "to enable volume control\n");
+ pr_notice("Console audio control in monitor mode, "
+ "changes are not allowed\n");
+ pr_notice("Use the volume_control=1 module parameter "
+ "to enable volume control\n");
}
return -EPERM;
}
@@ -7129,8 +7051,7 @@ static void inline volume_alsa_notify_change(void)
static int __init volume_init(struct ibm_init_struct *iibm)
{
- printk(TPACPI_INFO
- "volume: disabled as there is no ALSA support in this kernel\n");
+ pr_info("volume: disabled as there is no ALSA support in this kernel\n");
return 1;
}
@@ -7337,9 +7258,8 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
static void fan_quirk1_setup(void)
{
if (fan_control_initial_status == 0x07) {
- printk(TPACPI_NOTICE
- "fan_init: initial fan status is unknown, "
- "assuming it is in auto mode\n");
+ pr_notice("fan_init: initial fan status is unknown, "
+ "assuming it is in auto mode\n");
tp_features.fan_ctrl_status_undef = 1;
}
}
@@ -7726,8 +7646,7 @@ static void fan_watchdog_reset(void)
if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
msecs_to_jiffies(fan_watchdog_maxinterval
* 1000))) {
- printk(TPACPI_ERR
- "failed to queue the fan watchdog, "
+ pr_err("failed to queue the fan watchdog, "
"watchdog will not trigger\n");
}
} else
@@ -7741,11 +7660,11 @@ static void fan_watchdog_fire(struct work_struct *ignored)
if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
return;
- printk(TPACPI_NOTICE "fan watchdog: enabling fan\n");
+ pr_notice("fan watchdog: enabling fan\n");
rc = fan_set_enable();
if (rc < 0) {
- printk(TPACPI_ERR "fan watchdog: error %d while enabling fan, "
- "will try again later...\n", -rc);
+ pr_err("fan watchdog: error %d while enabling fan, "
+ "will try again later...\n", -rc);
/* reschedule for later */
fan_watchdog_reset();
}
@@ -8049,8 +7968,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
"secondary fan support enabled\n");
}
} else {
- printk(TPACPI_ERR
- "ThinkPad ACPI EC access misbehaving, "
+ pr_err("ThinkPad ACPI EC access misbehaving, "
"fan status and control unavailable\n");
return 1;
}
@@ -8150,9 +8068,8 @@ static void fan_suspend(pm_message_t state)
fan_control_resume_level = 0;
rc = fan_get_status_safe(&fan_control_resume_level);
if (rc < 0)
- printk(TPACPI_NOTICE
- "failed to read fan level for later "
- "restore during resume: %d\n", rc);
+ pr_notice("failed to read fan level for later "
+ "restore during resume: %d\n", rc);
/* if it is undefined, don't attempt to restore it.
* KEEP THIS LAST */
@@ -8207,13 +8124,11 @@ static void fan_resume(void)
return;
}
if (do_set) {
- printk(TPACPI_NOTICE
- "restoring fan level to 0x%02x\n",
- fan_control_resume_level);
+ pr_notice("restoring fan level to 0x%02x\n",
+ fan_control_resume_level);
rc = fan_set_level_safe(fan_control_resume_level);
if (rc < 0)
- printk(TPACPI_NOTICE
- "failed to restore fan level: %d\n", rc);
+ pr_notice("failed to restore fan level: %d\n", rc);
}
}
@@ -8305,8 +8220,8 @@ static int fan_write_cmd_level(const char *cmd, int *rc)
*rc = fan_set_level_safe(level);
if (*rc == -ENXIO)
- printk(TPACPI_ERR "level command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("level command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan",
"set level to %d\n", level);
@@ -8321,8 +8236,8 @@ static int fan_write_cmd_enable(const char *cmd, int *rc)
*rc = fan_set_enable();
if (*rc == -ENXIO)
- printk(TPACPI_ERR "enable command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("enable command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan", "enable\n");
@@ -8336,8 +8251,8 @@ static int fan_write_cmd_disable(const char *cmd, int *rc)
*rc = fan_set_disable();
if (*rc == -ENXIO)
- printk(TPACPI_ERR "disable command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("disable command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan", "disable\n");
@@ -8356,8 +8271,8 @@ static int fan_write_cmd_speed(const char *cmd, int *rc)
*rc = fan_set_speed(speed);
if (*rc == -ENXIO)
- printk(TPACPI_ERR "speed command accepted for unsupported "
- "access mode %d", fan_control_access_mode);
+ pr_err("speed command accepted for unsupported access mode %d\n",
+ fan_control_access_mode);
else if (!*rc)
tpacpi_disclose_usertask("procfs fan",
"set speed to %d\n", speed);
@@ -8560,8 +8475,8 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
if (ibm->acpi->notify) {
ret = setup_acpi_notify(ibm);
if (ret == -ENODEV) {
- printk(TPACPI_NOTICE "disabling subdriver %s\n",
- ibm->name);
+ pr_notice("disabling subdriver %s\n",
+ ibm->name);
ret = 0;
goto err_out;
}
@@ -8583,8 +8498,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
entry = proc_create_data(ibm->name, mode, proc_dir,
&dispatch_proc_fops, ibm);
if (!entry) {
- printk(TPACPI_ERR "unable to create proc entry %s\n",
- ibm->name);
+ pr_err("unable to create proc entry %s\n", ibm->name);
ret = -ENODEV;
goto err_out;
}
@@ -8683,13 +8597,11 @@ static int __must_check __init get_thinkpad_model_data(
tp->ec_release = (ec_fw_string[4] << 8)
| ec_fw_string[5];
} else {
- printk(TPACPI_NOTICE
- "ThinkPad firmware release %s "
- "doesn't match the known patterns\n",
- ec_fw_string);
- printk(TPACPI_NOTICE
- "please report this to %s\n",
- TPACPI_MAIL);
+ pr_notice("ThinkPad firmware release %s "
+ "doesn't match the known patterns\n",
+ ec_fw_string);
+ pr_notice("please report this to %s\n",
+ TPACPI_MAIL);
}
break;
}
@@ -8733,8 +8645,7 @@ static int __init probe_for_thinkpad(void)
tpacpi_acpi_handle_locate("ec", TPACPI_ACPI_EC_HID, &ec_handle);
if (!ec_handle) {
if (is_thinkpad)
- printk(TPACPI_ERR
- "Not yet supported ThinkPad detected!\n");
+ pr_err("Not yet supported ThinkPad detected!\n");
return -ENODEV;
}
@@ -8746,10 +8657,10 @@ static int __init probe_for_thinkpad(void)
static void __init thinkpad_acpi_init_banner(void)
{
- printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
- printk(TPACPI_INFO "%s\n", TPACPI_URL);
+ pr_info("%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
+ pr_info("%s\n", TPACPI_URL);
- printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n",
+ pr_info("ThinkPad BIOS %s, EC %s\n",
(thinkpad_id.bios_version_str) ?
thinkpad_id.bios_version_str : "unknown",
(thinkpad_id.ec_version_str) ?
@@ -8758,7 +8669,7 @@ static void __init thinkpad_acpi_init_banner(void)
BUG_ON(!thinkpad_id.vendor);
if (thinkpad_id.model_str)
- printk(TPACPI_INFO "%s %s, model %s\n",
+ pr_info("%s %s, model %s\n",
(thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
"IBM" : ((thinkpad_id.vendor ==
PCI_VENDOR_ID_LENOVO) ?
@@ -9024,8 +8935,7 @@ static int __init thinkpad_acpi_module_init(void)
ret = get_thinkpad_model_data(&thinkpad_id);
if (ret) {
- printk(TPACPI_ERR
- "unable to get DMI data: %d\n", ret);
+ pr_err("unable to get DMI data: %d\n", ret);
thinkpad_acpi_module_exit();
return ret;
}
@@ -9051,16 +8961,14 @@ static int __init thinkpad_acpi_module_init(void)
proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
if (!proc_dir) {
- printk(TPACPI_ERR
- "unable to create proc dir " TPACPI_PROC_DIR);
+ pr_err("unable to create proc dir " TPACPI_PROC_DIR "\n");
thinkpad_acpi_module_exit();
return -ENODEV;
}
ret = platform_driver_register(&tpacpi_pdriver);
if (ret) {
- printk(TPACPI_ERR
- "unable to register main platform driver\n");
+ pr_err("unable to register main platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9068,8 +8976,7 @@ static int __init thinkpad_acpi_module_init(void)
ret = platform_driver_register(&tpacpi_hwmon_pdriver);
if (ret) {
- printk(TPACPI_ERR
- "unable to register hwmon platform driver\n");
+ pr_err("unable to register hwmon platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9082,8 +8989,7 @@ static int __init thinkpad_acpi_module_init(void)
&tpacpi_hwmon_pdriver.driver);
}
if (ret) {
- printk(TPACPI_ERR
- "unable to create sysfs driver attributes\n");
+ pr_err("unable to create sysfs driver attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9096,7 +9002,7 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
tpacpi_pdev = NULL;
- printk(TPACPI_ERR "unable to register platform device\n");
+ pr_err("unable to register platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9106,16 +9012,14 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_sensors_pdev)) {
ret = PTR_ERR(tpacpi_sensors_pdev);
tpacpi_sensors_pdev = NULL;
- printk(TPACPI_ERR
- "unable to register hwmon platform device\n");
+ pr_err("unable to register hwmon platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
ret = device_create_file(&tpacpi_sensors_pdev->dev,
&dev_attr_thinkpad_acpi_pdev_name);
if (ret) {
- printk(TPACPI_ERR
- "unable to create sysfs hwmon device attributes\n");
+ pr_err("unable to create sysfs hwmon device attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -9124,14 +9028,14 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_hwmon)) {
ret = PTR_ERR(tpacpi_hwmon);
tpacpi_hwmon = NULL;
- printk(TPACPI_ERR "unable to register hwmon device\n");
+ pr_err("unable to register hwmon device\n");
thinkpad_acpi_module_exit();
return ret;
}
mutex_init(&tpacpi_inputdev_send_mutex);
tpacpi_inputdev = input_allocate_device();
if (!tpacpi_inputdev) {
- printk(TPACPI_ERR "unable to allocate input device\n");
+ pr_err("unable to allocate input device\n");
thinkpad_acpi_module_exit();
return -ENOMEM;
} else {
@@ -9163,7 +9067,7 @@ static int __init thinkpad_acpi_module_init(void)
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
- printk(TPACPI_ERR "unable to register input device\n");
+ pr_err("unable to register input device\n");
thinkpad_acpi_module_exit();
return ret;
} else {
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 1d07d6d09f27..4c20447ddbb7 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -194,7 +194,7 @@ static int __init topstar_laptop_init(void)
if (ret < 0)
return ret;
- printk(KERN_INFO "Topstar Laptop ACPI extras driver loaded\n");
+ pr_info("ACPI extras driver loaded\n");
return 0;
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 63f42a22e102..cb009b2629ee 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -35,6 +35,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define TOSHIBA_ACPI_VERSION "0.19"
#define PROC_INTERFACE_VERSION 1
@@ -60,11 +62,6 @@ MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
MODULE_LICENSE("GPL");
-#define MY_LOGPREFIX "toshiba_acpi: "
-#define MY_ERR KERN_ERR MY_LOGPREFIX
-#define MY_NOTICE KERN_NOTICE MY_LOGPREFIX
-#define MY_INFO KERN_INFO MY_LOGPREFIX
-
/* Toshiba ACPI method paths */
#define METHOD_LCD_BRIGHTNESS "\\_SB_.PCI0.VGA_.LCD_._BCM"
#define TOSH_INTERFACE_1 "\\_SB_.VALD"
@@ -301,7 +298,7 @@ static int toshiba_illumination_available(void)
in[0] = 0xf100;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Illumination device not available\n");
+ pr_info("Illumination device not available\n");
return 0;
}
in[0] = 0xf400;
@@ -320,7 +317,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[0] = 0xf100;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Illumination device not available\n");
+ pr_info("Illumination device not available\n");
return;
}
@@ -331,7 +328,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[2] = 1;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "ACPI call for illumination failed.\n");
+ pr_info("ACPI call for illumination failed\n");
return;
}
} else {
@@ -341,7 +338,7 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
in[2] = 0;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "ACPI call for illumination failed.\n");
+ pr_info("ACPI call for illumination failed.\n");
return;
}
}
@@ -364,7 +361,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
in[0] = 0xf100;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Illumination device not available\n");
+ pr_info("Illumination device not available\n");
return LED_OFF;
}
@@ -373,7 +370,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
in[1] = 0x14e;
status = hci_raw(in, out);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "ACPI call for illumination failed.\n");
+ pr_info("ACPI call for illumination failed.\n");
return LED_OFF;
}
@@ -517,7 +514,7 @@ static int lcd_proc_show(struct seq_file *m, void *v)
seq_printf(m, "brightness_levels: %d\n",
HCI_LCD_BRIGHTNESS_LEVELS);
} else {
- printk(MY_ERR "Error reading LCD brightness\n");
+ pr_err("Error reading LCD brightness\n");
}
return 0;
@@ -592,7 +589,7 @@ static int video_proc_show(struct seq_file *m, void *v)
seq_printf(m, "crt_out: %d\n", is_crt);
seq_printf(m, "tv_out: %d\n", is_tv);
} else {
- printk(MY_ERR "Error reading video out status\n");
+ pr_err("Error reading video out status\n");
}
return 0;
@@ -686,7 +683,7 @@ static int fan_proc_show(struct seq_file *m, void *v)
seq_printf(m, "running: %d\n", (value > 0));
seq_printf(m, "force_on: %d\n", force_fan);
} else {
- printk(MY_ERR "Error reading fan status\n");
+ pr_err("Error reading fan status\n");
}
return 0;
@@ -750,9 +747,9 @@ static int keys_proc_show(struct seq_file *m, void *v)
* some machines where system events sporadically
* become disabled. */
hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
- printk(MY_NOTICE "Re-enabled hotkeys\n");
+ pr_notice("Re-enabled hotkeys\n");
} else {
- printk(MY_ERR "Error reading hotkey status\n");
+ pr_err("Error reading hotkey status\n");
goto end;
}
}
@@ -863,7 +860,7 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
if (!sparse_keymap_report_event(toshiba_acpi.hotkey_dev,
value, 1, true)) {
- printk(MY_INFO "Unknown key %x\n",
+ pr_info("Unknown key %x\n",
value);
}
} else if (hci_result == HCI_NOT_SUPPORTED) {
@@ -871,7 +868,7 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
* some machines where system events sporadically
* become disabled. */
hci_write1(HCI_SYSTEM_EVENT, 1, &hci_result);
- printk(MY_NOTICE "Re-enabled hotkeys\n");
+ pr_notice("Re-enabled hotkeys\n");
}
} while (hci_result != HCI_EMPTY);
}
@@ -883,13 +880,13 @@ static int __init toshiba_acpi_setup_keyboard(char *device)
status = acpi_get_handle(NULL, device, &toshiba_acpi.handle);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to get notification device\n");
+ pr_info("Unable to get notification device\n");
return -ENODEV;
}
toshiba_acpi.hotkey_dev = input_allocate_device();
if (!toshiba_acpi.hotkey_dev) {
- printk(MY_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
return -ENOMEM;
}
@@ -905,21 +902,21 @@ static int __init toshiba_acpi_setup_keyboard(char *device)
status = acpi_install_notify_handler(toshiba_acpi.handle,
ACPI_DEVICE_NOTIFY, toshiba_acpi_notify, NULL);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to install hotkey notification\n");
+ pr_info("Unable to install hotkey notification\n");
error = -ENODEV;
goto err_free_keymap;
}
status = acpi_evaluate_object(toshiba_acpi.handle, "ENAB", NULL, NULL);
if (ACPI_FAILURE(status)) {
- printk(MY_INFO "Unable to enable hotkeys\n");
+ pr_info("Unable to enable hotkeys\n");
error = -ENODEV;
goto err_remove_notify;
}
error = input_register_device(toshiba_acpi.hotkey_dev);
if (error) {
- printk(MY_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
goto err_remove_notify;
}
@@ -980,17 +977,17 @@ static int __init toshiba_acpi_init(void)
if (is_valid_acpi_path(TOSH_INTERFACE_1 GHCI_METHOD)) {
method_hci = TOSH_INTERFACE_1 GHCI_METHOD;
if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_1))
- printk(MY_INFO "Unable to activate hotkeys\n");
+ pr_info("Unable to activate hotkeys\n");
} else if (is_valid_acpi_path(TOSH_INTERFACE_2 GHCI_METHOD)) {
method_hci = TOSH_INTERFACE_2 GHCI_METHOD;
if (toshiba_acpi_setup_keyboard(TOSH_INTERFACE_2))
- printk(MY_INFO "Unable to activate hotkeys\n");
+ pr_info("Unable to activate hotkeys\n");
} else
return -ENODEV;
- printk(MY_INFO "Toshiba Laptop ACPI Extras version %s\n",
+ pr_info("Toshiba Laptop ACPI Extras version %s\n",
TOSHIBA_ACPI_VERSION);
- printk(MY_INFO " HCI method: %s\n", method_hci);
+ pr_info(" HCI method: %s\n", method_hci);
mutex_init(&toshiba_acpi.mutex);
@@ -998,7 +995,7 @@ static int __init toshiba_acpi_init(void)
-1, NULL, 0);
if (IS_ERR(toshiba_acpi.p_dev)) {
ret = PTR_ERR(toshiba_acpi.p_dev);
- printk(MY_ERR "unable to register platform device\n");
+ pr_err("unable to register platform device\n");
toshiba_acpi.p_dev = NULL;
toshiba_acpi_exit();
return ret;
@@ -1028,7 +1025,7 @@ static int __init toshiba_acpi_init(void)
if (IS_ERR(toshiba_backlight_device)) {
ret = PTR_ERR(toshiba_backlight_device);
- printk(KERN_ERR "Could not register toshiba backlight device\n");
+ pr_err("Could not register toshiba backlight device\n");
toshiba_backlight_device = NULL;
toshiba_acpi_exit();
return ret;
@@ -1042,14 +1039,14 @@ static int __init toshiba_acpi_init(void)
&toshiba_rfk_ops,
&toshiba_acpi);
if (!toshiba_acpi.bt_rfk) {
- printk(MY_ERR "unable to allocate rfkill device\n");
+ pr_err("unable to allocate rfkill device\n");
toshiba_acpi_exit();
return -ENOMEM;
}
ret = rfkill_register(toshiba_acpi.bt_rfk);
if (ret) {
- printk(MY_ERR "unable to register rfkill device\n");
+ pr_err("unable to register rfkill device\n");
rfkill_destroy(toshiba_acpi.bt_rfk);
toshiba_acpi_exit();
return ret;
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 944068611919..5fb7186694df 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -17,6 +17,8 @@
* delivered.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -70,14 +72,13 @@ static int toshiba_bluetooth_enable(acpi_handle handle)
if (!(result & 0x01))
return 0;
- printk(KERN_INFO "toshiba_bluetooth: Re-enabling Toshiba Bluetooth\n");
+ pr_info("Re-enabling Toshiba Bluetooth\n");
res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
if (!ACPI_FAILURE(res1) || !ACPI_FAILURE(res2))
return 0;
- printk(KERN_WARNING "toshiba_bluetooth: Failed to re-enable "
- "Toshiba Bluetooth\n");
+ pr_warn("Failed to re-enable Toshiba Bluetooth\n");
return -ENODEV;
}
@@ -107,8 +108,8 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
&bt_present);
if (!ACPI_FAILURE(status) && bt_present) {
- printk(KERN_INFO "Detected Toshiba ACPI Bluetooth device - "
- "installing RFKill handler\n");
+ pr_info("Detected Toshiba ACPI Bluetooth device - "
+ "installing RFKill handler\n");
result = toshiba_bluetooth_enable(device->handle);
}
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 05cc79672a8b..f23d5a84e7b1 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -486,16 +486,16 @@ static void wmi_dump_wdg(const struct guid_block *g)
pr_info("\tnotify_id: %02X\n", g->notify_id);
pr_info("\treserved: %02X\n", g->reserved);
pr_info("\tinstance_count: %d\n", g->instance_count);
- pr_info("\tflags: %#x ", g->flags);
+ pr_info("\tflags: %#x", g->flags);
if (g->flags) {
if (g->flags & ACPI_WMI_EXPENSIVE)
- pr_cont("ACPI_WMI_EXPENSIVE ");
+ pr_cont(" ACPI_WMI_EXPENSIVE");
if (g->flags & ACPI_WMI_METHOD)
- pr_cont("ACPI_WMI_METHOD ");
+ pr_cont(" ACPI_WMI_METHOD");
if (g->flags & ACPI_WMI_STRING)
- pr_cont("ACPI_WMI_STRING ");
+ pr_cont(" ACPI_WMI_STRING");
if (g->flags & ACPI_WMI_EVENT)
- pr_cont("ACPI_WMI_EVENT ");
+ pr_cont(" ACPI_WMI_EVENT");
}
pr_cont("\n");
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index c1372ed9d2e9..fad153dc0355 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -11,6 +11,8 @@
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -20,7 +22,6 @@
#include <acpi/acpi_drivers.h>
#define MODULE_NAME "xo15-ebook"
-#define PREFIX MODULE_NAME ": "
#define XO15_EBOOK_CLASS MODULE_NAME
#define XO15_EBOOK_TYPE_UNKNOWN 0x00
@@ -105,7 +106,7 @@ static int ebook_switch_add(struct acpi_device *device)
class = acpi_device_class(device);
if (strcmp(hid, XO15_EBOOK_HID)) {
- printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid);
+ pr_err("Unsupported hid [%s]\n", hid);
error = -ENODEV;
goto err_free_input;
}
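
The platform/x86 conversions above all rest on the kernel's pr_fmt() hook: pr_err()/pr_info()/pr_notice() expand to printk(KERN_* pr_fmt(fmt), ...), so defining pr_fmt(fmt) as KBUILD_MODNAME ": " fmt before the first include prefixes every message with the module name, which is what makes the per-driver TPACPI_*/MY_*/PREFIX log macros removable. A minimal sketch of the idiom in a standalone module (the module name "demo" is assumed purely for illustration):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	pr_info("loaded\n");		/* logged as "demo: loaded" */
	return 0;
}

static void __exit demo_exit(void)
{
	pr_info("unloaded\n");		/* logged as "demo: unloaded" */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
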
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index f46855cd853d..ad747dc337da 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -381,11 +381,6 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf,
return err;
}
-/**
- * proc_scsi_show - show contents of /proc/scsi/scsi (attached devices)
- * @s: output goes here
- * @p: not used
- */
static int always_match(struct device *dev, void *data)
{
return 1;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index fbd96b29530d..de35c3ad8a69 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -80,6 +80,15 @@ config SPI_BFIN
help
This is the SPI controller master driver for Blackfin 5xx processor.
+config SPI_BFIN_SPORT
+ tristate "SPI bus via Blackfin SPORT"
+ depends on BLACKFIN
+ help
+ Enable support for a SPI bus via the Blackfin SPORT peripheral.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_bfin_sport.
+
config SPI_AU1550
tristate "Au1550/Au12x0 SPI Controller"
depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index fd2fc5f6505f..0f8c69b6b19e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi_altera.o
obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
+obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi_bfin_sport.c
new file mode 100644
index 000000000000..e557ff617b11
--- /dev/null
+++ b/drivers/spi/spi_bfin_sport.c
@@ -0,0 +1,952 @@
+/*
+ * SPI bus via the Blackfin SPORT peripheral
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+
+#include <asm/portmux.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/blackfin.h>
+#include <asm/bfin_sport.h>
+#include <asm/cacheflush.h>
+
+#define DRV_NAME "bfin-sport-spi"
+#define DRV_DESC "SPI bus via the Blackfin SPORT"
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bfin-sport-spi");
+
+enum bfin_sport_spi_state {
+ START_STATE,
+ RUNNING_STATE,
+ DONE_STATE,
+ ERROR_STATE,
+};
+
+struct bfin_sport_spi_master_data;
+
+struct bfin_sport_transfer_ops {
+ void (*write) (struct bfin_sport_spi_master_data *);
+ void (*read) (struct bfin_sport_spi_master_data *);
+ void (*duplex) (struct bfin_sport_spi_master_data *);
+};
+
+struct bfin_sport_spi_master_data {
+ /* Driver model hookup */
+ struct device *dev;
+
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* Regs base of SPI controller */
+ struct sport_register __iomem *regs;
+ int err_irq;
+
+ /* Pin request list */
+ u16 *pin_req;
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ bool run;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ enum bfin_sport_spi_state state;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct bfin_sport_spi_slave_data *cur_chip;
+ union {
+ void *tx;
+ u8 *tx8;
+ u16 *tx16;
+ };
+ void *tx_end;
+ union {
+ void *rx;
+ u8 *rx8;
+ u16 *rx16;
+ };
+ void *rx_end;
+
+ int cs_change;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+struct bfin_sport_spi_slave_data {
+ u16 ctl_reg;
+ u16 baud;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
+ u32 cs_gpio;
+ u16 idle_tx_val;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+static void
+bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_or(&drv_data->regs->tcr1, TSPEN);
+ bfin_write_or(&drv_data->regs->rcr1, TSPEN);
+ SSYNC();
+}
+
+static void
+bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
+ bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
+ SSYNC();
+}
+
+/* Calculate the SPI_BAUD register value based on input HZ */
+static u16
+bfin_sport_hz_to_spi_baud(u32 speed_hz)
+{
+ u_long clk, sclk = get_sclk();
+ int div = (sclk / (2 * speed_hz)) - 1;
+
+ if (div < 0)
+ div = 0;
+
+ clk = sclk / (2 * (div + 1));
+
+ if (clk > speed_hz)
+ div++;
+
+ return div;
+}
+
+/* Chip select operation functions for cs_change flag */
+static void
+bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 0);
+}
+
+static void
+bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 1);
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+static void
+bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long timeout = jiffies + HZ;
+ while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
+ if (!time_before(jiffies, timeout))
+ break;
+ }
+}
+
+static void
+bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
+ .write = bfin_sport_spi_u8_writer,
+ .read = bfin_sport_spi_u8_reader,
+ .duplex = bfin_sport_spi_u8_duplex,
+};
+
+static void
+bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
+ .write = bfin_sport_spi_u16_writer,
+ .read = bfin_sport_spi_u16_reader,
+ .duplex = bfin_sport_spi_u16_duplex,
+};
+
+/* stop controller and re-config current chip */
+static void
+bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);
+
+ bfin_sport_spi_disable(drv_data);
+ dev_dbg(drv_data->dev, "restoring spi ctl state\n");
+
+ bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
+ bfin_write(&drv_data->regs->tcr2, bits);
+ bfin_write(&drv_data->regs->tclkdiv, chip->baud);
+ bfin_write(&drv_data->regs->tfsdiv, bits);
+ SSYNC();
+
+ bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
+ bfin_write(&drv_data->regs->rcr2, bits);
+ SSYNC();
+
+ bfin_sport_spi_cs_active(chip);
+}
+
+/* test if there are more transfers to be done */
+static enum bfin_sport_spi_state
+bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct spi_transfer *trans = drv_data->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ drv_data->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ return RUNNING_STATE;
+ }
+
+ return DONE_STATE;
+}
+
+/*
+ * caller already set message->status;
+ * dma and pio irqs are blocked; give the finished message back
+ */
+static void
+bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+ msg = drv_data->cur_msg;
+ drv_data->state = START_STATE;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ if (!drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static irqreturn_t
+sport_err_handler(int irq, void *dev_id)
+{
+ struct bfin_sport_spi_master_data *drv_data = dev_id;
+ u16 status;
+
+ dev_dbg(drv_data->dev, "%s enter\n", __func__);
+ status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);
+
+ if (status) {
+ bfin_write(&drv_data->regs->stat, status);
+ SSYNC();
+
+ bfin_sport_spi_disable(drv_data);
+ dev_err(drv_data->dev, "status error:%s%s%s%s\n",
+ status & TOVF ? " TOVF" : "",
+ status & TUVF ? " TUVF" : "",
+ status & ROVF ? " ROVF" : "",
+ status & RUVF ? " RUVF" : "");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+bfin_sport_spi_pump_transfers(unsigned long data)
+{
+ struct bfin_sport_spi_master_data *drv_data = (void *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct bfin_sport_spi_slave_data *chip = NULL;
+ unsigned int bits_per_word;
+ u32 tranf_success = 1;
+ u32 transfer_speed;
+ u8 full_duplex = 0;
+
+ /* Get current state information */
+ message = drv_data->cur_msg;
+ transfer = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ if (transfer->speed_hz)
+ transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
+ else
+ transfer_speed = chip->baud;
+ bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
+ SSYNC();
+
+ /*
+ * if msg is error or done, report it back using complete() callback
+ */
+
+ /* Handle for abort */
+ if (drv_data->state == ERROR_STATE) {
+ dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
+ message->status = -EIO;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Handle end of message */
+ if (drv_data->state == DONE_STATE) {
+ dev_dbg(drv_data->dev, "transfer: all done!\n");
+ message->status = 0;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Delay if requested at end of transfer */
+ if (drv_data->state == RUNNING_STATE) {
+ dev_dbg(drv_data->dev, "transfer: still running ...\n");
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ }
+
+ if (transfer->len == 0) {
+ /* Move to next transfer of this msg */
+ drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+
+ if (transfer->tx_buf != NULL) {
+ drv_data->tx = (void *)transfer->tx_buf;
+ drv_data->tx_end = drv_data->tx + transfer->len;
+ dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
+ transfer->tx_buf, drv_data->tx_end);
+ } else
+ drv_data->tx = NULL;
+
+ if (transfer->rx_buf != NULL) {
+ full_duplex = transfer->tx_buf != NULL;
+ drv_data->rx = transfer->rx_buf;
+ drv_data->rx_end = drv_data->rx + transfer->len;
+ dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
+ transfer->rx_buf, drv_data->rx_end);
+ } else
+ drv_data->rx = NULL;
+
+ drv_data->cs_change = transfer->cs_change;
+
+ /* Bits per word setup */
+ bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
+ if (bits_per_word == 8)
+ drv_data->ops = &bfin_sport_transfer_ops_u8;
+ else
+ drv_data->ops = &bfin_sport_transfer_ops_u16;
+
+ drv_data->state = RUNNING_STATE;
+
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_active(chip);
+
+ dev_dbg(drv_data->dev,
+ "now pumping a transfer: width is %d, len is %d\n",
+ bits_per_word, transfer->len);
+
+ /* PIO mode write then read */
+ dev_dbg(drv_data->dev, "doing IO transfer\n");
+
+ bfin_sport_spi_enable(drv_data);
+ if (full_duplex) {
+ /* full duplex mode */
+ BUG_ON((drv_data->tx_end - drv_data->tx) !=
+ (drv_data->rx_end - drv_data->rx));
+ drv_data->ops->duplex(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->tx != NULL) {
+ /* write only half duplex */
+
+ drv_data->ops->write(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->rx != NULL) {
+ /* read only half duplex */
+
+ drv_data->ops->read(drv_data);
+ if (drv_data->rx != drv_data->rx_end)
+ tranf_success = 0;
+ }
+ bfin_sport_spi_disable(drv_data);
+
+ if (!tranf_success) {
+ dev_dbg(drv_data->dev, "IO write error!\n");
+ drv_data->state = ERROR_STATE;
+ } else {
+ /* Update total bytes transferred */
+ message->actual_length += transfer->len;
+ /* Move to next transfer of this msg */
+ drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+ }
+
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+/* pop a msg from queue and kick off real transfer */
+static void
+bfin_sport_spi_pump_messages(struct work_struct *work)
+{
+ struct bfin_sport_spi_master_data *drv_data;
+ unsigned long flags;
+ struct spi_message *next_msg;
+
+ drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&drv_data->lock, flags);
+ if (list_empty(&drv_data->queue) || !drv_data->run) {
+ /* pumper kicked off but no work to do */
+ drv_data->busy = 0;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (drv_data->cur_msg) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ next_msg = list_entry(drv_data->queue.next,
+ struct spi_message, queue);
+
+ drv_data->cur_msg = next_msg;
+
+ /* Setup the SSP using the per chip configuration */
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+
+ list_del_init(&drv_data->cur_msg->queue);
+
+ /* Initialize message state */
+ drv_data->cur_msg->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ bfin_sport_spi_restore_state(drv_data);
+ dev_dbg(drv_data->dev, "got a message to pump, "
+ "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
+ drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
+ drv_data->cur_chip->ctl_reg);
+
+ dev_dbg(drv_data->dev,
+ "the first transfer len is %d\n",
+ drv_data->cur_transfer->len);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ drv_data->busy = 1;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+}
+
+/*
+ * got a msg to transfer: queue it in drv_data->queue
+ * and kick off the message pumper
+ */
+static int
+bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (!drv_data->run) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+ dev_dbg(&spi->dev, "adding a msg in transfer()\n");
+ list_add_tail(&msg->queue, &drv_data->queue);
+
+ if (drv_data->run && !drv_data->busy)
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return 0;
+}
+
+/* Called every time common spi devices change state */
+static int
+bfin_sport_spi_setup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip, *first = NULL;
+ int ret;
+
+ /* Only alloc (or use chip_info) on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ struct bfin5xx_spi_chip *chip_info;
+
+ chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ /* platform chip_info isn't required */
+ chip_info = spi->controller_data;
+ if (chip_info) {
+ /*
+ * DITFS and TDTYPE are the only things we don't set, but
+ * they probably shouldn't be changed by people.
+ */
+ if (chip_info->ctl_reg || chip_info->enable_dma) {
+ ret = -EINVAL;
+ dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n");
+ goto error;
+ }
+ chip->cs_chg_udelay = chip_info->cs_chg_udelay;
+ chip->idle_tx_val = chip_info->idle_tx_val;
+ spi->bits_per_word = chip_info->bits_per_word;
+ }
+ }
+
+ if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* translate the common SPI framework settings into our registers;
+ * the following configuration is the same for tx and rx.
+ */
+
+ if (spi->mode & SPI_CPHA)
+ chip->ctl_reg &= ~TCKFE;
+ else
+ chip->ctl_reg |= TCKFE;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->ctl_reg |= TLSBIT;
+ else
+ chip->ctl_reg &= ~TLSBIT;
+
+ /* Sport in master mode */
+ chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;
+
+ chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);
+
+ chip->cs_gpio = spi->chip_select;
+ ret = gpio_request(chip->cs_gpio, spi->modalias);
+ if (ret)
+ goto error;
+
+ dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
+ spi->modalias, spi->bits_per_word);
+ dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
+ chip->ctl_reg, spi->chip_select);
+
+ spi_set_ctldata(spi, chip);
+
+ bfin_sport_spi_cs_deactive(chip);
+
+ return ret;
+
+ error:
+ kfree(first);
+ return ret;
+}
+
+/*
+ * callback for the spi framework:
+ * clean up driver-specific data
+ */
+static void
+bfin_sport_spi_cleanup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);
+
+ if (!chip)
+ return;
+
+ gpio_free(chip->cs_gpio);
+
+ kfree(chip);
+}
+
+static int
+bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ INIT_LIST_HEAD(&drv_data->queue);
+ spin_lock_init(&drv_data->lock);
+
+ drv_data->run = false;
+ drv_data->busy = 0;
+
+ /* init transfer tasklet */
+ tasklet_init(&drv_data->pump_transfers,
+ bfin_sport_spi_pump_transfers, (unsigned long)drv_data);
+
+ /* init messages workqueue */
+ INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
+ drv_data->workqueue =
+ create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
+ if (drv_data->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (drv_data->run || drv_data->busy) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -EBUSY;
+ }
+
+ drv_data->run = true;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ return 0;
+}
+
+static inline int
+bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ /*
+ * This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on drv_data->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead.
+ */
+ drv_data->run = false;
+ while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&drv_data->lock, flags);
+ }
+
+ if (!list_empty(&drv_data->queue) || drv_data->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return status;
+}
+
+static inline int
+bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ destroy_workqueue(drv_data->workqueue);
+
+ return 0;
+}
+
+static int __devinit
+bfin_sport_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bfin5xx_spi_master *platform_info;
+ struct spi_master *master;
+ struct resource *res, *ires;
+ struct bfin_sport_spi_master_data *drv_data;
+ int status;
+
+ platform_info = dev->platform_data;
+
+ /* Allocate master with space for drv_data */
+ master = spi_alloc_master(dev, sizeof(*master) + 16);
+ if (!master) {
+ dev_err(dev, "cannot alloc spi_master\n");
+ return -ENOMEM;
+ }
+
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->dev = dev;
+ drv_data->pin_req = platform_info->pin_req;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bus_num = pdev->id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = bfin_sport_spi_cleanup;
+ master->setup = bfin_sport_spi_setup;
+ master->transfer = bfin_sport_spi_transfer;
+
+ /* Find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "cannot get IORESOURCE_MEM\n");
+ status = -ENOENT;
+ goto out_error_get_res;
+ }
+
+ drv_data->regs = ioremap(res->start, resource_size(res));
+ if (drv_data->regs == NULL) {
+ dev_err(dev, "cannot map registers\n");
+ status = -ENXIO;
+ goto out_error_ioremap;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ dev_err(dev, "cannot get IORESOURCE_IRQ\n");
+ status = -ENODEV;
+ goto out_error_get_ires;
+ }
+ drv_data->err_irq = ires->start;
+
+ /* Initialize and start the queue */
+ status = bfin_sport_spi_init_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem initializing queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem starting queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = request_irq(drv_data->err_irq, sport_err_handler,
+ 0, "sport_spi_err", drv_data);
+ if (status) {
+ dev_err(dev, "unable to request sport err irq\n");
+ goto out_error_irq;
+ }
+
+ status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
+ if (status) {
+ dev_err(dev, "requesting peripherals failed\n");
+ goto out_error_peripheral;
+ }
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+ status = spi_register_master(master);
+ if (status) {
+ dev_err(dev, "problem registering spi master\n");
+ goto out_error_master;
+ }
+
+ dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
+ return 0;
+
+ out_error_master:
+ peripheral_free_list(drv_data->pin_req);
+ out_error_peripheral:
+ free_irq(drv_data->err_irq, drv_data);
+ out_error_irq:
+ out_error_queue_alloc:
+ bfin_sport_spi_destroy_queue(drv_data);
+ out_error_get_ires:
+ iounmap(drv_data->regs);
+ out_error_ioremap:
+ out_error_get_res:
+ spi_master_put(master);
+
+ return status;
+}
+
+/* stop hardware and remove the driver */
+static int __devexit
+bfin_sport_spi_remove(struct platform_device *pdev)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status = 0;
+
+ if (!drv_data)
+ return 0;
+
+ /* Remove the queue */
+ status = bfin_sport_spi_destroy_queue(drv_data);
+ if (status)
+ return status;
+
+ /* Disable the SSP at the peripheral and SOC level */
+ bfin_sport_spi_disable(drv_data);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(drv_data->master);
+
+ peripheral_free_list(drv_data->pin_req);
+
+ /* Prevent double remove */
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ /* stop hardware */
+ bfin_sport_spi_disable(drv_data);
+
+ return status;
+}
+
+static int
+bfin_sport_spi_resume(struct platform_device *pdev)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status;
+
+ /* Enable the SPI interface */
+ bfin_sport_spi_enable(drv_data);
+
+ /* Start the queue running */
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status)
+ dev_err(drv_data->dev, "problem resuming queue\n");
+
+ return status;
+}
+#else
+# define bfin_sport_spi_suspend NULL
+# define bfin_sport_spi_resume NULL
+#endif
+
+static struct platform_driver bfin_sport_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = bfin_sport_spi_probe,
+ .remove = __devexit_p(bfin_sport_spi_remove),
+ .suspend = bfin_sport_spi_suspend,
+ .resume = bfin_sport_spi_resume,
+};
+
+static int __init bfin_sport_spi_init(void)
+{
+ return platform_driver_register(&bfin_sport_spi_driver);
+}
+module_init(bfin_sport_spi_init);
+
+static void __exit bfin_sport_spi_exit(void)
+{
+ platform_driver_unregister(&bfin_sport_spi_driver);
+}
+module_exit(bfin_sport_spi_exit);
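
bfin_sport_hz_to_spi_baud() above implements the SPORT serial-clock relation SCK = SCLK / (2 * (TCLKDIV + 1)), biased so that the generated clock never exceeds the requested speed. The same arithmetic can be exercised in isolation; the sketch below is plain userspace C, and the 133 MHz SCLK is assumed only for illustration:

#include <stdio.h>

/* Plain userspace restatement of the driver's Hz-to-TCLKDIV mapping;
 * the 133 MHz system clock (SCLK) is an assumption for illustration only. */
static const unsigned long sclk_hz = 133000000;

static unsigned int hz_to_tclkdiv(unsigned long speed_hz)
{
	long div = (long)(sclk_hz / (2 * speed_hz)) - 1;  /* SCK = SCLK / (2 * (div + 1)) */

	if (div < 0)
		div = 0;
	if (sclk_hz / (2 * (div + 1)) > speed_hz)         /* never exceed the requested rate */
		div++;
	return div;
}

int main(void)
{
	unsigned int d10 = hz_to_tclkdiv(10000000);
	unsigned int d1  = hz_to_tclkdiv(1000000);

	printf("10 MHz -> TCLKDIV %u (actual %lu Hz)\n", d10, sclk_hz / (2 * (d10 + 1)));
	printf(" 1 MHz -> TCLKDIV %u (actual %lu Hz)\n", d1,  sclk_hz / (2 * (d1 + 1)));
	return 0;
}
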
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index a3938958147c..32a40876532f 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -283,7 +283,7 @@ static int __devinit tle62x0_probe(struct spi_device *spi)
return 0;
err_gpios:
- for (; ptr > 0; ptr--)
+ while (--ptr >= 0)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
device_remove_file(&spi->dev, &dev_attr_status_show);
@@ -301,6 +301,7 @@ static int __devexit tle62x0_remove(struct spi_device *spi)
for (ptr = 0; ptr < st->nr_gpio; ptr++)
device_remove_file(&spi->dev, gpio_attrs[ptr]);
+ device_remove_file(&spi->dev, &dev_attr_status_show);
kfree(st);
return 0;
}
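
Both tle62x0 hunks tighten the sysfs error and removal paths. When device_create_file() fails at index ptr in probe, exactly gpio_attrs[0..ptr-1] have been created, so unwinding has to walk ptr-1 down to 0; the old "for (; ptr > 0; ptr--)" removed an attribute that was never created and skipped index 0, and remove() additionally forgot the status attribute. The create-then-unwind idiom the fix restores, as a generic sketch with hypothetical names:

#include <linux/device.h>

/* Illustrative helper, not part of the patch: create n attributes and,
 * on failure at index i, remove exactly the i attributes that exist,
 * walking i-1 down to 0. */
static int demo_create_attrs(struct device *dev,
			     struct device_attribute **attrs, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = device_create_file(dev, attrs[i]);
		if (ret)
			goto err;
	}
	return 0;

err:
	while (--i >= 0)			/* undo attrs[0..i-1], nothing more */
		device_remove_file(dev, attrs[i]);
	return ret;
}
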
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index fc6f2a5bde01..0b1c82ad6805 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -499,7 +499,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
dev_set_drvdata(hwmon->device, hwmon);
result = device_create_file(hwmon->device, &dev_attr_name);
if (result)
- goto unregister_hwmon_device;
+ goto free_mem;
register_sys_interface:
tz->hwmon = hwmon;
@@ -513,7 +513,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
sysfs_attr_init(&tz->temp_input.attr.attr);
result = device_create_file(hwmon->device, &tz->temp_input.attr);
if (result)
- goto unregister_hwmon_device;
+ goto unregister_name;
if (tz->ops->get_crit_temp) {
unsigned long temperature;
@@ -527,7 +527,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
result = device_create_file(hwmon->device,
&tz->temp_crit.attr);
if (result)
- goto unregister_hwmon_device;
+ goto unregister_input;
}
}
@@ -539,9 +539,9 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return 0;
- unregister_hwmon_device:
- device_remove_file(hwmon->device, &tz->temp_crit.attr);
+ unregister_input:
device_remove_file(hwmon->device, &tz->temp_input.attr);
+ unregister_name:
if (new_hwmon_device) {
device_remove_file(hwmon->device, &dev_attr_name);
hwmon_device_unregister(hwmon->device);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index a4c42a75a3bf..09e8c7d53af3 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2128,8 +2128,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
gsm->tty = NULL;
}
-static unsigned int gsmld_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct gsm_mux *gsm = tty->disc_data;
const unsigned char *dp;
@@ -2162,8 +2162,6 @@ static unsigned int gsmld_receive_buf(struct tty_struct *tty,
}
/* FASYNC if needed ? */
/* If clogged call tty_throttle(tty); */
-
- return count;
}
/**
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index cac666314aef..cea56033b34c 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -188,8 +188,8 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
- const __u8 *cp, char *fp, int count);
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
+ char *fp, int count);
static void n_hdlc_tty_wakeup(struct tty_struct *tty);
#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
@@ -509,8 +509,8 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
* Called by tty low level driver when receive data is available. Data is
* interpreted as one HDLC frame.
*/
-static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
- const __u8 *data, char *flags, int count)
+static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
+ char *flags, int count)
{
register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
register struct n_hdlc_buf *buf;
@@ -521,20 +521,20 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
/* This can happen if stuff comes in on the backup tty */
if (!n_hdlc || tty != n_hdlc->tty)
- return -ENODEV;
+ return;
/* verify line is using HDLC discipline */
if (n_hdlc->magic != HDLC_MAGIC) {
printk("%s(%d) line not using HDLC discipline\n",
__FILE__,__LINE__);
- return -EINVAL;
+ return;
}
if ( count>maxframe ) {
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d) rx count>maxframesize, data discarded\n",
__FILE__,__LINE__);
- return -EINVAL;
+ return;
}
/* get a free HDLC buffer */
@@ -550,7 +550,7 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d) no more rx buffers, data discarded\n",
__FILE__,__LINE__);
- return -EINVAL;
+ return;
}
/* copy received data to HDLC buffer */
@@ -565,8 +565,6 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
if (n_hdlc->tty->fasync != NULL)
kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
- return count;
-
} /* end of n_hdlc_tty_receive() */
/**
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index a4bc39c21a43..5c6c31459a2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -139,8 +139,8 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count);
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count);
static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
.owner = THIS_MODULE,
@@ -1239,8 +1239,8 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
return result;
}
-static unsigned int r3964_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct r3964_info *pInfo = tty->disc_data;
const unsigned char *p;
@@ -1257,8 +1257,6 @@ static unsigned int r3964_receive_buf(struct tty_struct *tty,
}
}
-
- return count;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 95d0a9c2dd13..0ad32888091c 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -81,6 +81,38 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr);
}
+/**
+ * n_tty_set_room - receive space
+ * @tty: terminal
+ *
+ * Called by the driver to find out how much data it is
+ * permitted to feed to the line discipline without any being lost
+ * and thus to manage flow control. Not serialized. Answers for the
+ * "instant".
+ */
+
+static void n_tty_set_room(struct tty_struct *tty)
+{
+ /* tty->read_cnt is not read locked ? */
+ int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
+ int old_left;
+
+ /*
+ * If we are doing input canonicalization, and there are no
+ * pending newlines, let characters through without limit, so
+ * that erase characters will be handled. Other excess
+ * characters will be beeped.
+ */
+ if (left <= 0)
+ left = tty->icanon && !tty->canon_data;
+ old_left = tty->receive_room;
+ tty->receive_room = left;
+
+ /* Did this open up the receive buffer? We may need to flip */
+ if (left && !old_left)
+ schedule_work(&tty->buf.work);
+}
+
static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
{
if (tty->read_cnt < N_TTY_BUF_SIZE) {
@@ -152,6 +184,7 @@ static void reset_buffer_flags(struct tty_struct *tty)
tty->canon_head = tty->canon_data = tty->erasing = 0;
memset(&tty->read_flags, 0, sizeof tty->read_flags);
+ n_tty_set_room(tty);
check_unthrottle(tty);
}
@@ -1327,19 +1360,17 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
* calls one at a time and in order (or using flush_to_ldisc)
*/
-static unsigned int n_tty_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
+static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
const unsigned char *p;
char *f, flags = TTY_NORMAL;
int i;
char buf[64];
unsigned long cpuflags;
- int left;
- int ret = 0;
if (!tty->read_buf)
- return 0;
+ return;
if (tty->real_raw) {
spin_lock_irqsave(&tty->read_lock, cpuflags);
@@ -1349,7 +1380,6 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
- ret += i;
cp += i;
count -= i;
@@ -1359,10 +1389,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
- ret += i;
spin_unlock_irqrestore(&tty->read_lock, cpuflags);
} else {
- ret = count;
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
flags = *f++;
@@ -1390,6 +1418,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
tty->ops->flush_chars(tty);
}
+ n_tty_set_room(tty);
+
if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
L_EXTPROC(tty)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
@@ -1402,12 +1432,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
* mode. We don't want to throttle the driver if we're in
* canonical mode and don't have a newline yet!
*/
- left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
-
- if (left < TTY_THRESHOLD_THROTTLE)
+ if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
tty_throttle(tty);
-
- return ret;
}
int is_ignored(int sig)
@@ -1451,6 +1477,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
tty->raw = 1;
tty->real_raw = 1;
+ n_tty_set_room(tty);
return;
}
if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1503,6 +1530,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
else
tty->real_raw = 0;
}
+ n_tty_set_room(tty);
/* The termios change make the tty ready for I/O */
wake_up_interruptible(&tty->write_wait);
wake_up_interruptible(&tty->read_wait);
@@ -1784,6 +1812,8 @@ do_it_again:
retval = -ERESTARTSYS;
break;
}
+ /* FIXME: does n_tty_set_room need locking ? */
+ n_tty_set_room(tty);
timeout = schedule_timeout(timeout);
continue;
}
@@ -1855,8 +1885,10 @@ do_it_again:
* longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
* we won't get any more characters.
*/
- if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
+ if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+ n_tty_set_room(tty);
check_unthrottle(tty);
+ }
if (b - buf >= minimum)
break;
@@ -1878,6 +1910,7 @@ do_it_again:
} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
goto do_it_again;
+ n_tty_set_room(tty);
return retval;
}
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 46de2e075dac..f1a7918d71aa 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -416,7 +416,6 @@ static void flush_to_ldisc(struct work_struct *work)
struct tty_buffer *head, *tail = tty->buf.tail;
int seen_tail = 0;
while ((head = tty->buf.head) != NULL) {
- int copied;
int count;
char *char_buf;
unsigned char *flag_buf;
@@ -443,19 +442,17 @@ static void flush_to_ldisc(struct work_struct *work)
line discipline as we want to empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags))
break;
+ if (!tty->receive_room || seen_tail)
+ break;
+ if (count > tty->receive_room)
+ count = tty->receive_room;
char_buf = head->char_buf_ptr + head->read;
flag_buf = head->flag_buf_ptr + head->read;
+ head->read += count;
spin_unlock_irqrestore(&tty->buf.lock, flags);
- copied = disc->ops->receive_buf(tty, char_buf,
+ disc->ops->receive_buf(tty, char_buf,
flag_buf, count);
spin_lock_irqsave(&tty->buf.lock, flags);
-
- head->read += copied;
-
- if (copied == 0 || seen_tail) {
- schedule_work(&tty->buf.work);
- break;
- }
}
clear_bit(TTY_FLUSHING, &tty->flags);
}
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 67b1d0d7c8ac..fb864e7fcd13 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -332,7 +332,8 @@ int paste_selection(struct tty_struct *tty)
continue;
}
count = sel_buffer_lth - pasted;
- count = tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
+ count = min(count, tty->receive_room);
+ tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
NULL, count);
pasted += count;
}
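Taken together, the tty_buffer.c and selection.c hunks move every receive_buf() producer to the same contract: clamp the count to tty->receive_room before calling in, and expect no return value. An illustrative (not from this diff) view of what a line-discipline method looks like to its callers after the change:

/* Illustrative only: the post-patch receive_buf() contract. */
static void example_ldisc_receive_buf(struct tty_struct *tty,
				      const unsigned char *cp,
				      char *fp, int count)
{
	/*
	 * Callers such as flush_to_ldisc() and paste_selection() have already
	 * limited count to tty->receive_room, so the method may consume the
	 * whole buffer and simply refreshes receive_room for the next round.
	 */
}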
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 660b80a75cac..1102ce65a3a9 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -348,11 +348,50 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
return rc;
}
+static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == 0x1E26;
+}
+
+static void ehci_enable_xhci_companion(void)
+{
+ struct pci_dev *companion = NULL;
+
+ /* The xHCI and EHCI controllers are not on the same PCI slot */
+ for_each_pci_dev(companion) {
+ if (!usb_is_intel_switchable_xhci(companion))
+ continue;
+ usb_enable_xhci_ports(companion);
+ return;
+ }
+}
+
static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ /* The BIOS on systems with the Intel Panther Point chipset may or may
+ * not support xHCI natively. That means that during system resume, it
+ * may switch the ports back to EHCI so that users can use their
+ * keyboard to select a kernel from GRUB after resume from hibernate.
+ *
+ * The BIOS is supposed to remember whether the OS had xHCI ports
+ * enabled before resume, and switch the ports back to xHCI when the
+ * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+ * writers.
+ *
+ * Unconditionally switch the ports back to xHCI after a system resume.
+ * We can't tell whether the EHCI or xHCI controller will be resumed
+ * first, so we have to do the port switchover in both drivers. Writing
+ * a '1' to the port switchover registers should have no effect if the
+ * port was already switched over.
+ */
+ if (usb_is_intel_switchable_ehci(pdev))
+ ehci_enable_xhci_companion();
+
// maybe restore FLADJ
if (time_before(jiffies, ehci->next_statechange))
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f16c59d5f487..fd930618c28f 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -69,6 +69,9 @@
#define NB_PIF0_PWRDOWN_0 0x01100012
#define NB_PIF0_PWRDOWN_1 0x01100013
+#define USB_INTEL_XUSB2PR 0xD0
+#define USB_INTEL_USB3_PSSEN 0xD8
+
static struct amd_chipset_info {
struct pci_dev *nb_dev;
struct pci_dev *smbus_dev;
@@ -673,6 +676,64 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
return -ETIMEDOUT;
}
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+{
+ return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
+}
+EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
+
+/*
+ * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
+ * share some number of ports. These ports can be switched between either
+ * controller. Not all of the ports under the EHCI host controller may be
+ * switchable.
+ *
+ * The ports should be switched over to xHCI before PCI probes for any device
+ * start. This avoids active devices under EHCI being disconnected during the
+ * port switchover, which could cause loss of data on USB storage devices, or
+ * failed boot when the root file system is on a USB mass storage device and is
+ * enumerated under EHCI first.
+ *
+ * We write into the xHC's PCI configuration space in some Intel-specific
+ * registers to switch the ports over. The USB 3.0 terminations and the USB
+ * 2.0 data wires are switched separately. We want to enable the SuperSpeed
+ * terminations before switching the USB 2.0 wires over, so that USB 3.0
+ * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
+ */
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+ u32 ports_available;
+
+ ports_available = 0xffffffff;
+ /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+ * Register, to turn on SuperSpeed terminations for all
+ * available ports.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ cpu_to_le32(ports_available));
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+ "under xHCI: 0x%x\n", ports_available);
+
+ ports_available = 0xffffffff;
+ /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+ * switch the USB 2.0 power and data lines over to the xHCI
+ * host.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ cpu_to_le32(ports_available));
+
+ pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+ &ports_available);
+ dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
+ "to xHCI: 0x%x\n", ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
/**
* PCI Quirks for xHCI.
*
@@ -732,6 +793,8 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
writel(XHCI_LEGACY_DISABLE_SMI,
base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
hc_init:
op_reg_base = base + XHCI_HC_LENGTH(readl(base));
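Not every EHCI port is necessarily switchable, which is why each write above is followed by a read-back: the controller only latches the bits for ports it can actually route to the xHC. A worked example with a hypothetical read-back value:

	u32 xusb2pr;

	/* Hypothetical board where only the low four ports are switchable. */
	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0xffffffff);
	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, &xusb2pr);
	/* xusb2pr now reads back as 0x0000000f: ports 0-3 routed to xHCI. */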
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 6ae9f78e9938..b1002a8ef96f 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -8,6 +8,8 @@ int usb_amd_find_chipset_info(void);
void usb_amd_dev_put(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
+bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
#else
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
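The new prototypes are declared outside the #else branch shown here. If this header is ever included in a configuration that takes the #else path, matching no-op stubs would be needed as well; a hypothetical sketch (this is an assumption about the surrounding #ifdef, which the hunk does not show in full):

/* Hypothetical stubs for the non-quirks case; not part of this hunk. */
static inline bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
{
	return false;
}
static inline void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) {}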
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cbc4d491e626..c408e9f6a707 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -118,6 +118,12 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
/* AMD PLL quirk */
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ }
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
@@ -242,8 +248,28 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval = 0;
+ /* The BIOS on systems with the Intel Panther Point chipset may or may
+ * not support xHCI natively. That means that during system resume, it
+ * may switch the ports back to EHCI so that users can use their
+ * keyboard to select a kernel from GRUB after resume from hibernate.
+ *
+ * The BIOS is supposed to remember whether the OS had xHCI ports
+ * enabled before resume, and switch the ports back to xHCI when the
+ * BIOS/OS semaphore is written, but we all know we can't trust BIOS
+ * writers.
+ *
+ * Unconditionally switch the ports back to xHCI after a system resume.
+ * We can't tell whether the EHCI or xHCI controller will be resumed
+ * first, so we have to do the port switchover in both drivers. Writing
+ * a '1' to the port switchover registers should have no effect if the
+ * port was already switched over.
+ */
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
+
retval = xhci_resume(xhci, hibernated);
return retval;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 237a765f8d18..cc1485bfed38 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -167,12 +167,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
next = ring->dequeue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}
/*
@@ -248,12 +242,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
next = ring->enqueue;
}
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
- if (ring == xhci->event_ring)
- xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
- else if (ring == xhci->cmd_ring)
- xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
- else
- xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
/*
@@ -636,13 +624,11 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
}
usb_hcd_unlink_urb_from_ep(hcd, urb);
- xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(hcd, urb, status);
xhci_urb_free_priv(xhci, urb_priv);
spin_lock(&xhci->lock);
- xhci_dbg(xhci, "%s URB given back\n", adjective);
}
}
@@ -692,6 +678,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->stopped_td = NULL;
+ ep->stopped_trb = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -1093,8 +1081,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
complete(&xhci->addr_dev);
break;
case TRB_TYPE(TRB_DISABLE_SLOT):
- if (xhci->devs[slot_id])
+ if (xhci->devs[slot_id]) {
+ if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+ /* Delete default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci,
+ xhci->devs[slot_id], true);
xhci_free_virt_device(xhci, slot_id);
+ }
break;
case TRB_TYPE(TRB_CONFIG_EP):
virt_dev = xhci->devs[slot_id];
@@ -1630,7 +1623,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
"without IOC set??\n");
*status = -ESHUTDOWN;
} else {
- xhci_dbg(xhci, "Successful control transfer!\n");
*status = 0;
}
break;
@@ -1727,7 +1719,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
switch (trb_comp_code) {
case COMP_SUCCESS:
frame->status = 0;
- xhci_dbg(xhci, "Successful isoc transfer!\n");
break;
case COMP_SHORT_TX:
frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
@@ -1837,12 +1828,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
else
*status = 0;
} else {
- if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
- xhci_dbg(xhci, "Successful bulk "
- "transfer!\n");
- else
- xhci_dbg(xhci, "Successful interrupt "
- "transfer!\n");
*status = 0;
}
break;
@@ -1856,11 +1841,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Others already handled above */
break;
}
- xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
- "%d bytes untransferred\n",
- td->urb->ep->desc.bEndpointAddress,
- td->urb->transfer_buffer_length,
- TRB_LEN(le32_to_cpu(event->transfer_len)));
+ if (trb_comp_code == COMP_SHORT_TX)
+ xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+ TRB_LEN(le32_to_cpu(event->transfer_len)));
/* Fast path - was this the last TRB in the TD for this URB? */
if (event_trb == td->last_trb) {
if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
@@ -1954,7 +1940,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Endpoint ID is 1 based, our index is zero based */
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
- xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
@@ -2081,6 +2066,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (!event_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ /* Some host controllers give a spurious
+ * successful event after a short transfer.
+ * Ignore it.
+ */
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ ep_ring->last_td_was_short) {
+ ep_ring->last_td_was_short = false;
+ ret = 0;
+ goto cleanup;
+ }
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
@@ -2091,6 +2086,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ret = skip_isoc_td(xhci, td, event, ep, &status);
goto cleanup;
}
+ if (trb_comp_code == COMP_SHORT_TX)
+ ep_ring->last_td_was_short = true;
+ else
+ ep_ring->last_td_was_short = false;
if (ep->skip) {
xhci_dbg(xhci, "Found td. Clear skip flag.\n");
@@ -2149,9 +2148,15 @@ cleanup:
xhci_urb_free_priv(xhci, urb_priv);
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
- xhci_dbg(xhci, "Giveback URB %p, len = %d, "
- "status = %d\n",
- urb, urb->actual_length, status);
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+ (urb->transfer_flags &
+ URB_SHORT_NOT_OK)) ||
+ status != 0)
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, "
+ "expected = %x, status = %d\n",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length,
+ status);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
@@ -2180,7 +2185,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
int update_ptrs = 1;
int ret;
- xhci_dbg(xhci, "In %s\n", __func__);
if (!xhci->event_ring || !xhci->event_ring->dequeue) {
xhci->error_bitmask |= 1 << 1;
return 0;
@@ -2193,7 +2197,6 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
xhci->error_bitmask |= 1 << 2;
return 0;
}
- xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2203,20 +2206,14 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
/* FIXME: Handle more event types. */
switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
case TRB_TYPE(TRB_COMPLETION):
- xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
handle_cmd_completion(xhci, &event->event_cmd);
- xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
break;
case TRB_TYPE(TRB_PORT_STATUS):
- xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
handle_port_status(xhci, event);
- xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
update_ptrs = 0;
break;
case TRB_TYPE(TRB_TRANSFER):
- xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
ret = handle_tx_event(xhci, &event->trans_event);
- xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
if (ret < 0)
xhci->error_bitmask |= 1 << 9;
else
@@ -2273,16 +2270,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
spin_unlock(&xhci->lock);
return IRQ_NONE;
}
- xhci_dbg(xhci, "op reg status = %08x\n", status);
- xhci_dbg(xhci, "Event ring dequeue ptr:\n");
- xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
- (unsigned long long)
- xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
- lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- (unsigned int) le32_to_cpu(trb->link.intr_target),
- (unsigned int) le32_to_cpu(trb->link.control));
-
if (status & STS_FATAL) {
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
@@ -2397,7 +2384,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
/* Make sure the endpoint has been added to xHC schedule */
- xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
switch (ep_state) {
case EP_STATE_DISABLED:
/*
@@ -2434,7 +2420,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
struct xhci_ring *ring = ep_ring;
union xhci_trb *next;
- xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
next = ring->enqueue;
while (last_trb(xhci, ring, ring->enq_seg, next)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8f2a56ece44f..d9660eb97eb9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1314,8 +1314,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
+ xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
drop_flag = xhci_get_endpoint_flag(&ep->desc);
if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
@@ -1401,6 +1403,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
return ret;
}
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
added_ctxs = xhci_get_endpoint_flag(&ep->desc);
last_ctx = xhci_last_valid_endpoint(added_ctxs);
@@ -1578,6 +1582,113 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
return ret;
}
+static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ /* Ignore the slot flag (bit 0), and the default control endpoint flag
+ * (bit 1). The default control endpoint is added during the Address
+ * Device command and is never removed until the slot is disabled.
+ */
+ valid_add_flags = ctrl_ctx->add_flags >> 2;
+ valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+ /* Use hweight32 to count the number of ones in the add flags, or
+ * number of endpoints added. Don't count endpoints that are changed
+ * (both added and dropped).
+ */
+ return hweight32(valid_add_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
+
+static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u32 valid_add_flags;
+ u32 valid_drop_flags;
+
+ ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ valid_add_flags = ctrl_ctx->add_flags >> 2;
+ valid_drop_flags = ctrl_ctx->drop_flags >> 2;
+
+ return hweight32(valid_drop_flags) -
+ hweight32(valid_add_flags & valid_drop_flags);
+}
+
+/*
+ * We need to reserve the new number of endpoints before the configure endpoint
+ * command completes. We can't subtract the dropped endpoints from the number
+ * of active endpoints until the command completes because we can oversubscribe
+ * the host in this case:
+ *
+ * - the first configure endpoint command drops more endpoints than it adds
+ * - a second configure endpoint command that adds more endpoints is queued
+ * - the first configure endpoint command fails, so the config is unchanged
+ * - the second command may succeed, even though there aren't enough resources
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 added_eps;
+
+ added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+ if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
+ xhci_dbg(xhci, "Not enough ep ctxs: "
+ "%u active, need to add %u, limit is %u.\n",
+ xhci->num_active_eps, added_eps,
+ xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += added_eps;
+ xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+ xhci->num_active_eps);
+ return 0;
+}
+
+/*
+ * The configure endpoint was failed by the xHC for some other reason, so we
+ * need to revert the resources that failed configuration would have used.
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_free_host_resources(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 num_failed_eps;
+
+ num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
+ xhci->num_active_eps -= num_failed_eps;
+ xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+ num_failed_eps,
+ xhci->num_active_eps);
+}
+
+/*
+ * Now that the command has completed, clean up the active endpoint count by
+ * subtracting out the endpoints that were dropped (but not changed).
+ *
+ * Must be called with xhci->lock held.
+ */
+static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *in_ctx)
+{
+ u32 num_dropped_eps;
+
+ num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+ num_dropped_eps,
+ xhci->num_active_eps);
+}
+
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
*/
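The counting functions above deliberately ignore "changed" endpoints, since dropping and re-adding the same endpoint needs no extra context in the host. A quick worked example with hypothetical flag values:

	/* Hypothetical flags after the >> 2 shift: EP 1 and EP 2 added, EP 1 also dropped. */
	u32 valid_add_flags  = 0x3;	/* 0b11 -> hweight32() == 2 */
	u32 valid_drop_flags = 0x1;	/* 0b01 -> hweight32() == 1 */

	/* New endpoints to reserve now:  2 - hweight32(0x3 & 0x1) = 2 - 1 = 1 */
	/* Endpoints to release once the command completes: 1 - 1 = 0          */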
@@ -1598,6 +1709,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
virt_dev = xhci->devs[udev->slot_id];
if (command) {
in_ctx = command->in_ctx;
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, in_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
+
cmd_completion = command->completion;
cmd_status = &command->status;
command->command_trb = xhci->cmd_ring->enqueue;
@@ -1613,6 +1733,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
} else {
in_ctx = virt_dev->in_ctx;
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
+ xhci_reserve_host_resources(xhci, in_ctx)) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ return -ENOMEM;
+ }
cmd_completion = &virt_dev->cmd_completion;
cmd_status = &virt_dev->cmd_status;
}
@@ -1627,6 +1755,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
if (ret < 0) {
if (command)
list_del(&command->cmd_list);
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ xhci_free_host_resources(xhci, in_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
@@ -1649,8 +1779,22 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
}
if (!ctx_change)
- return xhci_configure_endpoint_result(xhci, udev, cmd_status);
- return xhci_evaluate_context_result(xhci, udev, cmd_status);
+ ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
+ else
+ ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* If the command failed, remove the reserved resources.
+ * Otherwise, clean up the estimate to include dropped eps.
+ */
+ if (ret)
+ xhci_free_host_resources(xhci, in_ctx);
+ else
+ xhci_finish_resource_reservation(xhci, in_ctx);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ return ret;
}
/* Called after one or more calls to xhci_add_endpoint() or
@@ -1676,6 +1820,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
virt_dev = xhci->devs[udev->slot_id];
@@ -2266,6 +2412,34 @@ int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
}
/*
+ * Deletes endpoint resources for endpoints that were active before a Reset
+ * Device command, or a Disable Slot command. The Reset Device command leaves
+ * the control endpoint intact, whereas the Disable Slot command deletes it.
+ *
+ * Must be called with xhci->lock held.
+ */
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep)
+{
+ int i;
+ unsigned int num_dropped_eps = 0;
+ unsigned int drop_flags = 0;
+
+ for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
+ if (virt_dev->eps[i].ring) {
+ drop_flags |= 1 << i;
+ num_dropped_eps++;
+ }
+ }
+ xhci->num_active_eps -= num_dropped_eps;
+ if (num_dropped_eps)
+ xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
+ "%u now active.\n",
+ num_dropped_eps, drop_flags,
+ xhci->num_active_eps);
+}
+
+/*
* This submits a Reset Device Command, which will set the device state to 0,
* set the device address to 0, and disable all the endpoints except the default
* control endpoint. The USB core should come back and call
@@ -2406,6 +2580,14 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
goto command_cleanup;
}
+ /* Free up host controller endpoint resources */
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* Don't delete the default control endpoint resources */
+ xhci_free_device_endpoint_resources(xhci, virt_dev, false);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+
/* Everything but endpoint 0 is disabled, so free or cache the rings. */
last_freed_endpoint = 1;
for (i = 1; i < 31; ++i) {
@@ -2479,6 +2661,27 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
/*
+ * Checks if we have enough host controller resources for the default control
+ * endpoint.
+ *
+ * Must be called with xhci->lock held.
+ */
+static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+{
+ if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
+ xhci_dbg(xhci, "Not enough ep ctxs: "
+ "%u active, need to add 1, limit is %u.\n",
+ xhci->num_active_eps, xhci->limit_active_eps);
+ return -ENOMEM;
+ }
+ xhci->num_active_eps += 1;
+ xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
+ xhci->num_active_eps);
+ return 0;
+}
+
+
+/*
* Returns 0 if the xHC ran out of device slots, the Enable Slot command
* timed out, or allocating memory failed. Returns 1 on success.
*/
@@ -2513,24 +2716,39 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_err(xhci, "Error while assigning device slot ID\n");
return 0;
}
- /* xhci_alloc_virt_device() does not touch rings; no need to lock.
- * Use GFP_NOIO, since this function can be called from
+
+ if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
+ spin_lock_irqsave(&xhci->lock, flags);
+ ret = xhci_reserve_host_control_ep_resources(xhci);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_warn(xhci, "Not enough host resources, "
+ "active endpoint contexts = %u\n",
+ xhci->num_active_eps);
+ goto disable_slot;
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+ /* Use GFP_NOIO, since this function can be called from
* xhci_discover_or_reset_device(), which may be called as part of
* mass storage driver error handling.
*/
if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
- /* Disable slot, if we can do it without mem alloc */
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
- spin_lock_irqsave(&xhci->lock, flags);
- if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
- xhci_ring_cmd_db(xhci);
- spin_unlock_irqrestore(&xhci->lock, flags);
- return 0;
+ goto disable_slot;
}
udev->slot_id = xhci->slot_id;
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripherial? */
return 1;
+
+disable_slot:
+ /* Disable slot, if we can do it without mem alloc */
+ spin_lock_irqsave(&xhci->lock, flags);
+ if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return 0;
}
/*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e12db7cfb9bb..ac0196e7fcf1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1123,6 +1123,7 @@ struct xhci_ring {
*/
u32 cycle_state;
unsigned int stream_id;
+ bool last_td_was_short;
};
struct xhci_erst_entry {
@@ -1290,6 +1291,19 @@ struct xhci_hcd {
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
#define XHCI_AMD_PLL_FIX (1 << 3)
+#define XHCI_SPURIOUS_SUCCESS (1 << 4)
+/*
+ * Certain Intel host controllers have a limit to the number of endpoint
+ * contexts they can handle. Ideally, they would signal that they can't handle
+ * any more endpoint contexts by returning a Resource Error for the Configure
+ * Endpoint command, but they don't. Instead they expect software to keep track
+ * of the number of active endpoints for them, across configure endpoint
+ * commands, reset device commands, disable slot commands, and address device
+ * commands.
+ */
+#define XHCI_EP_LIMIT_QUIRK (1 << 5)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
struct xhci_bus_state bus_state[2];
/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
@@ -1338,9 +1352,6 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
static inline void xhci_writel(struct xhci_hcd *xhci,
const unsigned int val, __le32 __iomem *regs)
{
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
- regs, val);
writel(val, regs);
}
@@ -1368,9 +1379,6 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
u32 val_lo = lower_32_bits(val);
u32 val_hi = upper_32_bits(val);
- xhci_dbg(xhci,
- "`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
- regs, (long unsigned int) val);
writel(val_lo, ptr);
writel(val_hi, ptr + 1);
}
@@ -1439,6 +1447,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep);
+void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, bool drop_control_ep);
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2f7c76a85e53..e224a92baa16 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -144,7 +144,7 @@ static void handle_tx(struct vhost_net *net)
}
mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
+ vhost_disable_notify(&net->dev, vq);
if (wmem < sock->sk->sk_sndbuf / 2)
tx_poll_stop(net);
@@ -166,8 +166,8 @@ static void handle_tx(struct vhost_net *net)
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
break;
}
- if (unlikely(vhost_enable_notify(vq))) {
- vhost_disable_notify(vq);
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+ vhost_disable_notify(&net->dev, vq);
continue;
}
break;
@@ -315,7 +315,7 @@ static void handle_rx(struct vhost_net *net)
return;
mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
+ vhost_disable_notify(&net->dev, vq);
vhost_hlen = vq->vhost_hlen;
sock_hlen = vq->sock_hlen;
@@ -334,10 +334,10 @@ static void handle_rx(struct vhost_net *net)
break;
/* OK, now we need to know about added descriptors. */
if (!headcount) {
- if (unlikely(vhost_enable_notify(vq))) {
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
/* They have slipped one in as we were
* doing that: check again. */
- vhost_disable_notify(vq);
+ vhost_disable_notify(&net->dev, vq);
continue;
}
/* Nothing new? Wait for eventfd to tell us
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 099f30230d06..734e1d74ad80 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n)
return;
mutex_lock(&vq->mutex);
- vhost_disable_notify(vq);
+ vhost_disable_notify(&n->dev, vq);
for (;;) {
head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
@@ -61,8 +61,8 @@ static void handle_vq(struct vhost_test *n)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- if (unlikely(vhost_enable_notify(vq))) {
- vhost_disable_notify(vq);
+ if (unlikely(vhost_enable_notify(&n->dev, vq))) {
+ vhost_disable_notify(&n->dev, vq);
continue;
}
break;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 7aa4eea930f1..ea966b356352 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -37,6 +37,9 @@ enum {
VHOST_MEMORY_F_LOG = 0x1,
};
+#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -161,6 +164,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->last_avail_idx = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
+ vq->signalled_used = 0;
+ vq->signalled_used_valid = false;
vq->used_flags = 0;
vq->log_used = false;
vq->log_addr = -1ull;
@@ -489,16 +494,17 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
return 1;
}
-static int vq_access_ok(unsigned int num,
+static int vq_access_ok(struct vhost_dev *d, unsigned int num,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used)
{
+ size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
access_ok(VERIFY_READ, avail,
- sizeof *avail + num * sizeof *avail->ring) &&
+ sizeof *avail + num * sizeof *avail->ring + s) &&
access_ok(VERIFY_WRITE, used,
- sizeof *used + num * sizeof *used->ring);
+ sizeof *used + num * sizeof *used->ring + s);
}
/* Can we log writes? */
@@ -514,9 +520,11 @@ int vhost_log_access_ok(struct vhost_dev *dev)
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
-static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
+static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
+ void __user *log_base)
{
struct vhost_memory *mp;
+ size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
mp = rcu_dereference_protected(vq->dev->memory,
lockdep_is_held(&vq->mutex));
@@ -524,15 +532,15 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
- vq->num * sizeof *vq->used->ring));
+ vq->num * sizeof *vq->used->ring + s));
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
- return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
- vq_log_access_ok(vq, vq->log_base);
+ return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
+ vq_log_access_ok(vq->dev, vq, vq->log_base);
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
@@ -577,6 +585,7 @@ static int init_used(struct vhost_virtqueue *vq,
if (r)
return r;
+ vq->signalled_used_valid = false;
return get_user(vq->last_used_idx, &used->idx);
}
@@ -674,7 +683,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
* If it is not, we don't as size might not have been setup.
* We will verify when backend is configured. */
if (vq->private_data) {
- if (!vq_access_ok(vq->num,
+ if (!vq_access_ok(d, vq->num,
(void __user *)(unsigned long)a.desc_user_addr,
(void __user *)(unsigned long)a.avail_user_addr,
(void __user *)(unsigned long)a.used_user_addr)) {
@@ -818,7 +827,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
vq = d->vqs + i;
mutex_lock(&vq->mutex);
/* If ring is inactive, will check when it's enabled. */
- if (vq->private_data && !vq_log_access_ok(vq, base))
+ if (vq->private_data && !vq_log_access_ok(d, vq, base))
r = -EFAULT;
else
vq->log_base = base;
@@ -1219,6 +1228,10 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
/* On success, increment avail index. */
vq->last_avail_idx++;
+
+ /* Assume notifications from guest are disabled at this point,
+ * if they aren't we would need to update avail_event index. */
+ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
return head;
}
@@ -1267,6 +1280,12 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
eventfd_signal(vq->log_ctx, 1);
}
vq->last_used_idx++;
+ /* If the driver never bothers to signal in a very long while,
+ * used index might wrap around. If that happens, invalidate
+ * signalled_used index we stored. TODO: make sure driver
+ * signals at least once in 2^16 and remove this. */
+ if (unlikely(vq->last_used_idx == vq->signalled_used))
+ vq->signalled_used_valid = false;
return 0;
}
@@ -1275,6 +1294,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
unsigned count)
{
struct vring_used_elem __user *used;
+ u16 old, new;
int start;
start = vq->last_used_idx % vq->num;
@@ -1292,7 +1312,14 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
((void __user *)used - (void __user *)vq->used),
count * sizeof *used);
}
- vq->last_used_idx += count;
+ old = vq->last_used_idx;
+ new = (vq->last_used_idx += count);
+ /* If the driver never bothers to signal in a very long while,
+ * used index might wrap around. If that happens, invalidate
+ * signalled_used index we stored. TODO: make sure driver
+ * signals at least once in 2^16 and remove this. */
+ if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
+ vq->signalled_used_valid = false;
return 0;
}
@@ -1331,29 +1358,47 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
return r;
}
-/* This actually signals the guest, using eventfd. */
-void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- __u16 flags;
-
+ __u16 old, new, event;
+ bool v;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
smp_mb();
- if (__get_user(flags, &vq->avail->flags)) {
- vq_err(vq, "Failed to get flags");
- return;
+ if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ unlikely(vq->avail_idx == vq->last_avail_idx))
+ return true;
+
+ if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ __u16 flags;
+ if (__get_user(flags, &vq->avail->flags)) {
+ vq_err(vq, "Failed to get flags");
+ return true;
+ }
+ return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
}
+ old = vq->signalled_used;
+ v = vq->signalled_used_valid;
+ new = vq->signalled_used = vq->last_used_idx;
+ vq->signalled_used_valid = true;
- /* If they don't want an interrupt, don't signal, unless empty. */
- if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
- (vq->avail_idx != vq->last_avail_idx ||
- !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
- return;
+ if (unlikely(!v))
+ return true;
+ if (get_user(event, vhost_used_event(vq))) {
+ vq_err(vq, "Failed to get used event idx");
+ return true;
+ }
+ return vring_need_event(event, new, old);
+}
+
+/* This actually signals the guest, using eventfd. */
+void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
/* Signal the Guest tell them we used something up. */
- if (vq->call_ctx)
+ if (vq->call_ctx && vhost_notify(dev, vq))
eventfd_signal(vq->call_ctx, 1);
}
@@ -1376,7 +1421,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
}
/* OK, now we need to know about added descriptors. */
-bool vhost_enable_notify(struct vhost_virtqueue *vq)
+bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
u16 avail_idx;
int r;
@@ -1384,11 +1429,34 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
return false;
vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
- r = put_user(vq->used_flags, &vq->used->flags);
- if (r) {
- vq_err(vq, "Failed to enable notification at %p: %d\n",
- &vq->used->flags, r);
- return false;
+ if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r) {
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+ return false;
+ }
+ } else {
+ r = put_user(vq->avail_idx, vhost_avail_event(vq));
+ if (r) {
+ vq_err(vq, "Failed to update avail event index at %p: %d\n",
+ vhost_avail_event(vq), r);
+ return false;
+ }
+ }
+ if (unlikely(vq->log_used)) {
+ void __user *used;
+ /* Make sure data is seen before log. */
+ smp_wmb();
+ used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ?
+ &vq->used->flags : vhost_avail_event(vq);
+ /* Log used flags or event index entry write. Both are 16 bit
+ * fields. */
+ log_write(vq->log_base, vq->log_addr +
+ (used - (void __user *)vq->used),
+ sizeof(u16));
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
}
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
@@ -1404,15 +1472,17 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
}
/* We don't need to be notified again. */
-void vhost_disable_notify(struct vhost_virtqueue *vq)
+void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
int r;
if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
return;
vq->used_flags |= VRING_USED_F_NO_NOTIFY;
- r = put_user(vq->used_flags, &vq->used->flags);
- if (r)
- vq_err(vq, "Failed to enable notification at %p: %d\n",
- &vq->used->flags, r);
+ if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r)
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+ }
}
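vhost_notify() above defers the "should we interrupt?" decision to vring_need_event(), which compares the guest-published used event index against the range of used entries added since the last signal, in wrap-safe 16-bit arithmetic. For reference, the helper is assumed to be the one added to include/linux/virtio_ring.h alongside this series:

/* Assumed definition; the comparison is done modulo 2^16 on purpose. */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}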
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b3363ae38518..8e03379dd30f 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -84,6 +84,12 @@ struct vhost_virtqueue {
/* Used flags */
u16 used_flags;
+ /* Last used index value we have signalled on */
+ u16 signalled_used;
+
+ /* Is the signalled_used value above valid? */
+ bool signalled_used_valid;
+
/* Log writes to used structure. */
bool log_used;
u64 log_addr;
@@ -149,8 +155,8 @@ void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
-void vhost_disable_notify(struct vhost_virtqueue *);
-bool vhost_enable_notify(struct vhost_virtqueue *);
+void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
+bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
@@ -162,11 +168,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
} while (0)
enum {
- VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
- (1 << VIRTIO_RING_F_INDIRECT_DESC) |
- (1 << VHOST_F_LOG_ALL) |
- (1 << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1 << VIRTIO_NET_F_MRG_RXBUF),
+ VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
+ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ULL << VIRTIO_RING_F_EVENT_IDX) |
+ (1ULL << VHOST_F_LOG_ALL) |
+ (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};
static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0f1da45ba47d..e058ace2a4ad 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -40,9 +40,6 @@ struct virtio_balloon
/* Waiting for host to ack the pages we released. */
struct completion acked;
- /* Do we have to tell Host *before* we reuse pages? */
- bool tell_host_first;
-
/* The pages we've told the Host we're not using. */
unsigned int num_pages;
struct list_head pages;
@@ -151,13 +148,14 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
vb->num_pages--;
}
- if (vb->tell_host_first) {
- tell_host(vb, vb->deflate_vq);
- release_pages_by_pfn(vb->pfns, vb->num_pfns);
- } else {
- release_pages_by_pfn(vb->pfns, vb->num_pfns);
- tell_host(vb, vb->deflate_vq);
- }
+
+ /*
+ * Note that if
+ * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
+ * is true, we *have* to do it in this order
+ */
+ tell_host(vb, vb->deflate_vq);
+ release_pages_by_pfn(vb->pfns, vb->num_pfns);
}
static inline void update_stat(struct virtio_balloon *vb, int idx,
@@ -325,9 +323,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out_del_vqs;
}
- vb->tell_host_first
- = virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
-
return 0;
out_del_vqs:
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index b0043fb26a4d..68b9136847af 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -82,6 +82,9 @@ struct vring_virtqueue
/* Host supports indirect buffers */
bool indirect;
+ /* Host publishes avail event idx */
+ bool event;
+
/* Number of free buffers */
unsigned int num_free;
/* Head of free buffer list. */
@@ -237,18 +240,22 @@ EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
void virtqueue_kick(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 new, old;
START_USE(vq);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
virtio_wmb();
- vq->vring.avail->idx += vq->num_added;
+ old = vq->vring.avail->idx;
+ new = vq->vring.avail->idx = old + vq->num_added;
vq->num_added = 0;
/* Need to update avail index before checking if we should notify */
virtio_mb();
- if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
+ if (vq->event ?
+ vring_need_event(vring_avail_event(&vq->vring), new, old) :
+ !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
/* Prod other side to tell it about changes. */
vq->notify(&vq->vq);
@@ -324,6 +331,14 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
ret = vq->data[i];
detach_buf(vq, i);
vq->last_used_idx++;
+ /* If we expect an interrupt for the next entry, tell host
+ * by writing event index and flush out the write before
+ * the read in the next get_buf call. */
+ if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
+ vring_used_event(&vq->vring) = vq->last_used_idx;
+ virtio_mb();
+ }
+
END_USE(vq);
return ret;
}
@@ -345,7 +360,11 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
/* We optimistically turn back on interrupts, then check if there was
* more to do. */
+ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ vring_used_event(&vq->vring) = vq->last_used_idx;
virtio_mb();
if (unlikely(more_used(vq))) {
END_USE(vq);
@@ -357,6 +376,33 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 bufs;
+
+ START_USE(vq);
+
+ /* We optimistically turn back on interrupts, then check if there was
+ * more to do. */
+ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
+ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ /* TODO: tune this threshold */
+ bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
+ vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
+ virtio_mb();
+ if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
+ END_USE(vq);
+ return false;
+ }
+
+ END_USE(vq);
+ return true;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
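virtqueue_enable_cb_delayed() lets a driver ask for an interrupt only after roughly three quarters of its outstanding buffers have been consumed; like virtqueue_enable_cb(), a false return means enough used buffers are already pending that the caller should poll rather than sleep. A hedged usage sketch (the surrounding driver code and helper name are hypothetical):

	/* Hypothetical completion path in a driver adopting the new API. */
	if (!virtqueue_enable_cb_delayed(vq)) {
		/* The threshold was already crossed: reap used buffers now
		 * instead of waiting for an interrupt that may be suppressed. */
		reap_used_buffers(vq);	/* hypothetical helper */
	}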
@@ -438,6 +484,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
+ vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
/* No callback? Tell other side not to bother us. */
if (!callback)
@@ -472,6 +519,8 @@ void vring_transport_features(struct virtio_device *vdev)
switch (i) {
case VIRTIO_RING_F_INDIRECT_DESC:
break;
+ case VIRTIO_RING_F_EVENT_IDX:
+ break;
default:
/* We don't understand this bit. */
clear_bit(i, vdev->features);