Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/Kconfig | 4
-rw-r--r--  drivers/staging/Makefile | 2
-rw-r--r--  drivers/staging/android/Kconfig | 30
-rw-r--r--  drivers/staging/android/Makefile | 3
-rw-r--r--  drivers/staging/android/TODO | 10
-rw-r--r--  drivers/staging/android/alarm-dev.c | 17
-rw-r--r--  drivers/staging/android/android_alarm.h | 44
-rw-r--r--  drivers/staging/android/ashmem.c | 65
-rw-r--r--  drivers/staging/android/ashmem.h | 30
-rw-r--r--  drivers/staging/android/binder.c | 310
-rw-r--r--  drivers/staging/android/binder.h | 308
-rw-r--r--  drivers/staging/android/binder_trace.h | 14
-rw-r--r--  drivers/staging/android/fiq_debugger/Kconfig | 49
-rw-r--r--  drivers/staging/android/fiq_debugger/Makefile | 4
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_debugger.c | 1212
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_debugger.h | 64
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_debugger_arm.c | 240
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c | 202
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_debugger_priv.h | 37
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h | 94
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_watchdog.c | 56
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_watchdog.h | 20
-rw-r--r--  drivers/staging/android/ion/Kconfig | 35
-rw-r--r--  drivers/staging/android/ion/Makefile | 10
-rw-r--r--  drivers/staging/android/ion/compat_ion.c | 195
-rw-r--r--  drivers/staging/android/ion/compat_ion.h | 30
-rw-r--r--  drivers/staging/android/ion/ion.c | 1823
-rw-r--r--  drivers/staging/android/ion/ion.h | 204
-rw-r--r--  drivers/staging/android/ion/ion_carveout_heap.c | 194
-rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c | 195
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c | 218
-rw-r--r--  drivers/staging/android/ion/ion_dummy_driver.c | 163
-rw-r--r--  drivers/staging/android/ion/ion_heap.c | 369
-rw-r--r--  drivers/staging/android/ion/ion_page_pool.c | 190
-rw-r--r--  drivers/staging/android/ion/ion_priv.h | 423
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c | 446
-rw-r--r--  drivers/staging/android/ion/ion_test.c | 282
-rw-r--r--  drivers/staging/android/ion/tegra/Makefile | 1
-rw-r--r--  drivers/staging/android/ion/tegra/tegra_ion.c | 84
-rw-r--r--  drivers/staging/android/lowmemorykiller.c | 123
-rw-r--r--  drivers/staging/android/sw_sync.h | 37
-rw-r--r--  drivers/staging/android/sync.c | 36
-rw-r--r--  drivers/staging/android/sync.h | 86
-rw-r--r--  drivers/staging/android/timed_output.c | 2
-rw-r--r--  drivers/staging/android/uapi/android_alarm.h | 65
-rw-r--r--  drivers/staging/android/uapi/ashmem.h | 47
-rw-r--r--  drivers/staging/android/uapi/binder.h | 347
-rw-r--r--  drivers/staging/android/uapi/ion.h | 196
-rw-r--r--  drivers/staging/android/uapi/ion_test.h | 70
-rw-r--r--  drivers/staging/android/uapi/sw_sync.h | 32
-rw-r--r--  drivers/staging/android/uapi/sync.h | 97
-rw-r--r--  drivers/staging/iio/Kconfig | 1
-rw-r--r--  drivers/staging/iio/Makefile | 1
-rw-r--r--  drivers/staging/iio/adc/Kconfig | 26
-rw-r--r--  drivers/staging/iio/adc/Makefile | 3
-rw-r--r--  drivers/staging/iio/adc/as3722-adc-extcon.c | 317
-rw-r--r--  drivers/staging/iio/adc/max77660-adc.c | 629
-rw-r--r--  drivers/staging/iio/adc/palmas_gpadc.c | 1306
-rw-r--r--  drivers/staging/iio/light/Kconfig | 115
-rw-r--r--  drivers/staging/iio/light/Makefile | 15
-rw-r--r--  drivers/staging/iio/light/cm3217.c | 542
-rw-r--r--  drivers/staging/iio/light/cm3218.c | 732
-rw-r--r--  drivers/staging/iio/light/iqs253.c | 856
-rw-r--r--  drivers/staging/iio/light/isl29018.c | 15
-rw-r--r--  drivers/staging/iio/light/isl29028.c | 8
-rw-r--r--  drivers/staging/iio/light/jsa1127.c | 617
-rw-r--r--  drivers/staging/iio/light/ls_dt.c | 87
-rw-r--r--  drivers/staging/iio/light/ls_sysfs.c | 79
-rw-r--r--  drivers/staging/iio/light/ltr558als.c | 934
-rw-r--r--  drivers/staging/iio/light/ltr558als.h | 126
-rw-r--r--  drivers/staging/iio/light/max44005.c | 717
-rw-r--r--  drivers/staging/iio/light/stm8t143.c | 392
-rw-r--r--  drivers/staging/iio/light/tcs3772.c | 695
-rw-r--r--  drivers/staging/iio/magnetometer/Makefile | 2
-rw-r--r--  drivers/staging/iio/meter/Kconfig | 24
-rw-r--r--  drivers/staging/iio/meter/Makefile | 3
-rw-r--r--  drivers/staging/iio/meter/ina219.c | 754
-rw-r--r--  drivers/staging/iio/meter/ina230.c | 973
-rw-r--r--  drivers/staging/iio/meter/ina3221.c | 1067
-rw-r--r--  drivers/staging/iio/pressure/Kconfig | 13
-rw-r--r--  drivers/staging/iio/pressure/Makefile | 6
-rw-r--r--  drivers/staging/iio/pressure/bmp180.c | 421
-rw-r--r--  drivers/staging/nvshm/Kconfig | 17
-rw-r--r--  drivers/staging/nvshm/Makefile | 16
-rw-r--r--  drivers/staging/nvshm/nvshm_if.c | 110
-rw-r--r--  drivers/staging/nvshm/nvshm_if.h | 125
-rw-r--r--  drivers/staging/nvshm/nvshm_init.c | 137
-rw-r--r--  drivers/staging/nvshm/nvshm_iobuf.c | 540
-rw-r--r--  drivers/staging/nvshm/nvshm_iobuf.h | 172
-rw-r--r--  drivers/staging/nvshm/nvshm_ipc.c | 421
-rw-r--r--  drivers/staging/nvshm/nvshm_ipc.h | 41
-rw-r--r--  drivers/staging/nvshm/nvshm_net.c | 461
-rw-r--r--  drivers/staging/nvshm/nvshm_priv.h | 116
-rw-r--r--  drivers/staging/nvshm/nvshm_queue.c | 259
-rw-r--r--  drivers/staging/nvshm/nvshm_queue.h | 25
-rw-r--r--  drivers/staging/nvshm/nvshm_rpc.c | 393
-rw-r--r--  drivers/staging/nvshm/nvshm_rpc.h | 107
-rw-r--r--  drivers/staging/nvshm/nvshm_rpc_dispatcher.c | 179
-rw-r--r--  drivers/staging/nvshm/nvshm_rpc_dispatcher.h | 56
-rw-r--r--  drivers/staging/nvshm/nvshm_rpc_prog_rsm.c | 222
-rw-r--r--  drivers/staging/nvshm/nvshm_rpc_shared.h | 29
-rw-r--r--  drivers/staging/nvshm/nvshm_rpc_utils.c | 472
-rw-r--r--  drivers/staging/nvshm/nvshm_rpc_utils.h | 266
-rw-r--r--  drivers/staging/nvshm/nvshm_stats.c | 237
-rw-r--r--  drivers/staging/nvshm/nvshm_tty.c | 522
-rw-r--r--  drivers/staging/nvshm/nvshm_types.h | 166
-rw-r--r--  drivers/staging/ozwpan/Kbuild | 2
-rw-r--r--  drivers/staging/ozwpan/Makefile | 25
-rw-r--r--  drivers/staging/ozwpan/README | 2
-rw-r--r--  drivers/staging/ozwpan/TODO | 14
-rw-r--r--  drivers/staging/ozwpan/ozappif.h | 2
-rw-r--r--  drivers/staging/ozwpan/ozcdev.c | 361
-rw-r--r--  drivers/staging/ozwpan/ozcdev.h | 4
-rw-r--r--  drivers/staging/ozwpan/ozconfig.h | 27
-rw-r--r--  drivers/staging/ozwpan/ozeltbuf.c | 21
-rw-r--r--  drivers/staging/ozwpan/ozeltbuf.h | 5
-rw-r--r--  drivers/staging/ozwpan/ozevent.c | 195
-rw-r--r--  drivers/staging/ozwpan/ozevent.h | 32
-rw-r--r--  drivers/staging/ozwpan/ozeventdef.h | 40
-rw-r--r--  drivers/staging/ozwpan/ozeventtrace.h | 219
-rw-r--r--  drivers/staging/ozwpan/ozhcd.c | 646
-rw-r--r--  drivers/staging/ozwpan/ozkobject.c | 304
-rw-r--r--  drivers/staging/ozwpan/ozkobject.h | 17
-rw-r--r--  drivers/staging/ozwpan/ozmain.c | 18
-rw-r--r--  drivers/staging/ozwpan/ozpd.c | 227
-rw-r--r--  drivers/staging/ozwpan/ozpd.h | 31
-rw-r--r--  drivers/staging/ozwpan/ozproto.c | 547
-rw-r--r--  drivers/staging/ozwpan/ozproto.h | 31
-rw-r--r--  drivers/staging/ozwpan/ozprotocol.h | 48
-rw-r--r--  drivers/staging/ozwpan/oztrace.c | 179
-rw-r--r--  drivers/staging/ozwpan/oztrace.h | 114
-rw-r--r--  drivers/staging/ozwpan/ozurbparanoia.c | 7
-rw-r--r--  drivers/staging/ozwpan/ozusbif.h | 10
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc.c | 41
-rw-r--r--  drivers/staging/ozwpan/ozusbsvc1.c | 77
-rw-r--r--  drivers/staging/pasr/Kconfig | 14
-rw-r--r--  drivers/staging/pasr/Makefile | 5
-rw-r--r--  drivers/staging/pasr/core.c | 180
-rw-r--r--  drivers/staging/pasr/helper.c | 91
-rw-r--r--  drivers/staging/pasr/helper.h | 16
-rw-r--r--  drivers/staging/pasr/init.c | 478
-rw-r--r--  drivers/staging/zram/Kconfig | 2
-rw-r--r--  drivers/staging/zram/zram_drv.c | 58
-rw-r--r--  drivers/staging/zram/zram_drv.h | 10
-rw-r--r--  drivers/staging/zsmalloc/Kconfig | 2
145 files changed, 27536 insertions, 1946 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index aefe820a8005..cfb11200ef90 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -140,4 +140,8 @@ source "drivers/staging/netlogic/Kconfig"
source "drivers/staging/dwc2/Kconfig"
+source "drivers/staging/nvshm/Kconfig"
+
+source "drivers/staging/pasr/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 415772ea306d..e213eec10d27 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_STAGING) += staging.o
obj-y += media/
+obj-y += pasr/
obj-$(CONFIG_ET131X) += et131x/
obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_USBIP_CORE) += usbip/
@@ -62,3 +63,4 @@ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_ZCACHE) += zcache/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_USB_DWC2) += dwc2/
+obj-$(CONFIG_NVSHM) += nvshm/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index c0c95be0f969..c303acbe05cc 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -19,6 +19,14 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_32BIT
+ bool "Use old 32-bit binder api"
+ default y
+ depends on !64BIT
+ ---help---
+ Enable to support an old 32-bit Android user-space. Breaks the new
+ Android user-space.
+
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
@@ -63,6 +71,15 @@ config ANDROID_LOW_MEMORY_KILLER
---help---
Registers processes to be killed when memory is low
+config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+ bool "Android Low Memory Killer: detect oom_adj values"
+ depends on ANDROID_LOW_MEMORY_KILLER
+ default y
+ ---help---
+ Detect oom_adj values written to
+ /sys/module/lowmemorykiller/parameters/adj and convert them
+ to oom_score_adj values.
+
config ANDROID_INTF_ALARM_DEV
bool "Android alarm driver"
depends on RTC_CLASS
@@ -99,6 +116,10 @@ config SW_SYNC_USER
*WARNING* improper use of this can result in deadlocking kernel
drivers from userspace.
+source "drivers/staging/android/ion/Kconfig"
+
+source "drivers/staging/android/fiq_debugger/Kconfig"
+
endif # if ANDROID
endmenu
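
The ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES option above translates legacy oom_adj writes into oom_score_adj values. A minimal sketch of that linear rescaling, assuming the conventional constants from linux/oom.h (OOM_ADJUST_MAX = 15, OOM_DISABLE = -17, OOM_SCORE_ADJ_MAX = 1000); the helper name is illustrative, not necessarily what lowmemorykiller.c uses:

    /* Map a legacy oom_adj (-17..15) onto the oom_score_adj scale (-1000..1000). */
    static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
    {
            if (oom_adj == OOM_ADJUST_MAX)
                    return OOM_SCORE_ADJ_MAX;       /* 15 -> 1000: most killable */
            return oom_adj * OOM_SCORE_ADJ_MAX / -OOM_DISABLE;  /* scale by 1000/17 */
    }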
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c136299e05af..907b62f56203 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,5 +1,8 @@
ccflags-y += -I$(src) # needed for trace events
+obj-y += ion/
+obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger/
+
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
deleted file mode 100644
index b15fb0d6b152..000000000000
--- a/drivers/staging/android/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - sparse fixes
- - rename files to be not so "generic"
- - make sure things build as modules properly
- - add proper arch dependencies as needed
- - audit userspace interfaces to make sure they are sane
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
-Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
index 6dc27dac679d..2fbaa4aabd44 100644
--- a/drivers/staging/android/alarm-dev.c
+++ b/drivers/staging/android/alarm-dev.c
@@ -49,6 +49,7 @@ static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
static uint32_t alarm_pending;
static uint32_t alarm_enabled;
static uint32_t wait_pending;
+static uint32_t rtc_wakeup_src;
struct devalarm {
union {
@@ -92,6 +93,16 @@ static void devalarm_cancel(struct devalarm *alrm)
hrtimer_cancel(&alrm->u.hrt);
}
+uint32_t get_rtc_wakeup_src(void)
+{
+ return rtc_wakeup_src;
+}
+
+void set_rtc_wakeup_src(uint32_t value)
+{
+ rtc_wakeup_src = value;
+}
+
static void alarm_clear(enum android_alarm_type alarm_type)
{
uint32_t alarm_type_mask = 1U << alarm_type;
@@ -147,6 +158,12 @@ static int alarm_wait(void)
alarm_pending = 0;
spin_unlock_irqrestore(&alarm_slock, flags);
+ if (rtc_wakeup_src) {
+ rv |= ANDROID_ALARM_WAKENUP_BY_RTCCHIP_MASK;
+ rtc_wakeup_src = 0;
+ alarm_dbg(INFO, "it is an RTC wakeup\n");
+ }
+
return rv;
}
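
The alarm-dev.c change above ORs ANDROID_ALARM_WAKENUP_BY_RTCCHIP_MASK into the ANDROID_ALARM_WAIT return value when an RTC chip was the wakeup source. A hedged sketch of how a userspace caller might observe that flag; the device path and the omission of error handling are illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include "android_alarm.h"      /* ioctl numbers and return-flag masks */

    int main(void)
    {
            int fd = open("/dev/alarm", O_RDWR);
            int rv = ioctl(fd, ANDROID_ALARM_WAIT); /* blocks until an alarm fires */

            if (rv >= 0 && (rv & ANDROID_ALARM_WAKENUP_BY_RTCCHIP_MASK))
                    printf("woken by the RTC chip\n");
            return 0;
    }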
diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h
index 4fd32f337f9c..495b20cf3bf6 100644
--- a/drivers/staging/android/android_alarm.h
+++ b/drivers/staging/android/android_alarm.h
@@ -16,50 +16,10 @@
#ifndef _LINUX_ANDROID_ALARM_H
#define _LINUX_ANDROID_ALARM_H
-#include <linux/ioctl.h>
-#include <linux/time.h>
#include <linux/compat.h>
+#include <linux/ioctl.h>
-enum android_alarm_type {
- /* return code bit numbers or set alarm arg */
- ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME,
-
- ANDROID_ALARM_TYPE_COUNT,
-
- /* return code bit numbers */
- /* ANDROID_ALARM_TIME_CHANGE = 16 */
-};
-
-enum android_alarm_return_flags {
- ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
- ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
- ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
- ANDROID_ALARM_ELAPSED_REALTIME_MASK =
- 1U << ANDROID_ALARM_ELAPSED_REALTIME,
- ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
- ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
-};
-
-/* Disable alarm */
-#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
-
-/* Ack last alarm and wait for next */
-#define ANDROID_ALARM_WAIT _IO('a', 1)
-
-#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
-/* Set alarm */
-#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
-#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
-#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
-#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
-#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
-#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
-
+#include "uapi/android_alarm.h"
#ifdef CONFIG_COMPAT
#define ANDROID_ALARM_SET_COMPAT(type) ALARM_IOW(2, type, \
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index e681bdd9aa5f..3511b0840362 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -224,21 +224,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
- goto out;
+ goto out_unlock;
if (!asma->file) {
ret = -EBADF;
- goto out;
+ goto out_unlock;
}
- ret = asma->file->f_op->read(asma->file, buf, len, pos);
- if (ret < 0)
- goto out;
+ mutex_unlock(&ashmem_mutex);
- /** Update backing file pos, since f_ops->read() doesn't */
- asma->file->f_pos = *pos;
+ /*
+ * asma and asma->file are used outside the lock here. We assume
+ * once asma->file is set it will never be changed, and will not
+ * be destroyed until all references to the file are dropped and
+ * ashmem_release is called.
+ */
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret >= 0) {
+ /** Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+ }
+ return ret;
-out:
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -317,22 +325,14 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
}
get_file(asma->file);
- /*
- * XXX - Reworked to use shmem_zero_setup() instead of
- * shmem_set_file while we're in staging. -jstultz
- */
- if (vma->vm_flags & VM_SHARED) {
- ret = shmem_zero_setup(vma);
- if (ret) {
- fput(asma->file);
- goto out;
- }
+ if (vma->vm_flags & VM_SHARED)
+ shmem_set_file(vma, asma->file);
+ else {
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = asma->file;
}
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = asma->file;
-
out:
mutex_unlock(&ashmem_mutex);
return ret;
@@ -413,6 +413,7 @@ out:
static int set_name(struct ashmem_area *asma, void __user *name)
{
+ int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
@@ -425,21 +426,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
* variable that does not need protection and later copy the local
* variable to the structure member with lock held.
*/
- if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
- return -EFAULT;
-
+ len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+ if (len < 0)
+ return len;
+ if (len == ASHMEM_NAME_LEN)
+ local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
- if (unlikely(asma->file)) {
+ if (unlikely(asma->file))
ret = -EINVAL;
- goto out;
- }
- memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
- local_name, ASHMEM_NAME_LEN);
- asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
- mutex_unlock(&ashmem_mutex);
+ else
+ strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ mutex_unlock(&ashmem_mutex);
return ret;
}
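
The set_name() rework above swaps copy_from_user() for strncpy_from_user(), so the copy stops at the caller's NUL terminator instead of unconditionally reading ASHMEM_NAME_LEN bytes (which could fault past the end of a short user string), and explicitly terminates truncated names. A sketch of the userspace sequence this serves; note the name must be set before mmap(), since set_name() rejects a region that already has a backing file:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include "ashmem.h"     /* ASHMEM_SET_NAME, ASHMEM_SET_SIZE */

    int main(void)
    {
            int fd = open("/dev/ashmem", O_RDWR);

            ioctl(fd, ASHMEM_SET_NAME, "my-region");        /* must precede mmap() */
            ioctl(fd, ASHMEM_SET_SIZE, 4096);
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            return p == MAP_FAILED;
    }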
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
index 8dc0f0d3adf3..5abcfd7aa706 100644
--- a/drivers/staging/android/ashmem.h
+++ b/drivers/staging/android/ashmem.h
@@ -16,35 +16,7 @@
#include <linux/ioctl.h>
#include <linux/compat.h>
-#define ASHMEM_NAME_LEN 256
-
-#define ASHMEM_NAME_DEF "dev/ashmem"
-
-/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
-#define ASHMEM_NOT_PURGED 0
-#define ASHMEM_WAS_PURGED 1
-
-/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
-#define ASHMEM_IS_UNPINNED 0
-#define ASHMEM_IS_PINNED 1
-
-struct ashmem_pin {
- __u32 offset; /* offset into region, in bytes, page-aligned */
- __u32 len; /* length forward from offset, in bytes, page-aligned */
-};
-
-#define __ASHMEMIOC 0x77
-
-#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
-#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
-#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
-#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
-#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
-#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
-#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
-#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
-#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
-#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+#include "uapi/ashmem.h"
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
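
As with android_alarm.h above, ashmem.h is reduced to a thin wrapper that pulls the ABI from the new uapi/ashmem.h and keeps only kernel-internal pieces. The retained CONFIG_COMPAT tail (cut off in this hunk) is where ioctls whose argument width differs between 32-bit and 64-bit userspace get fixed-width aliases; roughly, as an assumption about the unshown remainder:

    #ifdef CONFIG_COMPAT
    /* size_t is 4 bytes for 32-bit userspace but 8 for 64-bit, so the compat
     * path needs an ioctl number encoded with compat_size_t instead. */
    #define COMPAT_ASHMEM_SET_SIZE  _IOW(__ASHMEMIOC, 3, compat_size_t)
    #endif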
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 0fce5fc9923b..d4e529001934 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -20,6 +20,7 @@
#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
@@ -36,6 +37,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
+#include <linux/security.h>
#include "binder.h"
#include "binder_trace.h"
@@ -227,8 +229,8 @@ struct binder_node {
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
- void __user *ptr;
- void __user *cookie;
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
unsigned has_strong_ref:1;
unsigned pending_strong_ref:1;
unsigned has_weak_ref:1;
@@ -241,7 +243,7 @@ struct binder_node {
struct binder_ref_death {
struct binder_work work;
- void __user *cookie;
+ binder_uintptr_t cookie;
};
struct binder_ref {
@@ -514,14 +516,14 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
}
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- void __user *user_ptr)
+ uintptr_t user_ptr)
{
struct rb_node *n = proc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
struct binder_buffer *kern_ptr;
- kern_ptr = user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data);
+ kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
+ - offsetof(struct binder_buffer, data));
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -790,7 +792,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
list_del(&buffer->entry);
if (free_page_start || free_page_end) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with with %p or %p\n",
+ "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
proc->pid, buffer, free_page_start ? "" : " end",
free_page_end ? "" : " start", prev, next);
binder_update_page_range(proc, 0, free_page_start ?
@@ -855,7 +857,7 @@ static void binder_free_buf(struct binder_proc *proc,
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
- void __user *ptr)
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
@@ -874,8 +876,8 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
- void __user *ptr,
- void __user *cookie)
+ binder_uintptr_t ptr,
+ binder_uintptr_t cookie)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
@@ -907,9 +909,9 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p created\n",
+ "%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
- node->ptr, node->cookie);
+ (u64)node->ptr, (u64)node->cookie);
return node;
}
@@ -1225,9 +1227,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
- size_t *failed_at)
+ binder_size_t *failed_at)
{
- size_t *offp, *off_end;
+ binder_size_t *offp, *off_end;
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1238,7 +1240,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
- offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+ offp = (binder_size_t *)(buffer->data +
+ ALIGN(buffer->data_size, sizeof(void *)));
if (failed_at)
off_end = failed_at;
else
@@ -1247,9 +1250,9 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
struct flat_binder_object *fp;
if (*offp > buffer->data_size - sizeof(*fp) ||
buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(void *))) {
- pr_err("transaction release %d bad offset %zd, size %zd\n",
- debug_id, *offp, buffer->data_size);
+ !IS_ALIGNED(*offp, sizeof(u32))) {
+ pr_err("transaction release %d bad offset %lld, size %zd\n",
+ debug_id, (u64)*offp, buffer->data_size);
continue;
}
fp = (struct flat_binder_object *)(buffer->data + *offp);
@@ -1258,20 +1261,20 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_WEAK_BINDER: {
struct binder_node *node = binder_get_node(proc, fp->binder);
if (node == NULL) {
- pr_err("transaction release %d bad node %p\n",
- debug_id, fp->binder);
+ pr_err("transaction release %d bad node %016llx\n",
+ debug_id, (u64)fp->binder);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%p\n",
- node->debug_id, node->ptr);
+ " node %d u%016llx\n",
+ node->debug_id, (u64)node->ptr);
binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
- pr_err("transaction release %d bad handle %ld\n",
+ pr_err("transaction release %d bad handle %d\n",
debug_id, fp->handle);
break;
}
@@ -1283,13 +1286,13 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_FD:
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %ld\n", fp->handle);
+ " fd %d\n", fp->handle);
if (failed_at)
task_close_fd(proc, fp->handle);
break;
default:
- pr_err("transaction release %d bad object type %lx\n",
+ pr_err("transaction release %d bad object type %x\n",
debug_id, fp->type);
break;
}
@@ -1302,7 +1305,8 @@ static void binder_transaction(struct binder_proc *proc,
{
struct binder_transaction *t;
struct binder_work *tcomplete;
- size_t *offp, *off_end;
+ binder_size_t *offp, *off_end;
+ binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -1382,6 +1386,10 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
+ if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
+ return_error = BR_FAILED_REPLY;
+ goto err_invalid_target_handle;
+ }
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
@@ -1431,18 +1439,20 @@ static void binder_transaction(struct binder_proc *proc,
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
- tr->data.ptr.buffer, tr->data.ptr.offsets,
- tr->data_size, tr->offsets_size);
+ (u64)tr->data.ptr.buffer,
+ (u64)tr->data.ptr.offsets,
+ (u64)tr->data_size, (u64)tr->offsets_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
- tr->data.ptr.buffer, tr->data.ptr.offsets,
- tr->data_size, tr->offsets_size);
+ (u64)tr->data.ptr.buffer,
+ (u64)tr->data.ptr.offsets,
+ (u64)tr->data_size, (u64)tr->offsets_size);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
@@ -1471,38 +1481,47 @@ static void binder_transaction(struct binder_proc *proc,
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
- offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+ offp = (binder_size_t *)(t->buffer->data +
+ ALIGN(tr->data_size, sizeof(void *)));
- if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+ tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ if (copy_from_user(offp, (const void __user *)(uintptr_t)
+ tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
- binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n",
- proc->pid, thread->pid, tr->offsets_size);
+ if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
+ binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
+ proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
off_end = (void *)offp + tr->offsets_size;
+ off_min = 0;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
+ *offp < off_min ||
t->buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(void *))) {
- binder_user_error("%d:%d got transaction with invalid offset, %zd\n",
- proc->pid, thread->pid, *offp);
+ !IS_ALIGNED(*offp, sizeof(u32))) {
+ binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+ proc->pid, thread->pid, (u64)*offp,
+ (u64)off_min,
+ (u64)(t->buffer->data_size -
+ sizeof(*fp)));
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+ off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
@@ -1518,10 +1537,14 @@ static void binder_transaction(struct binder_proc *proc,
node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
- binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
- fp->binder, node->debug_id,
- fp->cookie, node->cookie);
+ (u64)fp->binder, node->debug_id,
+ (u64)fp->cookie, (u64)node->cookie);
+ goto err_binder_get_ref_for_node_failed;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
ref = binder_get_ref_for_node(target_proc, node);
@@ -1539,20 +1562,24 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction_node_to_ref(t, node, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%p -> ref %d desc %d\n",
- node->debug_id, node->ptr, ref->debug_id,
- ref->desc);
+ " node %d u%016llx -> ref %d desc %d\n",
+ node->debug_id, (u64)node->ptr,
+ ref->debug_id, ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
- binder_user_error("%d:%d got transaction with invalid handle, %ld\n",
+ binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_failed;
+ }
if (ref->node->proc == target_proc) {
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
@@ -1563,9 +1590,9 @@ static void binder_transaction(struct binder_proc *proc,
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
trace_binder_transaction_ref_to_node(t, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> node %d u%p\n",
+ " ref %d desc %d -> node %d u%016llx\n",
ref->debug_id, ref->desc, ref->node->debug_id,
- ref->node->ptr);
+ (u64)ref->node->ptr);
} else {
struct binder_ref *new_ref;
new_ref = binder_get_ref_for_node(target_proc, ref->node);
@@ -1590,13 +1617,13 @@ static void binder_transaction(struct binder_proc *proc,
if (reply) {
if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
- binder_user_error("%d:%d got reply with fd, %ld, but target does not allow fds\n",
+ binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
} else if (!target_node->accept_fds) {
- binder_user_error("%d:%d got transaction with fd, %ld, but target does not allow fds\n",
+ binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
@@ -1604,11 +1631,16 @@ static void binder_transaction(struct binder_proc *proc,
file = fget(fp->handle);
if (file == NULL) {
- binder_user_error("%d:%d got transaction with invalid fd, %ld\n",
+ binder_user_error("%d:%d got transaction with invalid fd, %d\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
}
+ if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
+ fput(file);
+ return_error = BR_FAILED_REPLY;
+ goto err_get_unused_fd_failed;
+ }
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
if (target_fd < 0) {
fput(file);
@@ -1618,13 +1650,13 @@ static void binder_transaction(struct binder_proc *proc,
task_fd_install(target_proc, target_fd, file);
trace_binder_transaction_fd(t, fp->handle, target_fd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %ld -> %d\n", fp->handle, target_fd);
+ " fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
fp->handle = target_fd;
} break;
default:
- binder_user_error("%d:%d got transaction with invalid object type, %lx\n",
+ binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, fp->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
@@ -1681,9 +1713,9 @@ err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %zd-%zd\n",
+ "%d:%d transaction failed %d, size %lld-%lld\n",
proc->pid, thread->pid, return_error,
- tr->data_size, tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size);
{
struct binder_transaction_log_entry *fe;
@@ -1700,9 +1732,11 @@ err_no_context_mgr_node:
}
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
- void __user *buffer, int size, signed long *consumed)
+ binder_uintptr_t binder_buffer, size_t size,
+ binder_size_t *consumed)
{
uint32_t cmd;
+ void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -1771,33 +1805,33 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
case BC_INCREFS_DONE:
case BC_ACQUIRE_DONE: {
- void __user *node_ptr;
- void *cookie;
+ binder_uintptr_t node_ptr;
+ binder_uintptr_t cookie;
struct binder_node *node;
- if (get_user(node_ptr, (void * __user *)ptr))
+ if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
- if (get_user(cookie, (void * __user *)ptr))
+ ptr += sizeof(binder_uintptr_t);
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
if (node == NULL) {
- binder_user_error("%d:%d %s u%p no match\n",
+ binder_user_error("%d:%d %s u%016llx no match\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" :
"BC_ACQUIRE_DONE",
- node_ptr);
+ (u64)node_ptr);
break;
}
if (cookie != node->cookie) {
- binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node_ptr, node->debug_id,
- cookie, node->cookie);
+ (u64)node_ptr, node->debug_id,
+ (u64)cookie, (u64)node->cookie);
break;
}
if (cmd == BC_ACQUIRE_DONE) {
@@ -1833,27 +1867,27 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
return -EINVAL;
case BC_FREE_BUFFER: {
- void __user *data_ptr;
+ binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
- if (get_user(data_ptr, (void * __user *)ptr))
+ if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
buffer = binder_buffer_lookup(proc, data_ptr);
if (buffer == NULL) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n",
- proc->pid, thread->pid, data_ptr);
+ binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ proc->pid, thread->pid, (u64)data_ptr);
break;
}
if (!buffer->allow_user_free) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n",
- proc->pid, thread->pid, data_ptr);
+ binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
+ proc->pid, thread->pid, (u64)data_ptr);
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
- "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
- proc->pid, thread->pid, data_ptr, buffer->debug_id,
+ "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
+ proc->pid, thread->pid, (u64)data_ptr, buffer->debug_id,
buffer->transaction ? "active" : "finished");
if (buffer->transaction) {
@@ -1923,16 +1957,16 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
case BC_REQUEST_DEATH_NOTIFICATION:
case BC_CLEAR_DEATH_NOTIFICATION: {
uint32_t target;
- void __user *cookie;
+ binder_uintptr_t cookie;
struct binder_ref *ref;
struct binder_ref_death *death;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (get_user(cookie, (void __user * __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
ref = binder_get_ref(proc, target);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
@@ -1945,12 +1979,12 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
- "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+ "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- cookie, ref->debug_id, ref->desc,
+ (u64)cookie, ref->debug_id, ref->desc,
ref->strong, ref->weak, ref->node->debug_id);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
@@ -1988,9 +2022,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
death = ref->death;
if (death->cookie != cookie) {
- binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n",
+ binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
- death->cookie, cookie);
+ (u64)death->cookie, (u64)cookie);
break;
}
ref->death = NULL;
@@ -2010,9 +2044,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
- void __user *cookie;
+ binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
- if (get_user(cookie, (void __user * __user *)ptr))
+ if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(void *);
@@ -2024,11 +2058,11 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %p found %p\n",
- proc->pid, thread->pid, cookie, death);
+ "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+ proc->pid, thread->pid, (u64)cookie, death);
if (death == NULL) {
- binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n",
- proc->pid, thread->pid, cookie);
+ binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
+ proc->pid, thread->pid, (u64)cookie);
break;
}
@@ -2080,9 +2114,10 @@ static int binder_has_thread_work(struct binder_thread *thread)
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
- void __user *buffer, int size,
- signed long *consumed, int non_block)
+ binder_uintptr_t binder_buffer, size_t size,
+ binder_size_t *consumed, int non_block)
{
+ void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -2140,13 +2175,13 @@ retry:
if (!binder_has_proc_work(proc, thread))
ret = -EAGAIN;
} else
- ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+ ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
} else {
if (non_block) {
if (!binder_has_thread_work(thread))
ret = -EAGAIN;
} else
- ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+ ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
binder_lock(__func__);
@@ -2227,32 +2262,36 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(node->ptr, (void * __user *)ptr))
+ if (put_user(node->ptr,
+ (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
- if (put_user(node->cookie, (void * __user *)ptr))
+ ptr += sizeof(binder_uintptr_t);
+ if (put_user(node->cookie,
+ (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%p c%p\n",
- proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+ "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name,
+ node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
} else {
list_del_init(&w->entry);
if (!weak && !strong) {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p deleted\n",
+ "%d:%d node %d u%016llx c%016llx deleted\n",
proc->pid, thread->pid, node->debug_id,
- node->ptr, node->cookie);
+ (u64)node->ptr, (u64)node->cookie);
rb_erase(&node->rb_node, &proc->nodes);
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
} else {
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%p c%p state unchanged\n",
- proc->pid, thread->pid, node->debug_id, node->ptr,
- node->cookie);
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
}
}
} break;
@@ -2270,17 +2309,18 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(death->cookie, (void * __user *)ptr))
+ if (put_user(death->cookie,
+ (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
- "%d:%d %s %p\n",
+ "%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- death->cookie);
+ (u64)death->cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
list_del(&w->entry);
@@ -2310,8 +2350,8 @@ retry:
binder_set_nice(target_node->min_priority);
cmd = BR_TRANSACTION;
} else {
- tr.target.ptr = NULL;
- tr.cookie = NULL;
+ tr.target.ptr = 0;
+ tr.cookie = 0;
cmd = BR_REPLY;
}
tr.code = t->code;
@@ -2328,8 +2368,9 @@ retry:
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (void *)t->buffer->data +
- proc->user_buffer_offset;
+ tr.data.ptr.buffer = (binder_uintptr_t)(
+ (uintptr_t)t->buffer->data +
+ proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
@@ -2344,14 +2385,14 @@ retry:
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n",
+ "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
t->debug_id, t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
- tr.data.ptr.buffer, tr.data.ptr.offsets);
+ (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
@@ -2421,8 +2462,8 @@ static void binder_release_work(struct list_head *list)
death = container_of(w, struct binder_ref_death, work);
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
- "undelivered death notification, %p\n",
- death->cookie);
+ "undelivered death notification, %016llx\n",
+ (u64)death->cookie);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} break;
@@ -2578,12 +2619,13 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d write %ld at %08lx, read %ld at %08lx\n",
- proc->pid, thread->pid, bwr.write_size,
- bwr.write_buffer, bwr.read_size, bwr.read_buffer);
+ "%d:%d write %lld at %016llx, read %lld at %016llx\n",
+ proc->pid, thread->pid,
+ (u64)bwr.write_size, (u64)bwr.write_buffer,
+ (u64)bwr.read_size, (u64)bwr.read_buffer);
if (bwr.write_size > 0) {
- ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+ ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
@@ -2593,7 +2635,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
if (bwr.read_size > 0) {
- ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+ ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
@@ -2604,9 +2646,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d wrote %ld of %ld, read return %ld of %ld\n",
- proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
- bwr.read_consumed, bwr.read_size);
+ "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
+ proc->pid, thread->pid,
+ (u64)bwr.write_consumed, (u64)bwr.write_size,
+ (u64)bwr.read_consumed, (u64)bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto err;
@@ -2625,6 +2668,9 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = -EBUSY;
goto err;
}
+ ret = security_binder_set_context_mgr(proc->tsk);
+ if (ret < 0)
+ goto err;
if (uid_valid(binder_context_mgr_uid)) {
if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
@@ -2635,7 +2681,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
} else
binder_context_mgr_uid = current->cred->euid;
- binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+ binder_context_mgr_node = binder_new_node(proc, 0, 0);
if (binder_context_mgr_node == NULL) {
ret = -ENOMEM;
goto err;
@@ -3130,8 +3176,9 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
break;
case BINDER_WORK_NODE:
node = container_of(w, struct binder_node, work);
- seq_printf(m, "%snode work %d: u%p c%p\n",
- prefix, node->debug_id, node->ptr, node->cookie);
+ seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+ prefix, node->debug_id,
+ (u64)node->ptr, (u64)node->cookie);
break;
case BINDER_WORK_DEAD_BINDER:
seq_printf(m, "%shas dead binder\n", prefix);
@@ -3191,8 +3238,8 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
- node->debug_id, node->ptr, node->cookie,
+ seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ node->debug_id, (u64)node->ptr, (u64)node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count);
@@ -3494,6 +3541,7 @@ static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
+ .compat_ioctl = binder_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
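
Beyond the 64-bit type conversion, the binder.c hunks above add security_binder_set_context_mgr(), security_binder_transaction(), security_binder_transfer_binder() and security_binder_transfer_file() calls, routing binder operations through the LSM layer (hook signatures as used at the call sites, taking the sending and receiving task_structs). A purely illustrative sketch of a module-side hook with a toy policy, not any real LSM's implementation; task_uid() and uid_eq() come from linux/cred.h and linux/uidgid.h:

    /* Toy LSM hook: only allow transactions between tasks sharing a UID. */
    static int example_binder_transaction(struct task_struct *from,
                                          struct task_struct *to)
    {
            if (!uid_eq(task_uid(from), task_uid(to)))
                    return -EPERM;  /* non-zero makes binder fail with BR_FAILED_REPLY */
            return 0;
    }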
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
index dbe81ceca1bd..eb0834656dfe 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/staging/android/binder.h
@@ -20,311 +20,11 @@
#ifndef _LINUX_BINDER_H
#define _LINUX_BINDER_H
-#include <linux/ioctl.h>
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
-#define B_PACK_CHARS(c1, c2, c3, c4) \
- ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
-#define B_TYPE_LARGE 0x85
-
-enum {
- BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
- BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
- BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
- BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
- BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
-};
-
-enum {
- FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
- FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
-};
-
-/*
- * This is the flattened representation of a Binder object for transfer
- * between processes. The 'offsets' supplied as part of a binder transaction
- * contains offsets into the data where these structures occur. The Binder
- * driver takes care of re-writing the structure type and data as it moves
- * between processes.
- */
-struct flat_binder_object {
- /* 8 bytes for large_flat_header. */
- unsigned long type;
- unsigned long flags;
-
- /* 8 bytes of data. */
- union {
- void __user *binder; /* local object */
- signed long handle; /* remote object */
- };
-
- /* extra data associated with local object */
- void __user *cookie;
-};
-
-/*
- * On 64-bit platforms where user code may run in 32-bits the driver must
- * translate the buffer (and local binder) addresses appropriately.
- */
-
-struct binder_write_read {
- signed long write_size; /* bytes to write */
- signed long write_consumed; /* bytes consumed by driver */
- unsigned long write_buffer;
- signed long read_size; /* bytes to read */
- signed long read_consumed; /* bytes consumed by driver */
- unsigned long read_buffer;
-};
-
-/* Use with BINDER_VERSION, driver fills in fields. */
-struct binder_version {
- /* driver protocol version -- increment with incompatible change */
- signed long protocol_version;
-};
-
-/* This is the current protocol version. */
-#define BINDER_CURRENT_PROTOCOL_VERSION 7
-
-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
-#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
-#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
-
-/*
- * NOTE: Two special error codes you should check for when calling
- * in to the driver are:
- *
- * EINTR -- The operation has been interupted. This should be
- * handled by retrying the ioctl() until a different error code
- * is returned.
- *
- * ECONNREFUSED -- The driver is no longer accepting operations
- * from your process. That is, the process is being destroyed.
- * You should handle this by exiting from your process. Note
- * that once this error code is returned, all further calls to
- * the driver from any thread will return this same code.
- */
-
-enum transaction_flags {
- TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
- TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
- TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
- TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
-};
-
-struct binder_transaction_data {
- /* The first two are only used for bcTRANSACTION and brTRANSACTION,
- * identifying the target and contents of the transaction.
- */
- union {
- size_t handle; /* target descriptor of command transaction */
- void *ptr; /* target descriptor of return transaction */
- } target;
- void *cookie; /* target object cookie */
- unsigned int code; /* transaction command */
-
- /* General information about the transaction. */
- unsigned int flags;
- pid_t sender_pid;
- uid_t sender_euid;
- size_t data_size; /* number of bytes of data */
- size_t offsets_size; /* number of bytes of offsets */
-
- /* If this transaction is inline, the data immediately
- * follows here; otherwise, it ends with a pointer to
- * the data buffer.
- */
- union {
- struct {
- /* transaction data */
- const void __user *buffer;
- /* offsets from buffer to flat_binder_object structs */
- const void __user *offsets;
- } ptr;
- uint8_t buf[8];
- } data;
-};
-
-struct binder_ptr_cookie {
- void *ptr;
- void *cookie;
-};
-
-struct binder_pri_desc {
- int priority;
- int desc;
-};
-
-struct binder_pri_ptr_cookie {
- int priority;
- void *ptr;
- void *cookie;
-};
-
-enum binder_driver_return_protocol {
- BR_ERROR = _IOR('r', 0, int),
- /*
- * int: error code
- */
-
- BR_OK = _IO('r', 1),
- /* No parameters! */
-
- BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
- BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
- /*
- * binder_transaction_data: the received command.
- */
-
- BR_ACQUIRE_RESULT = _IOR('r', 4, int),
- /*
- * not currently supported
- * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
- * Else the remote object has acquired a primary reference.
- */
-
- BR_DEAD_REPLY = _IO('r', 5),
- /*
- * The target of the last transaction (either a bcTRANSACTION or
- * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
- */
-
- BR_TRANSACTION_COMPLETE = _IO('r', 6),
- /*
- * No parameters... always refers to the last transaction requested
- * (including replies). Note that this will be sent even for
- * asynchronous transactions.
- */
-
- BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
- BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
- BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
- BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
- /*
- * not currently supported
- * int: priority
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BR_NOOP = _IO('r', 12),
- /*
- * No parameters. Do nothing and examine the next command. It exists
- * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
- */
-
- BR_SPAWN_LOOPER = _IO('r', 13),
- /*
- * No parameters. The driver has determined that a process has no
- * threads waiting to service incoming transactions. When a process
- * receives this command, it must spawn a new service thread and
- * register it via bcENTER_LOOPER.
- */
-
- BR_FINISHED = _IO('r', 14),
- /*
- * not currently supported
- * stop threadpool thread
- */
-
- BR_DEAD_BINDER = _IOR('r', 15, void *),
- /*
- * void *: cookie
- */
- BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
- /*
- * void *: cookie
- */
-
- BR_FAILED_REPLY = _IO('r', 17),
- /*
- * The the last transaction (either a bcTRANSACTION or
- * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
- */
-};
-
-enum binder_driver_command_protocol {
- BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
- BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
- /*
- * binder_transaction_data: the sent command.
- */
-
- BC_ACQUIRE_RESULT = _IOW('c', 2, int),
- /*
- * not currently supported
- * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
- * Else you have acquired a primary reference on the object.
- */
-
- BC_FREE_BUFFER = _IOW('c', 3, int),
- /*
- * void *: ptr to transaction data received on a read
- */
-
- BC_INCREFS = _IOW('c', 4, int),
- BC_ACQUIRE = _IOW('c', 5, int),
- BC_RELEASE = _IOW('c', 6, int),
- BC_DECREFS = _IOW('c', 7, int),
- /*
- * int: descriptor
- */
-
- BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
- BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie for binder
- */
-
- BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
- /*
- * not currently supported
- * int: priority
- * int: descriptor
- */
-
- BC_REGISTER_LOOPER = _IO('c', 11),
- /*
- * No parameters.
- * Register a spawned looper thread with the device.
- */
-
- BC_ENTER_LOOPER = _IO('c', 12),
- BC_EXIT_LOOPER = _IO('c', 13),
- /*
- * No parameters.
- * These two commands are sent as an application-level thread
- * enters and exits the binder loop, respectively. They are
- * used so the binder can have an accurate count of the number
- * of looping threads it has available.
- */
-
- BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie
- */
-
- BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
- /*
- * void *: ptr to binder
- * void *: cookie
- */
-
- BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
- /*
- * void *: cookie
- */
-};
+#include "uapi/binder.h"
#endif /* _LINUX_BINDER_H */
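
The binder_uintptr_t and binder_size_t types used throughout the binder.c conversion come from the new uapi/binder.h (listed in the diffstat but not shown in this diff). Keyed off the BINDER_IPC_32BIT define that this header now sets from CONFIG_ANDROID_BINDER_IPC_32BIT, they are presumably fixed-width along these lines:

    #ifdef BINDER_IPC_32BIT
    typedef __u32 binder_size_t;
    typedef __u32 binder_uintptr_t;
    #else
    typedef __u64 binder_size_t;    /* same ABI for 32- and 64-bit userspace */
    typedef __u64 binder_uintptr_t;
    #endif

Fixed-width pointers and sizes crossing the ioctl boundary, in place of the old void __user * and size_t, are what let a 64-bit kernel service 32-bit binder clients.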
diff --git a/drivers/staging/android/binder_trace.h b/drivers/staging/android/binder_trace.h
index 82a567c2af67..7f20f3dc8369 100644
--- a/drivers/staging/android/binder_trace.h
+++ b/drivers/staging/android/binder_trace.h
@@ -152,7 +152,7 @@ TRACE_EVENT(binder_transaction_node_to_ref,
TP_STRUCT__entry(
__field(int, debug_id)
__field(int, node_debug_id)
- __field(void __user *, node_ptr)
+ __field(binder_uintptr_t, node_ptr)
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
),
@@ -163,8 +163,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->ref_debug_id = ref->debug_id;
__entry->ref_desc = ref->desc;
),
- TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d",
- __entry->debug_id, __entry->node_debug_id, __entry->node_ptr,
+ TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
+ __entry->debug_id, __entry->node_debug_id,
+ (u64)__entry->node_ptr,
__entry->ref_debug_id, __entry->ref_desc)
);
@@ -177,7 +178,7 @@ TRACE_EVENT(binder_transaction_ref_to_node,
__field(int, ref_debug_id)
__field(uint32_t, ref_desc)
__field(int, node_debug_id)
- __field(void __user *, node_ptr)
+ __field(binder_uintptr_t, node_ptr)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
@@ -186,9 +187,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
__entry->node_debug_id = ref->node->debug_id;
__entry->node_ptr = ref->node->ptr;
),
- TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p",
+ TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
- __entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr)
+ __entry->ref_debug_id, __entry->ref_desc,
+ (u64)__entry->node_ptr)
);
TRACE_EVENT(binder_transaction_ref_to_ref,
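
The hunks above follow one convention: user-space pointers are carried as
binder_uintptr_t and printed as a fixed-width u64, so trace output is
identical on 32-bit and 64-bit kernels. A minimal sketch of the same
pattern (the helper below is hypothetical, not part of this patch):

    /* hypothetical helper showing the fixed-width print convention */
    static inline void print_node_ptr(binder_uintptr_t node_ptr)
    {
        pr_debug("node_ptr=0x%016llx\n", (u64)node_ptr);
    }
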
diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig
new file mode 100644
index 000000000000..56f7f999377e
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/Kconfig
@@ -0,0 +1,49 @@
+config FIQ_DEBUGGER
+ bool "FIQ Mode Serial Debugger"
+ default n
+ depends on ARM || ARM64
+ help
+ The FIQ serial debugger can accept commands even when the
+ kernel is unresponsive due to being stuck with interrupts
+ disabled.
+
+config FIQ_DEBUGGER_NO_SLEEP
+ bool "Keep serial debugger active"
+ depends on FIQ_DEBUGGER
+ default n
+ help
+ Enables the serial debugger at boot. Passing
+ fiq_debugger.no_sleep on the kernel commandline will
+ override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+ bool "Don't disable wakeup IRQ when debugger is active"
+ depends on FIQ_DEBUGGER
+ default n
+ help
+ Don't disable the wakeup irq when enabling the uart clock. This will
+	  cause extra interrupts, but it makes the serial debugger usable
+	  on some MSM radio builds that ignore the uart clock request in power
+ collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+ bool "Console on FIQ Serial Debugger port"
+ depends on FIQ_DEBUGGER
+ default n
+ help
+ Enables a console so that printk messages are displayed on
+	  the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+ bool "Put the FIQ debugger into console mode by default"
+ depends on FIQ_DEBUGGER_CONSOLE
+ default n
+ help
+ If enabled, this puts the fiq debugger into console mode by default.
+ Otherwise, the fiq debugger will start out in debug mode.
+
+config FIQ_WATCHDOG
+ bool
+ select FIQ_DEBUGGER
+ select PSTORE_RAM
+ default n
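
The boot-time defaults selected by these options can also be overridden
from the kernel command line through the module parameters registered in
fiq_debugger.c (no_sleep, debug_enable, console_enable, kgdb_enable). A
hypothetical command line for a single port with id 0 might be:

    console=ttyFIQ0 fiq_debugger.no_sleep=1 fiq_debugger.console_enable=1
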
diff --git a/drivers/staging/android/fiq_debugger/Makefile b/drivers/staging/android/fiq_debugger/Makefile
new file mode 100644
index 000000000000..a7ca4871cad3
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/Makefile
@@ -0,0 +1,4 @@
+obj-y += fiq_debugger.o
+obj-$(CONFIG_ARM) += fiq_debugger_arm.o
+obj-$(CONFIG_ARM64) += fiq_debugger_arm64.o
+obj-$(CONFIG_FIQ_WATCHDOG) += fiq_watchdog.o
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
new file mode 100644
index 000000000000..7d6b4ae8a2cd
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c
@@ -0,0 +1,1212 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/kmsg_dump.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+
+#ifdef CONFIG_FIQ_GLUE
+#include <asm/fiq_glue.h>
+#endif
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger.h"
+#include "fiq_debugger_priv.h"
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
+#define MAX_FIQ_DEBUGGER_PORTS 4
+
+struct fiq_debugger_state {
+#ifdef CONFIG_FIQ_GLUE
+ struct fiq_glue_handler handler;
+#endif
+ struct fiq_debugger_output output;
+
+ int fiq;
+ int uart_irq;
+ int signal_irq;
+ int wakeup_irq;
+ bool wakeup_irq_no_set_wake;
+ struct clk *clk;
+ struct fiq_debugger_pdata *pdata;
+ struct platform_device *pdev;
+
+ char debug_cmd[DEBUG_MAX];
+ int debug_busy;
+ int debug_abort;
+
+ char debug_buf[DEBUG_MAX];
+ int debug_count;
+
+ bool no_sleep;
+ bool debug_enable;
+ bool ignore_next_wakeup_irq;
+ struct timer_list sleep_timer;
+ spinlock_t sleep_timer_lock;
+ bool uart_enabled;
+ struct wake_lock debugger_wake_lock;
+ bool console_enable;
+ int current_cpu;
+ atomic_t unhandled_fiq_count;
+ bool in_fiq;
+
+ struct work_struct work;
+ spinlock_t work_lock;
+ char work_cmd[DEBUG_MAX];
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+ spinlock_t console_lock;
+ struct console console;
+ struct tty_port tty_port;
+ struct fiq_debugger_ringbuf *tty_rbuf;
+ bool syslog_dumping;
+#endif
+
+ unsigned int last_irqs[NR_IRQS];
+ unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static struct tty_driver *fiq_tty_driver;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+static bool fiq_kgdb_enable;
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
+module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644);
+
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+ if (state->wakeup_irq < 0)
+ return;
+ enable_irq(state->wakeup_irq);
+ if (!state->wakeup_irq_no_set_wake)
+ enable_irq_wake(state->wakeup_irq);
+}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+ if (state->wakeup_irq < 0)
+ return;
+ disable_irq_nosync(state->wakeup_irq);
+ if (!state->wakeup_irq_no_set_wake)
+ disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static inline bool fiq_debugger_have_fiq(struct fiq_debugger_state *state)
+{
+ return (state->fiq >= 0);
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_force_irq(struct fiq_debugger_state *state)
+{
+ unsigned int irq = state->signal_irq;
+
+ if (WARN_ON(!fiq_debugger_have_fiq(state)))
+ return;
+ if (state->pdata->force_irq) {
+ state->pdata->force_irq(state->pdev, irq);
+ } else {
+ struct irq_chip *chip = irq_get_chip(irq);
+ if (chip && chip->irq_retrigger)
+ chip->irq_retrigger(irq_get_irq_data(irq));
+ }
+}
+#endif
+
+static void fiq_debugger_uart_enable(struct fiq_debugger_state *state)
+{
+ if (state->clk)
+ clk_enable(state->clk);
+ if (state->pdata->uart_enable)
+ state->pdata->uart_enable(state->pdev);
+}
+
+static void fiq_debugger_uart_disable(struct fiq_debugger_state *state)
+{
+ if (state->pdata->uart_disable)
+ state->pdata->uart_disable(state->pdev);
+ if (state->clk)
+ clk_disable(state->clk);
+}
+
+static void fiq_debugger_uart_flush(struct fiq_debugger_state *state)
+{
+ if (state->pdata->uart_flush)
+ state->pdata->uart_flush(state->pdev);
+}
+
+static void fiq_debugger_putc(struct fiq_debugger_state *state, char c)
+{
+ state->pdata->uart_putc(state->pdev, c);
+}
+
+static void fiq_debugger_puts(struct fiq_debugger_state *state, char *s)
+{
+ unsigned c;
+ while ((c = *s++)) {
+ if (c == '\n')
+ fiq_debugger_putc(state, '\r');
+ fiq_debugger_putc(state, c);
+ }
+}
+
+static void fiq_debugger_prompt(struct fiq_debugger_state *state)
+{
+ fiq_debugger_puts(state, "debug> ");
+}
+
+static void fiq_debugger_dump_kernel_log(struct fiq_debugger_state *state)
+{
+ char buf[512];
+ size_t len;
+ struct kmsg_dumper dumper = { .active = true };
+
+ kmsg_dump_rewind_nolock(&dumper);
+ while (kmsg_dump_get_line_nolock(&dumper, true, buf,
+ sizeof(buf) - 1, &len)) {
+ buf[len] = 0;
+ fiq_debugger_puts(state, buf);
+ }
+}
+
+static void fiq_debugger_printf(struct fiq_debugger_output *output,
+ const char *fmt, ...)
+{
+ struct fiq_debugger_state *state;
+ char buf[256];
+ va_list ap;
+
+ state = container_of(output, struct fiq_debugger_state, output);
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ fiq_debugger_puts(state, buf);
+}
+
+/* Safe outside fiq context */
+static int fiq_debugger_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+ struct fiq_debugger_state *state = cookie;
+ char buf[256];
+ va_list ap;
+ unsigned long irq_flags;
+
+ va_start(ap, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ local_irq_save(irq_flags);
+ fiq_debugger_puts(state, buf);
+ fiq_debugger_uart_flush(state);
+ local_irq_restore(irq_flags);
+ return state->debug_abort;
+}
+
+static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state)
+{
+ int n;
+ struct irq_desc *desc;
+
+ fiq_debugger_printf(&state->output,
+ "irqnr total since-last status name\n");
+ for_each_irq_desc(n, desc) {
+ struct irqaction *act = desc->action;
+ if (!act && !kstat_irqs(n))
+ continue;
+ fiq_debugger_printf(&state->output, "%5d: %10u %11u %8x %s\n", n,
+ kstat_irqs(n),
+ kstat_irqs(n) - state->last_irqs[n],
+ desc->status_use_accessors,
+ (act && act->name) ? act->name : "???");
+ state->last_irqs[n] = kstat_irqs(n);
+ }
+}
+
+static void fiq_debugger_do_ps(struct fiq_debugger_state *state)
+{
+ struct task_struct *g;
+ struct task_struct *p;
+ unsigned task_state;
+ static const char stat_nam[] = "RSDTtZX";
+
+ fiq_debugger_printf(&state->output, "pid ppid prio task pc\n");
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ task_state = p->state ? __ffs(p->state) + 1 : 0;
+ fiq_debugger_printf(&state->output,
+ "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+ fiq_debugger_printf(&state->output, "%-13.13s %c", p->comm,
+ task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+ if (task_state == TASK_RUNNING)
+ fiq_debugger_printf(&state->output, " running\n");
+ else
+ fiq_debugger_printf(&state->output, " %08lx\n",
+ thread_saved_pc(p));
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = true;
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *buf, int count);
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+ fiq_debugger_dump_kernel_log(state);
+}
+#endif
+
+static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+ if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
+ fiq_debugger_printf(&state->output, "sysrq-g blocked\n");
+ return;
+ }
+ fiq_debugger_begin_syslog_dump(state);
+ handle_sysrq(rq);
+ fiq_debugger_end_syslog_dump(state);
+}
+
+#ifdef CONFIG_KGDB
+static void fiq_debugger_do_kgdb(struct fiq_debugger_state *state)
+{
+ if (!fiq_kgdb_enable) {
+ fiq_debugger_printf(&state->output, "kgdb through fiq debugger not enabled\n");
+ return;
+ }
+
+ fiq_debugger_printf(&state->output, "enabling console and triggering kgdb\n");
+ state->console_enable = true;
+ handle_sysrq('g');
+}
+#endif
+
+static void fiq_debugger_schedule_work(struct fiq_debugger_state *state,
+ char *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->work_lock, flags);
+ if (state->work_cmd[0] != '\0') {
+ fiq_debugger_printf(&state->output, "work command processor busy\n");
+ spin_unlock_irqrestore(&state->work_lock, flags);
+ return;
+ }
+
+ strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
+ spin_unlock_irqrestore(&state->work_lock, flags);
+
+ schedule_work(&state->work);
+}
+
+static void fiq_debugger_work(struct work_struct *work)
+{
+ struct fiq_debugger_state *state;
+ char work_cmd[DEBUG_MAX];
+ char *cmd;
+ unsigned long flags;
+
+ state = container_of(work, struct fiq_debugger_state, work);
+
+ spin_lock_irqsave(&state->work_lock, flags);
+
+ strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
+ state->work_cmd[0] = '\0';
+
+ spin_unlock_irqrestore(&state->work_lock, flags);
+
+ cmd = work_cmd;
+ if (!strncmp(cmd, "reboot", 6)) {
+ cmd += 6;
+ while (*cmd == ' ')
+ cmd++;
+		if (*cmd != '\0')
+ kernel_restart(cmd);
+ else
+ kernel_restart(NULL);
+ } else {
+ fiq_debugger_printf(&state->output, "unknown work command '%s'\n",
+ work_cmd);
+ }
+}
+
+/* This function CANNOT be called in FIQ context */
+static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+ if (!strcmp(cmd, "ps"))
+ fiq_debugger_do_ps(state);
+ if (!strcmp(cmd, "sysrq"))
+ fiq_debugger_do_sysrq(state, 'h');
+ if (!strncmp(cmd, "sysrq ", 6))
+ fiq_debugger_do_sysrq(state, cmd[6]);
+#ifdef CONFIG_KGDB
+ if (!strcmp(cmd, "kgdb"))
+ fiq_debugger_do_kgdb(state);
+#endif
+ if (!strncmp(cmd, "reboot", 6))
+ fiq_debugger_schedule_work(state, cmd);
+}
+
+static void fiq_debugger_help(struct fiq_debugger_state *state)
+{
+ fiq_debugger_printf(&state->output,
+ "FIQ Debugger commands:\n"
+ " pc PC status\n"
+ " regs Register dump\n"
+ " allregs Extended Register dump\n"
+ " bt Stack trace\n"
+ " reboot [<c>] Reboot with command <c>\n"
+ " reset [<c>] Hard reset with command <c>\n"
+ " irqs Interupt status\n"
+ " kmsg Kernel log\n"
+ " version Kernel version\n");
+ fiq_debugger_printf(&state->output,
+ " sleep Allow sleep while in FIQ\n"
+ " nosleep Disable sleep while in FIQ\n"
+ " console Switch terminal to console\n"
+ " cpu Current CPU\n"
+ " cpu <number> Switch to CPU<number>\n");
+ fiq_debugger_printf(&state->output,
+ " ps Process list\n"
+ " sysrq sysrq options\n"
+ " sysrq <param> Execute sysrq with <param>\n");
+#ifdef CONFIG_KGDB
+ fiq_debugger_printf(&state->output,
+ " kgdb Enter kernel debugger\n");
+#endif
+}
+
+static void fiq_debugger_take_affinity(void *info)
+{
+ struct fiq_debugger_state *state = info;
+ struct cpumask cpumask;
+
+ cpumask_clear(&cpumask);
+ cpumask_set_cpu(get_cpu(), &cpumask);
+
+ irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void fiq_debugger_switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+ if (!fiq_debugger_have_fiq(state))
+ smp_call_function_single(cpu, fiq_debugger_take_affinity, state,
+ false);
+ state->current_cpu = cpu;
+}
+
+static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state,
+ const char *cmd, const struct pt_regs *regs,
+ void *svc_sp)
+{
+ bool signal_helper = false;
+
+ if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+ fiq_debugger_help(state);
+ } else if (!strcmp(cmd, "pc")) {
+ fiq_debugger_dump_pc(&state->output, regs);
+ } else if (!strcmp(cmd, "regs")) {
+ fiq_debugger_dump_regs(&state->output, regs);
+ } else if (!strcmp(cmd, "allregs")) {
+ fiq_debugger_dump_allregs(&state->output, regs);
+ } else if (!strcmp(cmd, "bt")) {
+ fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp);
+ } else if (!strncmp(cmd, "reset", 5)) {
+ cmd += 5;
+ while (*cmd == ' ')
+ cmd++;
+ if (*cmd) {
+ char tmp_cmd[32];
+ strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
+ machine_restart(tmp_cmd);
+ } else {
+ machine_restart(NULL);
+ }
+ } else if (!strcmp(cmd, "irqs")) {
+ fiq_debugger_dump_irqs(state);
+ } else if (!strcmp(cmd, "kmsg")) {
+ fiq_debugger_dump_kernel_log(state);
+ } else if (!strcmp(cmd, "version")) {
+ fiq_debugger_printf(&state->output, "%s\n", linux_banner);
+ } else if (!strcmp(cmd, "sleep")) {
+ state->no_sleep = false;
+ fiq_debugger_printf(&state->output, "enabling sleep\n");
+ } else if (!strcmp(cmd, "nosleep")) {
+ state->no_sleep = true;
+ fiq_debugger_printf(&state->output, "disabling sleep\n");
+ } else if (!strcmp(cmd, "console")) {
+ fiq_debugger_printf(&state->output, "console mode\n");
+ fiq_debugger_uart_flush(state);
+ state->console_enable = true;
+ } else if (!strcmp(cmd, "cpu")) {
+ fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+ } else if (!strncmp(cmd, "cpu ", 4)) {
+ unsigned long cpu = 0;
+ if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
+ fiq_debugger_switch_cpu(state, cpu);
+ else
+ fiq_debugger_printf(&state->output, "invalid cpu\n");
+ fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+ } else {
+ if (state->debug_busy) {
+ fiq_debugger_printf(&state->output,
+ "command processor busy. trying to abort.\n");
+ state->debug_abort = -1;
+ } else {
+ strcpy(state->debug_cmd, cmd);
+ state->debug_busy = 1;
+ }
+
+ return true;
+ }
+ if (!state->console_enable)
+ fiq_debugger_prompt(state);
+
+ return signal_helper;
+}
+
+static void fiq_debugger_sleep_timer_expired(unsigned long data)
+{
+ struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ if (state->uart_enabled && !state->no_sleep) {
+ if (state->debug_enable && !state->console_enable) {
+ state->debug_enable = false;
+ fiq_debugger_printf_nfiq(state,
+ "suspending fiq debugger\n");
+ }
+ state->ignore_next_wakeup_irq = true;
+ fiq_debugger_uart_disable(state);
+ state->uart_enabled = false;
+ fiq_debugger_enable_wakeup_irq(state);
+ }
+ wake_unlock(&state->debugger_wake_lock);
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void fiq_debugger_handle_wakeup(struct fiq_debugger_state *state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+ state->ignore_next_wakeup_irq = false;
+ } else if (!state->uart_enabled) {
+ wake_lock(&state->debugger_wake_lock);
+ fiq_debugger_uart_enable(state);
+ state->uart_enabled = true;
+ fiq_debugger_disable_wakeup_irq(state);
+ mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+ }
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static irqreturn_t fiq_debugger_wakeup_irq_handler(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+
+ if (!state->no_sleep)
+ fiq_debugger_puts(state, "WAKEUP\n");
+ fiq_debugger_handle_wakeup(state);
+
+ return IRQ_HANDLED;
+}
+
+static
+void fiq_debugger_handle_console_irq_context(struct fiq_debugger_state *state)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+ if (state->tty_port.ops) {
+ int i;
+ int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+ for (i = 0; i < count; i++) {
+ int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+ tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL);
+ if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+ pr_warn("fiq tty failed to consume byte\n");
+ }
+ tty_flip_buffer_push(&state->tty_port);
+ }
+#endif
+}
+
+static void fiq_debugger_handle_irq_context(struct fiq_debugger_state *state)
+{
+ if (!state->no_sleep) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->sleep_timer_lock, flags);
+ wake_lock(&state->debugger_wake_lock);
+ mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+ spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+ }
+ fiq_debugger_handle_console_irq_context(state);
+ if (state->debug_busy) {
+ fiq_debugger_irq_exec(state, state->debug_cmd);
+ if (!state->console_enable)
+ fiq_debugger_prompt(state);
+ state->debug_busy = 0;
+ }
+}
+
+static int fiq_debugger_getc(struct fiq_debugger_state *state)
+{
+ return state->pdata->uart_getc(state->pdev);
+}
+
+static bool fiq_debugger_handle_uart_interrupt(struct fiq_debugger_state *state,
+ int this_cpu, const struct pt_regs *regs, void *svc_sp)
+{
+ int c;
+ static int last_c;
+ int count = 0;
+ bool signal_helper = false;
+
+ if (this_cpu != state->current_cpu) {
+ if (state->in_fiq)
+ return false;
+
+ if (atomic_inc_return(&state->unhandled_fiq_count) !=
+ MAX_UNHANDLED_FIQ_COUNT)
+ return false;
+
+ fiq_debugger_printf(&state->output,
+ "fiq_debugger: cpu %d not responding, "
+ "reverting to cpu %d\n", state->current_cpu,
+ this_cpu);
+
+ atomic_set(&state->unhandled_fiq_count, 0);
+ fiq_debugger_switch_cpu(state, this_cpu);
+ return false;
+ }
+
+ state->in_fiq = true;
+
+ while ((c = fiq_debugger_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+ count++;
+ if (!state->debug_enable) {
+ if ((c == 13) || (c == 10)) {
+ state->debug_enable = true;
+ state->debug_count = 0;
+ fiq_debugger_prompt(state);
+ }
+ } else if (c == FIQ_DEBUGGER_BREAK) {
+ state->console_enable = false;
+ fiq_debugger_puts(state, "fiq debugger mode\n");
+ state->debug_count = 0;
+ fiq_debugger_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+ } else if (state->console_enable && state->tty_rbuf) {
+ fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+ signal_helper = true;
+#endif
+ } else if ((c >= ' ') && (c < 127)) {
+ if (state->debug_count < (DEBUG_MAX - 1)) {
+ state->debug_buf[state->debug_count++] = c;
+ fiq_debugger_putc(state, c);
+ }
+ } else if ((c == 8) || (c == 127)) {
+ if (state->debug_count > 0) {
+ state->debug_count--;
+ fiq_debugger_putc(state, 8);
+ fiq_debugger_putc(state, ' ');
+ fiq_debugger_putc(state, 8);
+ }
+ } else if ((c == 13) || (c == 10)) {
+ if (c == '\r' || (c == '\n' && last_c != '\r')) {
+ fiq_debugger_putc(state, '\r');
+ fiq_debugger_putc(state, '\n');
+ }
+ if (state->debug_count) {
+ state->debug_buf[state->debug_count] = 0;
+ state->debug_count = 0;
+ signal_helper |=
+ fiq_debugger_fiq_exec(state,
+ state->debug_buf,
+ regs, svc_sp);
+ } else {
+ fiq_debugger_prompt(state);
+ }
+ }
+ last_c = c;
+ }
+ if (!state->console_enable)
+ fiq_debugger_uart_flush(state);
+ if (state->pdata->fiq_ack)
+ state->pdata->fiq_ack(state->pdev, state->fiq);
+
+ /* poke sleep timer if necessary */
+ if (state->debug_enable && !state->no_sleep)
+ signal_helper = true;
+
+ atomic_set(&state->unhandled_fiq_count, 0);
+ state->in_fiq = false;
+
+ return signal_helper;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_fiq(struct fiq_glue_handler *h,
+ const struct pt_regs *regs, void *svc_sp)
+{
+ struct fiq_debugger_state *state =
+ container_of(h, struct fiq_debugger_state, handler);
+ unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+ bool need_irq;
+
+ need_irq = fiq_debugger_handle_uart_interrupt(state, this_cpu, regs,
+ svc_sp);
+ if (need_irq)
+ fiq_debugger_force_irq(state);
+}
+#endif
+
+/*
+ * When not using FIQs, we only use this single interrupt as an entry point.
+ * This just effectively takes over the UART interrupt and does all the work
+ * in this context.
+ */
+static irqreturn_t fiq_debugger_uart_irq(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+ bool not_done;
+
+ fiq_debugger_handle_wakeup(state);
+
+ /* handle the debugger irq in regular context */
+ not_done = fiq_debugger_handle_uart_interrupt(state, smp_processor_id(),
+ get_irq_regs(),
+ current_thread_info());
+ if (not_done)
+ fiq_debugger_handle_irq_context(state);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * FIQ handler does what it can and then signals this interrupt to finish the
+ * job in irq context.
+ */
+static irqreturn_t fiq_debugger_signal_irq(int irq, void *dev)
+{
+ struct fiq_debugger_state *state = dev;
+
+ if (state->pdata->force_irq_ack)
+ state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+ fiq_debugger_handle_irq_context(state);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_resume(struct fiq_glue_handler *h)
+{
+ struct fiq_debugger_state *state =
+ container_of(h, struct fiq_debugger_state, handler);
+ if (state->pdata->uart_resume)
+ state->pdata->uart_resume(state->pdev);
+}
+#endif
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+static struct tty_driver *fiq_debugger_console_device(struct console *co,
+		int *index)
+{
+ *index = co->index;
+ return fiq_tty_driver;
+}
+
+static void fiq_debugger_console_write(struct console *co,
+ const char *s, unsigned int count)
+{
+ struct fiq_debugger_state *state;
+ unsigned long flags;
+
+ state = container_of(co, struct fiq_debugger_state, console);
+
+ if (!state->console_enable && !state->syslog_dumping)
+ return;
+
+ fiq_debugger_uart_enable(state);
+ spin_lock_irqsave(&state->console_lock, flags);
+ while (count--) {
+ if (*s == '\n')
+ fiq_debugger_putc(state, '\r');
+ fiq_debugger_putc(state, *s++);
+ }
+ fiq_debugger_uart_flush(state);
+ spin_unlock_irqrestore(&state->console_lock, flags);
+ fiq_debugger_uart_disable(state);
+}
+
+static struct console fiq_debugger_console = {
+ .name = "ttyFIQ",
+ .device = fiq_debugger_console_device,
+ .write = fiq_debugger_console_write,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+static int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ int line = tty->index;
+ struct fiq_debugger_state **states = tty->driver->driver_state;
+ struct fiq_debugger_state *state = states[line];
+
+ return tty_port_open(&state->tty_port, tty, filp);
+}
+
+static void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ tty_port_close(tty->port, tty, filp);
+}
+
+static int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf,
+		int count)
+{
+ int i;
+ int line = tty->index;
+ struct fiq_debugger_state **states = tty->driver->driver_state;
+ struct fiq_debugger_state *state = states[line];
+
+ if (!state->console_enable)
+ return count;
+
+ fiq_debugger_uart_enable(state);
+ spin_lock_irq(&state->console_lock);
+ for (i = 0; i < count; i++)
+ fiq_debugger_putc(state, *buf++);
+ spin_unlock_irq(&state->console_lock);
+ fiq_debugger_uart_disable(state);
+
+ return count;
+}
+
+static int fiq_tty_write_room(struct tty_struct *tty)
+{
+ return 16;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options)
+{
+ return 0;
+}
+
+static int fiq_tty_poll_get_char(struct tty_driver *driver, int line)
+{
+ struct fiq_debugger_state **states = driver->driver_state;
+ struct fiq_debugger_state *state = states[line];
+ int c = NO_POLL_CHAR;
+
+ fiq_debugger_uart_enable(state);
+ if (fiq_debugger_have_fiq(state)) {
+ int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+ if (count > 0) {
+ c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+ fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
+ }
+ } else {
+ c = fiq_debugger_getc(state);
+ if (c == FIQ_DEBUGGER_NO_CHAR)
+ c = NO_POLL_CHAR;
+ }
+ fiq_debugger_uart_disable(state);
+
+ return c;
+}
+
+static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+ struct fiq_debugger_state **states = driver->driver_state;
+ struct fiq_debugger_state *state = states[line];
+ fiq_debugger_uart_enable(state);
+ fiq_debugger_putc(state, ch);
+ fiq_debugger_uart_disable(state);
+}
+#endif
+
+static const struct tty_port_operations fiq_tty_port_ops;
+
+static const struct tty_operations fiq_tty_driver_ops = {
+ .write = fiq_tty_write,
+ .write_room = fiq_tty_write_room,
+ .open = fiq_tty_open,
+ .close = fiq_tty_close,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = fiq_tty_poll_init,
+ .poll_get_char = fiq_tty_poll_get_char,
+ .poll_put_char = fiq_tty_poll_put_char,
+#endif
+};
+
+static int fiq_debugger_tty_init(void)
+{
+ int ret;
+ struct fiq_debugger_state **states = NULL;
+
+ states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
+ if (!states) {
+ pr_err("Failed to allocate fiq debugger state structres\n");
+ return -ENOMEM;
+ }
+
+ fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
+ if (!fiq_tty_driver) {
+ pr_err("Failed to allocate fiq debugger tty\n");
+ ret = -ENOMEM;
+ goto err_free_state;
+ }
+
+ fiq_tty_driver->owner = THIS_MODULE;
+ fiq_tty_driver->driver_name = "fiq-debugger";
+ fiq_tty_driver->name = "ttyFIQ";
+ fiq_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ fiq_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ fiq_tty_driver->init_termios = tty_std_termios;
+ fiq_tty_driver->flags = TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV;
+ fiq_tty_driver->driver_state = states;
+
+ fiq_tty_driver->init_termios.c_cflag =
+ B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+ fiq_tty_driver->init_termios.c_ispeed = 115200;
+ fiq_tty_driver->init_termios.c_ospeed = 115200;
+
+ tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
+
+ ret = tty_register_driver(fiq_tty_driver);
+ if (ret) {
+ pr_err("Failed to register fiq tty: %d\n", ret);
+ goto err_free_tty;
+ }
+
+ pr_info("Registered FIQ tty driver\n");
+ return 0;
+
+err_free_tty:
+ put_tty_driver(fiq_tty_driver);
+ fiq_tty_driver = NULL;
+err_free_state:
+ kfree(states);
+ return ret;
+}
+
+static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
+{
+ int ret;
+ struct device *tty_dev;
+ struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
+
+ states[state->pdev->id] = state;
+
+ state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+ if (!state->tty_rbuf) {
+ pr_err("Failed to allocate fiq debugger ringbuf\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tty_port_init(&state->tty_port);
+ state->tty_port.ops = &fiq_tty_port_ops;
+
+ tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver,
+ state->pdev->id, &state->pdev->dev);
+ if (IS_ERR(tty_dev)) {
+ pr_err("Failed to register fiq debugger tty device\n");
+ ret = PTR_ERR(tty_dev);
+ goto err;
+ }
+
+ device_set_wakeup_capable(tty_dev, 1);
+
+ pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
+
+ return 0;
+
+err:
+ fiq_debugger_ringbuf_free(state->tty_rbuf);
+ state->tty_rbuf = NULL;
+ return ret;
+}
+#endif
+
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+ if (state->pdata->uart_dev_suspend)
+ return state->pdata->uart_dev_suspend(pdev);
+ return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+ if (state->pdata->uart_dev_resume)
+ return state->pdata->uart_dev_resume(pdev);
+ return 0;
+}
+
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fiq_debugger_state *state;
+ int fiq;
+ int uart_irq;
+
+ if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
+ return -EINVAL;
+
+ if (!pdata->uart_getc || !pdata->uart_putc)
+ return -EINVAL;
+ if ((pdata->uart_enable && !pdata->uart_disable) ||
+ (!pdata->uart_enable && pdata->uart_disable))
+ return -EINVAL;
+
+ fiq = platform_get_irq_byname(pdev, "fiq");
+ uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+ /* uart_irq mode and fiq mode are mutually exclusive, but one of them
+ * is required */
+ if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+ return -EINVAL;
+ if (fiq >= 0 && !pdata->fiq_enable)
+ return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+ state->output.printf = fiq_debugger_printf;
+ setup_timer(&state->sleep_timer, fiq_debugger_sleep_timer_expired,
+ (unsigned long)state);
+ state->pdata = pdata;
+ state->pdev = pdev;
+ state->no_sleep = initial_no_sleep;
+ state->debug_enable = initial_debug_enable;
+ state->console_enable = initial_console_enable;
+
+ state->fiq = fiq;
+ state->uart_irq = uart_irq;
+ state->signal_irq = platform_get_irq_byname(pdev, "signal");
+ state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+ INIT_WORK(&state->work, fiq_debugger_work);
+ spin_lock_init(&state->work_lock);
+
+ platform_set_drvdata(pdev, state);
+
+ spin_lock_init(&state->sleep_timer_lock);
+
+ if (state->wakeup_irq < 0 && fiq_debugger_have_fiq(state))
+ state->no_sleep = true;
+ state->ignore_next_wakeup_irq = !state->no_sleep;
+
+ wake_lock_init(&state->debugger_wake_lock,
+ WAKE_LOCK_SUSPEND, "serial-debug");
+
+ state->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(state->clk))
+ state->clk = NULL;
+
+ /* do not call pdata->uart_enable here since uart_init may still
+ * need to do some initialization before uart_enable can work.
+ * So, only try to manage the clock during init.
+ */
+ if (state->clk)
+ clk_enable(state->clk);
+
+ if (pdata->uart_init) {
+ ret = pdata->uart_init(pdev);
+ if (ret)
+ goto err_uart_init;
+ }
+
+ fiq_debugger_printf_nfiq(state,
+ "<hit enter %sto activate fiq debugger>\n",
+ state->no_sleep ? "" : "twice ");
+
+#ifdef CONFIG_FIQ_GLUE
+ if (fiq_debugger_have_fiq(state)) {
+ state->handler.fiq = fiq_debugger_fiq;
+ state->handler.resume = fiq_debugger_resume;
+ ret = fiq_glue_register_handler(&state->handler);
+ if (ret) {
+ pr_err("%s: could not install fiq handler\n", __func__);
+ goto err_register_irq;
+ }
+
+ pdata->fiq_enable(pdev, state->fiq, 1);
+ } else
+#endif
+ {
+ ret = request_irq(state->uart_irq, fiq_debugger_uart_irq,
+ IRQF_NO_SUSPEND, "debug", state);
+ if (ret) {
+ pr_err("%s: could not install irq handler\n", __func__);
+ goto err_register_irq;
+ }
+
+ /* for irq-only mode, we want this irq to wake us up, if it
+ * can.
+ */
+ enable_irq_wake(state->uart_irq);
+ }
+
+ if (state->clk)
+ clk_disable(state->clk);
+
+ if (state->signal_irq >= 0) {
+ ret = request_irq(state->signal_irq, fiq_debugger_signal_irq,
+ IRQF_TRIGGER_RISING, "debug-signal", state);
+ if (ret)
+ pr_err("serial_debugger: could not install signal_irq");
+ }
+
+ if (state->wakeup_irq >= 0) {
+ ret = request_irq(state->wakeup_irq,
+ fiq_debugger_wakeup_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+ "debug-wakeup", state);
+ if (ret) {
+ pr_err("serial_debugger: "
+ "could not install wakeup irq\n");
+ state->wakeup_irq = -1;
+ } else {
+ ret = enable_irq_wake(state->wakeup_irq);
+ if (ret) {
+ pr_err("serial_debugger: "
+ "could not enable wakeup\n");
+ state->wakeup_irq_no_set_wake = true;
+ }
+ }
+ }
+ if (state->no_sleep)
+ fiq_debugger_handle_wakeup(state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+ spin_lock_init(&state->console_lock);
+ state->console = fiq_debugger_console;
+ state->console.index = pdev->id;
+ if (!console_set_on_cmdline)
+ add_preferred_console(state->console.name,
+ state->console.index, NULL);
+ register_console(&state->console);
+ fiq_debugger_tty_init_one(state);
+#endif
+ return 0;
+
+err_register_irq:
+ if (pdata->uart_free)
+ pdata->uart_free(pdev);
+err_uart_init:
+ if (state->clk)
+ clk_disable(state->clk);
+ if (state->clk)
+ clk_put(state->clk);
+ wake_lock_destroy(&state->debugger_wake_lock);
+ platform_set_drvdata(pdev, NULL);
+ kfree(state);
+ return ret;
+}
+
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+ .suspend = fiq_debugger_dev_suspend,
+ .resume = fiq_debugger_dev_resume,
+};
+
+static struct platform_driver fiq_debugger_driver = {
+ .probe = fiq_debugger_probe,
+ .driver = {
+ .name = "fiq_debugger",
+ .pm = &fiq_debugger_dev_pm_ops,
+ },
+};
+
+static int __init fiq_debugger_init(void)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+ fiq_debugger_tty_init();
+#endif
+ return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
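
For context, a hypothetical serial session with this debugger (prompt and
command names as emitted by fiq_debugger_prompt() and fiq_debugger_help()
above) might look like:

    <hit enter to activate fiq debugger>
    debug> irqs
    irqnr       total  since-last   status  name
    ...
    debug> console
    console mode
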
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.h b/drivers/staging/android/fiq_debugger/fiq_debugger.h
new file mode 100644
index 000000000000..c9ec4f8db086
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.h
@@ -0,0 +1,64 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+
+#include <linux/serial_core.h>
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME "fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME "signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME "wakeup"
+
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_resume: used to restore uart state right before enabling
+ * the fiq.
+ * @uart_enable: Do the work necessary to communicate with the uart
+ * hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable: Do the work necessary to disable the uart hw
+ * (disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend: called during PM suspend, generally not needed
+ * for real fiq mode debugger.
+ * @uart_dev_resume: called during PM resume, generally not needed
+ * for real fiq mode debugger.
+ */
+struct fiq_debugger_pdata {
+ int (*uart_init)(struct platform_device *pdev);
+ void (*uart_free)(struct platform_device *pdev);
+ int (*uart_resume)(struct platform_device *pdev);
+ int (*uart_getc)(struct platform_device *pdev);
+ void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+ void (*uart_flush)(struct platform_device *pdev);
+ void (*uart_enable)(struct platform_device *pdev);
+ void (*uart_disable)(struct platform_device *pdev);
+
+ int (*uart_dev_suspend)(struct platform_device *pdev);
+ int (*uart_dev_resume)(struct platform_device *pdev);
+
+ void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+ bool enable);
+ void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+ void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+ void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
+
+#endif
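
A minimal sketch of how board code might register an irq-mode port against
this interface. The uart accessors and IRQ number are hypothetical; only
the resource name "uart_irq", the driver name "fiq_debugger", and the
pdata fields come from this patch:

    #include <linux/platform_device.h>
    #include "fiq_debugger.h"

    /* hypothetical uart accessors provided by board/SoC code */
    static int board_uart_getc(struct platform_device *pdev)
    {
        return FIQ_DEBUGGER_NO_CHAR;    /* no character pending */
    }

    static void board_uart_putc(struct platform_device *pdev, unsigned int c)
    {
        /* write c to the board's uart data register */
    }

    static struct fiq_debugger_pdata board_fiq_pdata = {
        .uart_getc = board_uart_getc,
        .uart_putc = board_uart_putc,
    };

    static struct resource board_fiq_resources[] = {
        {
            .name  = "uart_irq",  /* looked up via platform_get_irq_byname() */
            .start = 42,          /* hypothetical IRQ number */
            .end   = 42,
            .flags = IORESOURCE_IRQ,
        },
    };

    static struct platform_device board_fiq_device = {
        .name          = "fiq_debugger",
        .id            = 0,
        .dev           = { .platform_data = &board_fiq_pdata },
        .resource      = board_fiq_resources,
        .num_resources = ARRAY_SIZE(board_fiq_resources),
    };

    /* board init would then call platform_device_register(&board_fiq_device); */
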
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c
new file mode 100644
index 000000000000..8b3e0137be1a
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(unsigned cpsr)
+{
+ switch (cpsr & MODE_MASK) {
+ case USR_MODE: return "USR";
+ case FIQ_MODE: return "FIQ";
+ case IRQ_MODE: return "IRQ";
+ case SVC_MODE: return "SVC";
+ case ABT_MODE: return "ABT";
+ case UND_MODE: return "UND";
+ case SYSTEM_MODE: return "SYS";
+ default: return "???";
+ }
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+ const struct pt_regs *regs)
+{
+ output->printf(output, " pc %08x cpsr %08x mode %s\n",
+ regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+ const struct pt_regs *regs)
+{
+ output->printf(output,
+ " r0 %08x r1 %08x r2 %08x r3 %08x\n",
+ regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+ output->printf(output,
+ " r4 %08x r5 %08x r6 %08x r7 %08x\n",
+ regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
+ output->printf(output,
+ " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n",
+ regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp,
+ mode_name(regs->ARM_cpsr));
+ output->printf(output,
+ " ip %08x sp %08x lr %08x pc %08x cpsr %08x\n",
+ regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc,
+ regs->ARM_cpsr);
+}
+
+struct mode_regs {
+ unsigned long sp_svc;
+ unsigned long lr_svc;
+ unsigned long spsr_svc;
+
+ unsigned long sp_abt;
+ unsigned long lr_abt;
+ unsigned long spsr_abt;
+
+ unsigned long sp_und;
+ unsigned long lr_und;
+ unsigned long spsr_und;
+
+ unsigned long sp_irq;
+ unsigned long lr_irq;
+ unsigned long spsr_irq;
+
+ unsigned long r8_fiq;
+ unsigned long r9_fiq;
+ unsigned long r10_fiq;
+ unsigned long r11_fiq;
+ unsigned long r12_fiq;
+ unsigned long sp_fiq;
+ unsigned long lr_fiq;
+ unsigned long spsr_fiq;
+};
+
+static void __naked get_mode_regs(struct mode_regs *regs)
+{
+ asm volatile (
+ "mrs r1, cpsr\n"
+ "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r13 - r14}\n"
+ "mrs r2, spsr\n"
+ "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+ "stmia r0!, {r2, r8 - r14}\n"
+ "mrs r2, spsr\n"
+ "stmia r0!, {r2}\n"
+ "msr cpsr_c, r1\n"
+ "bx lr\n");
+}
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+ const struct pt_regs *regs)
+{
+ struct mode_regs mode_regs;
+ unsigned long mode = regs->ARM_cpsr & MODE_MASK;
+
+ fiq_debugger_dump_regs(output, regs);
+ get_mode_regs(&mode_regs);
+
+ output->printf(output,
+ "%csvc: sp %08x lr %08x spsr %08x\n",
+ mode == SVC_MODE ? '*' : ' ',
+ mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+ output->printf(output,
+ "%cabt: sp %08x lr %08x spsr %08x\n",
+ mode == ABT_MODE ? '*' : ' ',
+ mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+ output->printf(output,
+ "%cund: sp %08x lr %08x spsr %08x\n",
+ mode == UND_MODE ? '*' : ' ',
+ mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+ output->printf(output,
+ "%cirq: sp %08x lr %08x spsr %08x\n",
+ mode == IRQ_MODE ? '*' : ' ',
+ mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+ output->printf(output,
+ "%cfiq: r8 %08x r9 %08x r10 %08x r11 %08x r12 %08x\n",
+ mode == FIQ_MODE ? '*' : ' ',
+ mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+ mode_regs.r11_fiq, mode_regs.r12_fiq);
+ output->printf(output,
+ " fiq: sp %08x lr %08x spsr %08x\n",
+ mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+struct stacktrace_state {
+ struct fiq_debugger_output *output;
+ unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+ struct stacktrace_state *sts = d;
+
+ if (sts->depth) {
+ sts->output->printf(sts->output,
+ " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+ frame->pc, frame->pc, frame->lr, frame->lr,
+ frame->sp, frame->fp);
+ sts->depth--;
+ return 0;
+ }
+ sts->output->printf(sts->output, " ...\n");
+
+ return sts->depth == 0;
+}
+
+struct frame_tail {
+ struct frame_tail *fp;
+ unsigned long sp;
+ unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_output *output,
+ struct frame_tail *tail)
+{
+ struct frame_tail buftail[2];
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+ output->printf(output, " invalid frame pointer %p\n",
+ tail);
+ return NULL;
+ }
+ if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+ output->printf(output,
+ " failed to copy frame pointer %p\n", tail);
+ return NULL;
+ }
+
+ output->printf(output, " %p\n", buftail[0].lr);
+
+ /* frame pointers should strictly progress back up the stack
+ * (towards higher addresses) */
+ if (tail >= buftail[0].fp)
+ return NULL;
+
+	return buftail[0].fp - 1;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+ const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+ struct frame_tail *tail;
+ struct thread_info *real_thread_info = THREAD_INFO(ssp);
+ struct stacktrace_state sts;
+
+ sts.depth = depth;
+ sts.output = output;
+ *current_thread_info() = *real_thread_info;
+
+ if (!current)
+ output->printf(output, "current NULL\n");
+ else
+ output->printf(output, "pid: %d comm: %s\n",
+ current->pid, current->comm);
+ fiq_debugger_dump_regs(output, regs);
+
+ if (!user_mode(regs)) {
+ struct stackframe frame;
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.lr = regs->ARM_lr;
+ frame.pc = regs->ARM_pc;
+ output->printf(output,
+ " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+ regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+ regs->ARM_sp, regs->ARM_fp);
+ walk_stackframe(&frame, report_trace, &sts);
+ return;
+ }
+
+ tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+ while (depth-- && tail && !((unsigned long) tail & 3))
+ tail = user_backtrace(output, tail);
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
new file mode 100644
index 000000000000..99c6584fcfa5
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(const struct pt_regs *regs)
+{
+ if (compat_user_mode(regs)) {
+ return "USR";
+ } else {
+ switch (processor_mode(regs)) {
+ case PSR_MODE_EL0t: return "EL0t";
+ case PSR_MODE_EL1t: return "EL1t";
+ case PSR_MODE_EL1h: return "EL1h";
+ case PSR_MODE_EL2t: return "EL2t";
+ case PSR_MODE_EL2h: return "EL2h";
+ default: return "???";
+ }
+ }
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+ const struct pt_regs *regs)
+{
+ output->printf(output, " pc %016lx cpsr %08lx mode %s\n",
+ regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output,
+ const struct pt_regs *regs)
+{
+ output->printf(output, " r0 %08x r1 %08x r2 %08x r3 %08x\n",
+ regs->compat_usr(0), regs->compat_usr(1),
+ regs->compat_usr(2), regs->compat_usr(3));
+ output->printf(output, " r4 %08x r5 %08x r6 %08x r7 %08x\n",
+ regs->compat_usr(4), regs->compat_usr(5),
+ regs->compat_usr(6), regs->compat_usr(7));
+ output->printf(output, " r8 %08x r9 %08x r10 %08x r11 %08x\n",
+ regs->compat_usr(8), regs->compat_usr(9),
+ regs->compat_usr(10), regs->compat_usr(11));
+ output->printf(output, " ip %08x sp %08x lr %08x pc %08x\n",
+ regs->compat_usr(12), regs->compat_sp,
+ regs->compat_lr, regs->pc);
+ output->printf(output, " cpsr %08x (%s)\n",
+ regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output,
+ const struct pt_regs *regs)
+{
+ output->printf(output, " x0 %016lx x1 %016lx\n",
+ regs->regs[0], regs->regs[1]);
+ output->printf(output, " x2 %016lx x3 %016lx\n",
+ regs->regs[2], regs->regs[3]);
+ output->printf(output, " x4 %016lx x5 %016lx\n",
+ regs->regs[4], regs->regs[5]);
+ output->printf(output, " x6 %016lx x7 %016lx\n",
+ regs->regs[6], regs->regs[7]);
+ output->printf(output, " x8 %016lx x9 %016lx\n",
+ regs->regs[8], regs->regs[9]);
+ output->printf(output, " x10 %016lx x11 %016lx\n",
+ regs->regs[10], regs->regs[11]);
+ output->printf(output, " x12 %016lx x13 %016lx\n",
+ regs->regs[12], regs->regs[13]);
+ output->printf(output, " x14 %016lx x15 %016lx\n",
+ regs->regs[14], regs->regs[15]);
+ output->printf(output, " x16 %016lx x17 %016lx\n",
+ regs->regs[16], regs->regs[17]);
+ output->printf(output, " x18 %016lx x19 %016lx\n",
+ regs->regs[18], regs->regs[19]);
+ output->printf(output, " x20 %016lx x21 %016lx\n",
+ regs->regs[20], regs->regs[21]);
+ output->printf(output, " x22 %016lx x23 %016lx\n",
+ regs->regs[22], regs->regs[23]);
+ output->printf(output, " x24 %016lx x25 %016lx\n",
+ regs->regs[24], regs->regs[25]);
+ output->printf(output, " x26 %016lx x27 %016lx\n",
+ regs->regs[26], regs->regs[27]);
+ output->printf(output, " x28 %016lx x29 %016lx\n",
+ regs->regs[28], regs->regs[29]);
+ output->printf(output, " x30 %016lx sp %016lx\n",
+ regs->regs[30], regs->sp);
+ output->printf(output, " pc %016lx cpsr %08x (%s)\n",
+ regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+ const struct pt_regs *regs)
+{
+ if (compat_user_mode(regs))
+ fiq_debugger_dump_regs_aarch32(output, regs);
+ else
+ fiq_debugger_dump_regs_aarch64(output, regs);
+}
+
+#define READ_SPECIAL_REG(x) ({ \
+ u64 val; \
+ asm volatile ("mrs %0, " # x : "=r"(val)); \
+ val; \
+})
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+ const struct pt_regs *regs)
+{
+ u32 pstate = READ_SPECIAL_REG(CurrentEl);
+ bool in_el2 = (pstate & PSR_MODE_MASK) >= PSR_MODE_EL2t;
+
+ fiq_debugger_dump_regs(output, regs);
+
+ output->printf(output, " sp_el0 %016lx\n",
+ READ_SPECIAL_REG(sp_el0));
+
+ if (in_el2)
+ output->printf(output, " sp_el1 %016lx\n",
+ READ_SPECIAL_REG(sp_el1));
+
+ output->printf(output, " elr_el1 %016lx\n",
+ READ_SPECIAL_REG(elr_el1));
+
+ output->printf(output, " spsr_el1 %08lx\n",
+ READ_SPECIAL_REG(spsr_el1));
+
+ if (in_el2) {
+ output->printf(output, " spsr_irq %08lx\n",
+ READ_SPECIAL_REG(spsr_irq));
+ output->printf(output, " spsr_abt %08lx\n",
+ READ_SPECIAL_REG(spsr_abt));
+ output->printf(output, " spsr_und %08lx\n",
+ READ_SPECIAL_REG(spsr_und));
+ output->printf(output, " spsr_fiq %08lx\n",
+ READ_SPECIAL_REG(spsr_fiq));
+ output->printf(output, " spsr_el2 %08lx\n",
+ READ_SPECIAL_REG(elr_el2));
+ output->printf(output, " spsr_el2 %08lx\n",
+ READ_SPECIAL_REG(spsr_el2));
+ }
+}
+
+struct stacktrace_state {
+ struct fiq_debugger_output *output;
+ unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+ struct stacktrace_state *sts = d;
+
+ if (sts->depth) {
+ sts->output->printf(sts->output, "%pF:\n", frame->pc);
+ sts->output->printf(sts->output,
+ " pc %016lx sp %016lx fp %016lx\n",
+ frame->pc, frame->sp, frame->fp);
+ sts->depth--;
+ return 0;
+ }
+ sts->output->printf(sts->output, " ...\n");
+
+ return sts->depth == 0;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+ const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+ struct thread_info *real_thread_info = THREAD_INFO(ssp);
+ struct stacktrace_state sts;
+
+ sts.depth = depth;
+ sts.output = output;
+ *current_thread_info() = *real_thread_info;
+
+ if (!current)
+ output->printf(output, "current NULL\n");
+ else
+ output->printf(output, "pid: %d comm: %s\n",
+ current->pid, current->comm);
+ fiq_debugger_dump_regs(output, regs);
+
+ if (!user_mode(regs)) {
+ struct stackframe frame;
+ frame.fp = regs->regs[29];
+ frame.sp = regs->sp;
+ frame.pc = regs->pc;
+ output->printf(output, "\n");
+ walk_stackframe(&frame, report_trace, &sts);
+ }
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
new file mode 100644
index 000000000000..d5d051f727a8
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_DEBUGGER_PRIV_H_
+#define _FIQ_DEBUGGER_PRIV_H_
+
+#define THREAD_INFO(sp) ((struct thread_info *) \
+ ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
+
+struct fiq_debugger_output {
+ void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...);
+};
+
+struct pt_regs;
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+ const struct pt_regs *regs);
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+ const struct pt_regs *regs);
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+ const struct pt_regs *regs);
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+ const struct pt_regs *regs, unsigned int depth, void *ssp);
+
+#endif
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
new file mode 100644
index 000000000000..10c3c5d09098
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
@@ -0,0 +1,94 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct fiq_debugger_ringbuf {
+ int len;
+ int head;
+ int tail;
+ u8 buf[];
+};
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+ struct fiq_debugger_ringbuf *rbuf;
+
+ rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+ if (rbuf == NULL)
+ return NULL;
+
+ rbuf->len = len;
+ rbuf->head = 0;
+ rbuf->tail = 0;
+ smp_mb();
+
+ return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+ kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+ int level = rbuf->head - rbuf->tail;
+
+ if (level < 0)
+ level = rbuf->len + level;
+
+ return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+ return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+ return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+ count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+ rbuf->tail = (rbuf->tail + count) % rbuf->len;
+ smp_mb();
+
+ return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+ if (fiq_debugger_ringbuf_room(rbuf) == 0)
+ return 0;
+
+ rbuf->buf[rbuf->head] = datum;
+ smp_mb();
+ rbuf->head = (rbuf->head + 1) % rbuf->len;
+ smp_mb();
+
+ return 1;
+}
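
A usage sketch for the ring (illustrative only; mirrors how fiq_debugger.c
feeds UART input to the tty layer). Note the ring keeps one slot free, so
a buffer allocated with len 16 holds at most 15 bytes:

    static void ringbuf_demo(void)
    {
        struct fiq_debugger_ringbuf *rbuf = fiq_debugger_ringbuf_alloc(16);
        int i, level;

        if (!rbuf)
            return;

        /* producer side: push returns 0 and drops the byte when full */
        fiq_debugger_ringbuf_push(rbuf, 'h');
        fiq_debugger_ringbuf_push(rbuf, 'i');

        /* consumer side: peek everything, then consume what was handled */
        level = fiq_debugger_ringbuf_level(rbuf);
        for (i = 0; i < level; i++)
            pr_info("byte: %c\n", fiq_debugger_ringbuf_peek(rbuf, i));
        fiq_debugger_ringbuf_consume(rbuf, level);

        fiq_debugger_ringbuf_free(rbuf);
    }
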
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.c b/drivers/staging/android/fiq_debugger/fiq_watchdog.c
new file mode 100644
index 000000000000..194b54138417
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/pstore_ram.h>
+
+#include "fiq_watchdog.h"
+#include "fiq_debugger_priv.h"
+
+static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock);
+
+static void fiq_watchdog_printf(struct fiq_debugger_output *output,
+ const char *fmt, ...)
+{
+ char buf[256];
+ va_list ap;
+ int len;
+
+ va_start(ap, fmt);
+ len = vscnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ ramoops_console_write_buf(buf, len);
+}
+
+struct fiq_debugger_output fiq_watchdog_output = {
+ .printf = fiq_watchdog_printf,
+};
+
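+/*
+ * Expected to run from FIQ/NMI-like context when the watchdog fires:
+ * only a raw spinlock and the ramoops buffer are touched here, since
+ * regular printk and sleeping locks are not safe at that point.  The
+ * raw lock also keeps dumps from several CPUs from interleaving.
+ */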
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp)
+{
+ char msg[24];
+ int len;
+
+ raw_spin_lock(&fiq_watchdog_lock);
+
+ len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n",
+ THREAD_INFO(svc_sp)->cpu);
+ ramoops_console_write_buf(msg, len);
+
+ fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp);
+
+ raw_spin_unlock(&fiq_watchdog_lock);
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.h b/drivers/staging/android/fiq_debugger/fiq_watchdog.h
new file mode 100644
index 000000000000..c6b507f8d976
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_WATCHDOG_H_
+#define _FIQ_WATCHDOG_H_
+
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp);
+
+#endif
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644
index 000000000000..0f8fec1f84e5
--- /dev/null
+++ b/drivers/staging/android/ion/Kconfig
@@ -0,0 +1,35 @@
+menuconfig ION
+ bool "Ion Memory Manager"
+ depends on HAVE_MEMBLOCK
+ select GENERIC_ALLOCATOR
+ select DMA_SHARED_BUFFER
+ ---help---
+	  Choose this option to enable the ION Memory Manager,
+	  used by Android to efficiently allocate buffers
+	  from userspace that can be shared between drivers.
+	  If you're not using Android it's probably safe to
+	  say N here.
+
+config ION_TEST
+ tristate "Ion Test Device"
+ depends on ION
+ help
+ Choose this option to create a device that can be used to test the
+ kernel and device side ION functions.
+
+config ION_DUMMY
+ bool "Dummy Ion driver"
+ depends on ION
+ help
+ Provides a dummy ION driver that registers the
+ /dev/ion device and some basic heaps. This can
+ be used for testing the ION infrastructure if
+ one doesn't have access to hardware drivers that
+ use ION.
+
+config ION_TEGRA
+ tristate "Ion for Tegra"
+ depends on ARCH_TEGRA && ION
+ help
+	  Choose this option if you wish to use ion on an NVIDIA Tegra.
+
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
new file mode 100644
index 000000000000..b56fd2bf2b4f
--- /dev/null
+++ b/drivers/staging/android/ion/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+ ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_ION) += compat_ion.o
+endif
+
+obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
+
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
new file mode 100644
index 000000000000..ee3a7380e53b
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+ compat_size_t len;
+ compat_size_t align;
+ compat_uint_t heap_id_mask;
+ compat_uint_t flags;
+ compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+ compat_uint_t cmd;
+ compat_ulong_t arg;
+};
+
+struct compat_ion_handle_data {
+ compat_int_t handle;
+};
+
+#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
+ struct compat_ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
+ struct compat_ion_custom_data)
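+
+/*
+ * The structures above are re-declared with compat_* types (32-bit on a
+ * 64-bit kernel) rather than reused, because the field widths change the
+ * structure size that _IOWR() encodes into the ioctl number: e.g.
+ * COMPAT_ION_IOC_ALLOC differs from ION_IOC_ALLOC only in its size bits.
+ */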
+
+static int compat_get_ion_allocation_data(
+ struct compat_ion_allocation_data __user *data32,
+ struct ion_allocation_data __user *data)
+{
+ compat_size_t s;
+ compat_uint_t u;
+ compat_int_t i;
+ int err;
+
+ err = get_user(s, &data32->len);
+ err |= put_user(s, &data->len);
+ err |= get_user(s, &data32->align);
+ err |= put_user(s, &data->align);
+ err |= get_user(u, &data32->heap_id_mask);
+ err |= put_user(u, &data->heap_id_mask);
+ err |= get_user(u, &data32->flags);
+ err |= put_user(u, &data->flags);
+ err |= get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
+static int compat_get_ion_handle_data(
+ struct compat_ion_handle_data __user *data32,
+ struct ion_handle_data __user *data)
+{
+ compat_int_t i;
+ int err;
+
+ err = get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
+static int compat_put_ion_allocation_data(
+ struct compat_ion_allocation_data __user *data32,
+ struct ion_allocation_data __user *data)
+{
+ compat_size_t s;
+ compat_uint_t u;
+ compat_int_t i;
+ int err;
+
+ err = get_user(s, &data->len);
+ err |= put_user(s, &data32->len);
+ err |= get_user(s, &data->align);
+ err |= put_user(s, &data32->align);
+ err |= get_user(u, &data->heap_id_mask);
+ err |= put_user(u, &data32->heap_id_mask);
+ err |= get_user(u, &data->flags);
+ err |= put_user(u, &data32->flags);
+ err |= get_user(i, &data->handle);
+ err |= put_user(i, &data32->handle);
+
+ return err;
+}
+
+static int compat_get_ion_custom_data(
+ struct compat_ion_custom_data __user *data32,
+ struct ion_custom_data __user *data)
+{
+ compat_uint_t cmd;
+ compat_ulong_t arg;
+ int err;
+
+ err = get_user(cmd, &data32->cmd);
+ err |= put_user(cmd, &data->cmd);
+ err |= get_user(arg, &data32->arg);
+ err |= put_user(arg, &data->arg);
+
+ return err;
+}
+
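+/*
+ * Each 32-bit command is handled by bouncing through a native-sized copy
+ * in user address space: compat_alloc_user_space() carves out room on the
+ * user stack, the fields are converted with get_user()/put_user(), and
+ * the native unlocked_ioctl is then called on that copy.  Only ALLOC
+ * copies results back, since it is the only call here that returns data
+ * (the new handle).
+ */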
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case COMPAT_ION_IOC_ALLOC:
+ {
+ struct compat_ion_allocation_data __user *data32;
+ struct ion_allocation_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_allocation_data(data32, data);
+ if (err)
+ return err;
+ ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+ (unsigned long)data);
+ err = compat_put_ion_allocation_data(data32, data);
+ return ret ? ret : err;
+ }
+ case COMPAT_ION_IOC_FREE:
+ {
+ struct compat_ion_handle_data __user *data32;
+ struct ion_handle_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_handle_data(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+ (unsigned long)data);
+ }
+ case COMPAT_ION_IOC_CUSTOM: {
+ struct compat_ion_custom_data __user *data32;
+ struct ion_custom_data __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(*data));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_ion_custom_data(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+ (unsigned long)data);
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ case ION_IOC_IMPORT:
+ case ION_IOC_SYNC:
+ return filp->f_op->unlocked_ioctl(filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
new file mode 100644
index 000000000000..c2ad5893dfda
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/staging/android/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
new file mode 100644
index 000000000000..974af032fc29
--- /dev/null
+++ b/drivers/staging/android/ion/ion.c
@@ -0,0 +1,1823 @@
+/*
+ * drivers/staging/android/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ * @heaps: list of all the heaps in the system
+ * @clients:		an rb tree of all the existing clients
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+ unsigned long arg);
+ struct rb_root clients;
+ struct dentry *debug_root;
+ struct dentry *heaps_debug_root;
+ struct dentry *clients_debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node: node in the tree of all clients
+ * @dev: backpointer to ion device
+ * @handles: an rb tree of all the handles in this client
+ * @idr: an idr space for allocating handle ids
+ * @lock: lock protecting the tree of handles
+ * @name: used for debugging
+ * @display_name: used for debugging (unique version of @name)
+ * @display_serial: used for debugging (to make display_name unique)
+ * @task: used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+ struct rb_node node;
+ struct ion_device *dev;
+ struct rb_root handles;
+ struct idr idr;
+ struct mutex lock;
+ const char *name;
+ char *display_name;
+ int display_serial;
+ struct task_struct *task;
+ pid_t pid;
+ struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref: reference count
+ * @client: back pointer to the client the buffer resides in
+ * @buffer: pointer to the buffer
+ * @node: node in the client's handle rbtree
+ * @kmap_cnt: count of times this client has mapped to kernel
+ * @id: client-unique id allocated by client->idr
+ *
+ * Modifications to node and kmap_cnt should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
+struct ion_handle {
+ struct kref ref;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+ unsigned int kmap_cnt;
+ int id;
+};
+
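+/*
+ * A buffer's user mappings are faulted in page-by-page (instead of being
+ * mapped up front in mmap) only when the buffer is cached but not marked
+ * ION_FLAG_CACHED_NEEDS_SYNC: faulting lets ion track which pages the
+ * CPU dirtied, so ion_buffer_sync_for_device() syncs just those pages.
+ */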
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+ return (buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+ return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
+static inline struct page *ion_buffer_page(struct page *page)
+{
+ return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+ return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
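+
+/*
+ * The helpers above tag the least-significant bit of a page pointer as a
+ * "dirty" flag.  struct page pointers are at least word-aligned, so bit 0
+ * is always free; ion_buffer_page() masks it back off before the pointer
+ * is dereferenced.
+ */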
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+ struct ion_buffer *buffer)
+{
+ struct rb_node **p = &dev->buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_buffer *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_buffer, node);
+
+ if (buffer < entry) {
+ p = &(*p)->rb_left;
+ } else if (buffer > entry) {
+ p = &(*p)->rb_right;
+ } else {
+			pr_err("%s: buffer already found.\n", __func__);
+ BUG();
+ }
+ }
+
+ rb_link_node(&buffer->node, parent, p);
+ rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ struct ion_device *dev,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i, ret;
+
+ buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ buffer->heap = heap;
+ buffer->flags = flags;
+ kref_init(&buffer->ref);
+
+ ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
+ if (ret) {
+ if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+ goto err2;
+
+ ion_heap_freelist_drain(heap, 0);
+ ret = heap->ops->allocate(heap, buffer, len, align,
+ flags);
+ if (ret)
+ goto err2;
+ }
+
+ buffer->dev = dev;
+ buffer->size = len;
+
+ table = heap->ops->map_dma(heap, buffer);
+ if (WARN_ONCE(table == NULL,
+ "heap->ops->map_dma should return ERR_PTR on error"))
+ table = ERR_PTR(-EINVAL);
+ if (IS_ERR(table)) {
+ heap->ops->free(buffer);
+ kfree(buffer);
+		return ERR_CAST(table);
+ }
+ buffer->sg_table = table;
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct scatterlist *sg;
+ int i, j, k = 0;
+
+ buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+ if (!buffer->pages) {
+ ret = -ENOMEM;
+			/* the dma mapping made above must be undone too */
+			goto err;
+ }
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+
+ for (j = 0; j < sg->length / PAGE_SIZE; j++)
+ buffer->pages[k++] = page++;
+ }
+ }
+
+ INIT_LIST_HEAD(&buffer->vmas);
+ mutex_init(&buffer->lock);
+ /* this will set up dma addresses for the sglist -- it is not
+ technically correct as per the dma api -- a specific
+ device isn't really taking ownership here. However, in practice on
+ our systems the only dma_address space is physical addresses.
+ Additionally, we can't afford the overhead of invalidating every
+ allocation via dma_map_sg. The implicit contract here is that
+	   memory coming from the heaps is ready for dma, i.e. if it has a
+ cached mapping that mapping has been invalidated */
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ sg_dma_address(sg) = sg_phys(sg);
+ mutex_lock(&dev->buffer_lock);
+ ion_buffer_add(dev, buffer);
+ mutex_unlock(&dev->buffer_lock);
+ return buffer;
+
+err:
+	heap->ops->unmap_dma(heap, buffer);
+	heap->ops->free(buffer);
+	vfree(buffer->pages);	/* vfree(NULL) is a no-op */
+err2:
+ kfree(buffer);
+ return ERR_PTR(ret);
+}
+
+void ion_buffer_destroy(struct ion_buffer *buffer)
+{
+ if (WARN_ON(buffer->kmap_cnt > 0))
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+ buffer->heap->ops->free(buffer);
+ if (buffer->pages)
+ vfree(buffer->pages);
+ kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+ struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+ struct ion_heap *heap = buffer->heap;
+ struct ion_device *dev = buffer->dev;
+
+ mutex_lock(&dev->buffer_lock);
+ rb_erase(&buffer->node, &dev->buffers);
+ mutex_unlock(&dev->buffer_lock);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_freelist_add(heap, buffer);
+ else
+ ion_buffer_destroy(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+ kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+ return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+ mutex_lock(&buffer->lock);
+ buffer->handle_count++;
+ mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+ /*
+ * when a buffer is removed from a handle, if it is not in
+ * any other handles, copy the taskcomm and the pid of the
+ * process it's being removed from into the buffer. At this
+ * point there will be no way to track what processes this buffer is
+ * being used by, it only exists as a dma_buf file descriptor.
+ * The taskcomm and pid can provide a debug hint as to where this fd
+ * is in the system
+ */
+ mutex_lock(&buffer->lock);
+ buffer->handle_count--;
+ BUG_ON(buffer->handle_count < 0);
+ if (!buffer->handle_count) {
+ struct task_struct *task;
+
+ task = current->group_leader;
+ get_task_comm(buffer->task_comm, task);
+ buffer->pid = task_pid_nr(task);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct ion_handle *handle;
+
+ handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&handle->ref);
+ RB_CLEAR_NODE(&handle->node);
+ handle->client = client;
+ ion_buffer_get(buffer);
+ ion_buffer_add_to_handle(buffer);
+ handle->buffer = buffer;
+
+ return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+ struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+ struct ion_client *client = handle->client;
+ struct ion_buffer *buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+ while (handle->kmap_cnt)
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+
+ idr_remove(&client->idr, handle->id);
+ if (!RB_EMPTY_NODE(&handle->node))
+ rb_erase(&handle->node, &client->handles);
+
+ ion_buffer_remove_from_handle(buffer);
+ ion_buffer_put(buffer);
+
+ kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+ return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+ kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+ struct ion_client *client = handle->client;
+ int ret;
+
+ mutex_lock(&client->lock);
+ ret = kref_put(&handle->ref, ion_handle_destroy);
+ mutex_unlock(&client->lock);
+
+ return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+ struct ion_buffer *buffer)
+{
+ struct rb_node *n = client->handles.rb_node;
+
+ while (n) {
+ struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+ if (buffer < entry->buffer)
+ n = n->rb_left;
+ else if (buffer > entry->buffer)
+ n = n->rb_right;
+ else
+ return entry;
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id)
+{
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = idr_find(&client->idr, id);
+ if (handle)
+ ion_handle_get(handle);
+ mutex_unlock(&client->lock);
+
+ return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ WARN_ON(!mutex_is_locked(&client->lock));
+ return (idr_find(&client->idr, handle->id) == handle);
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+ int id;
+ struct rb_node **p = &client->handles.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_handle *entry;
+
+ id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
+ handle->id = id;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_handle, node);
+
+ if (handle->buffer < entry->buffer)
+ p = &(*p)->rb_left;
+ else if (handle->buffer > entry->buffer)
+ p = &(*p)->rb_right;
+ else
+ WARN(1, "%s: buffer already found.", __func__);
+ }
+
+ rb_link_node(&handle->node, parent, p);
+ rb_insert_color(&handle->node, &client->handles);
+
+ return 0;
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags)
+{
+ struct ion_handle *handle;
+ struct ion_device *dev = client->dev;
+ struct ion_buffer *buffer = NULL;
+ struct ion_heap *heap;
+ int ret;
+
+ pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+ len, align, heap_id_mask, flags);
+ /*
+ * traverse the list of heaps available in this system in priority
+	 * order.  If the heap type is supported by the client, and matches the
+	 * request of the caller, allocate from it.  Repeat until allocate has
+ * succeeded or all heaps have been tried
+ */
+ len = PAGE_ALIGN(len);
+
+ if (!len)
+ return ERR_PTR(-EINVAL);
+
+ down_read(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ /* if the caller didn't specify this heap id */
+ if (!((1 << heap->id) & heap_id_mask))
+ continue;
+ buffer = ion_buffer_create(heap, dev, len, align, flags);
+ if (!IS_ERR(buffer))
+ break;
+ }
+ up_read(&dev->lock);
+
+ if (buffer == NULL)
+ return ERR_PTR(-ENODEV);
+
+	if (IS_ERR(buffer))
+		return ERR_CAST(buffer);
+
+ handle = ion_handle_create(client, buffer);
+
+ /*
+ * ion_buffer_create will create a buffer with a ref_cnt of 1,
+ * and ion_handle_create will take a second reference, drop one here
+ */
+ ion_buffer_put(buffer);
+
+ if (IS_ERR(handle))
+ return handle;
+
+ mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
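+
+/*
+ * Typical in-kernel usage of the API above (illustrative only; "idev"
+ * and "my_heap_id" are placeholders and error handling is omitted):
+ *
+ *	struct ion_client *client = ion_client_create(idev, "my-driver");
+ *	struct ion_handle *handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
+ *					      1 << my_heap_id,
+ *					      ION_FLAG_CACHED);
+ *	void *vaddr = ion_map_kernel(client, handle);
+ *	...
+ *	ion_unmap_kernel(client, handle);
+ *	ion_free(client, handle);
+ *	ion_client_destroy(client);
+ */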
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ bool valid_handle;
+
+ BUG_ON(client != handle->client);
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+ mutex_unlock(&client->lock);
+ return;
+ }
+ mutex_unlock(&client->lock);
+ ion_handle_put(handle);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ if (!buffer->heap->ops->phys) {
+ pr_err("%s: ion_phys is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&client->lock);
+ ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+ return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->kmap_cnt) {
+ buffer->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+ if (WARN_ONCE(vaddr == NULL,
+ "heap->ops->map_kernel should return ERR_PTR on error"))
+ return ERR_PTR(-EINVAL);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ buffer->vaddr = vaddr;
+ buffer->kmap_cnt++;
+ return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+ void *vaddr;
+
+ if (handle->kmap_cnt) {
+ handle->kmap_cnt++;
+ return buffer->vaddr;
+ }
+ vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR(vaddr))
+ return vaddr;
+ handle->kmap_cnt++;
+ return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+ buffer->kmap_cnt--;
+ if (!buffer->kmap_cnt) {
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+ buffer->vaddr = NULL;
+ }
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+
+ handle->kmap_cnt--;
+ if (!handle->kmap_cnt)
+ ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ void *vaddr;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_kernel.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+
+ buffer = handle->buffer;
+
+ if (!handle->buffer->heap->ops->map_kernel) {
+ pr_err("%s: map_kernel is not implemented by this heap.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_handle_kmap_get(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+ return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+
+ mutex_lock(&client->lock);
+ buffer = handle->buffer;
+ mutex_lock(&buffer->lock);
+ ion_handle_kmap_put(handle);
+ mutex_unlock(&buffer->lock);
+ mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+ struct ion_client *client = s->private;
+ struct rb_node *n;
+ size_t sizes[ION_NUM_HEAP_IDS] = {0};
+ const char *names[ION_NUM_HEAP_IDS] = {NULL};
+ int i;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ unsigned int id = handle->buffer->heap->id;
+
+ if (!names[id])
+ names[id] = handle->buffer->heap->name;
+ sizes[id] += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+
+ seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+ for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+ if (!names[i])
+ continue;
+ seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+ }
+ return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+ .open = ion_debug_client_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int ion_get_client_serial(const struct rb_root *root,
+ const unsigned char *name)
+{
+ int serial = -1;
+ struct rb_node *node;
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ struct ion_client *client = rb_entry(node, struct ion_client,
+ node);
+ if (strcmp(client->name, name))
+ continue;
+ serial = max(serial, client->display_serial);
+ }
+ return serial + 1;
+}
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name)
+{
+ struct ion_client *client;
+ struct task_struct *task;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct ion_client *entry;
+ pid_t pid;
+
+ if (!name) {
+ pr_err("%s: Name cannot be null\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ get_task_struct(current->group_leader);
+ task_lock(current->group_leader);
+ pid = task_pid_nr(current->group_leader);
+ /* don't bother to store task struct for kernel threads,
+ they can't be killed anyway */
+ if (current->group_leader->flags & PF_KTHREAD) {
+ put_task_struct(current->group_leader);
+ task = NULL;
+ } else {
+ task = current->group_leader;
+ }
+ task_unlock(current->group_leader);
+
+ client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+ if (!client)
+ goto err_put_task_struct;
+
+ client->dev = dev;
+ client->handles = RB_ROOT;
+ idr_init(&client->idr);
+ mutex_init(&client->lock);
+ client->task = task;
+ client->pid = pid;
+ client->name = kstrdup(name, GFP_KERNEL);
+ if (!client->name)
+ goto err_free_client;
+
+ down_write(&dev->lock);
+ client->display_serial = ion_get_client_serial(&dev->clients, name);
+ client->display_name = kasprintf(
+ GFP_KERNEL, "%s-%d", name, client->display_serial);
+ if (!client->display_name) {
+ up_write(&dev->lock);
+ goto err_free_client_name;
+ }
+ p = &dev->clients.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_client, node);
+
+ if (client < entry)
+ p = &(*p)->rb_left;
+ else if (client > entry)
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&client->node, parent, p);
+ rb_insert_color(&client->node, &dev->clients);
+
+ client->debug_root = debugfs_create_file(client->display_name, 0664,
+ dev->clients_debug_root,
+ client, &debug_client_fops);
+ if (!client->debug_root) {
+ char buf[256], *path;
+ path = dentry_path(dev->clients_debug_root, buf, 256);
+ pr_err("Failed to create client debugfs at %s/%s\n",
+ path, client->display_name);
+ }
+
+ up_write(&dev->lock);
+
+ return client;
+
+err_free_client_name:
+ kfree(client->name);
+err_free_client:
+ kfree(client);
+err_put_task_struct:
+ if (task)
+ put_task_struct(current->group_leader);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(ion_client_create);
+
+void ion_client_destroy(struct ion_client *client)
+{
+ struct ion_device *dev = client->dev;
+ struct rb_node *n;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ while ((n = rb_first(&client->handles))) {
+ struct ion_handle *handle = rb_entry(n, struct ion_handle,
+ node);
+ ion_handle_destroy(&handle->ref);
+ }
+
+ idr_destroy(&client->idr);
+
+ down_write(&dev->lock);
+ if (client->task)
+ put_task_struct(client->task);
+ rb_erase(&client->node, &dev->clients);
+ debugfs_remove_recursive(client->debug_root);
+ up_write(&dev->lock);
+
+ kfree(client->display_name);
+ kfree(client->name);
+ kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct sg_table *table;
+
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, handle)) {
+ pr_err("%s: invalid handle passed to map_dma.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ table = buffer->sg_table;
+ mutex_unlock(&client->lock);
+ return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction direction);
+
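+/*
+ * When the attaching device sits behind an IOMMU, one mapped sg_table is
+ * cached per IOMMU domain in buffer->mapping[] and refcounted with a
+ * kref, so repeat attachments from the same domain reuse the existing
+ * mapping (this per-domain cache is not in the upstream driver; it
+ * appears to come from the NVIDIA changes noted in the copyright above).
+ * Devices with no IOMMU get buffer->sg_table directly, as upstream does.
+ */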
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ int err, i, empty = -1;
+ struct dma_iommu_mapping *iommu_map;
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+ unsigned int nents = buffer->sg_table->nents;
+ struct ion_mapping *map_ptr;
+ struct scatterlist *sg;
+
+ iommu_map = to_dma_iommu_mapping(attachment->dev);
+ if (!iommu_map) {
+ ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+ return buffer->sg_table;
+ }
+
+ mutex_lock(&buffer->lock);
+ for (i = 0; i < ARRAY_SIZE(buffer->mapping); i++) {
+ map_ptr = &buffer->mapping[i];
+ if (!map_ptr->dev) {
+ empty = i;
+ continue;
+ }
+
+ if (to_dma_iommu_mapping(map_ptr->dev) == iommu_map) {
+ kref_get(&map_ptr->kref);
+ mutex_unlock(&buffer->lock);
+ return &map_ptr->sgt;
+ }
+ }
+
+	/* empty stays -1 when every mapping slot is in use */
+	if (empty < 0) {
+ err = -ENOMEM;
+ goto err_no_space;
+ }
+
+ map_ptr = &buffer->mapping[empty];
+ err = sg_alloc_table(&map_ptr->sgt, nents, GFP_KERNEL);
+ if (err)
+ goto err_sg_alloc_table;
+
+ for_each_sg(buffer->sg_table->sgl, sg, nents, i)
+ memcpy(map_ptr->sgt.sgl + i, sg, sizeof(*sg));
+
+ nents = dma_map_sg(attachment->dev, map_ptr->sgt.sgl, nents, direction);
+ if (!nents) {
+ err = -EINVAL;
+ goto err_dma_map_sg;
+ }
+
+ kref_init(&map_ptr->kref);
+ map_ptr->dev = attachment->dev;
+ mutex_unlock(&buffer->lock);
+ return &map_ptr->sgt;
+
+err_dma_map_sg:
+ sg_free_table(&map_ptr->sgt);
+err_sg_alloc_table:
+err_no_space:
+ mutex_unlock(&buffer->lock);
+ return ERR_PTR(err);
+}
+
+static void __ion_unmap_dma_buf(struct kref *kref)
+{
+ struct ion_mapping *map_ptr;
+
+ map_ptr = container_of(kref, struct ion_mapping, kref);
+ dma_unmap_sg(map_ptr->dev, map_ptr->sgt.sgl, map_ptr->sgt.nents,
+ DMA_BIDIRECTIONAL);
+ sg_free_table(&map_ptr->sgt);
+ memset(map_ptr, 0, sizeof(*map_ptr));
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ int i;
+ struct dma_iommu_mapping *iommu_map;
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+ struct ion_mapping *map_ptr;
+
+ iommu_map = to_dma_iommu_mapping(attachment->dev);
+ if (!iommu_map)
+ return;
+
+ mutex_lock(&buffer->lock);
+ for (i = 0; i < ARRAY_SIZE(buffer->mapping); i++) {
+ map_ptr = &buffer->mapping[i];
+ if (!map_ptr->dev)
+ continue;
+
+ if (to_dma_iommu_mapping(map_ptr->dev) == iommu_map) {
+ kref_put(&map_ptr->kref, __ion_unmap_dma_buf);
+ mutex_unlock(&buffer->lock);
+ return;
+ }
+ }
+
+	dev_warn(attachment->dev, "no mapping found for IOMMU domain %p\n",
+		 to_dma_iommu_mapping(attachment->dev));
+
+ mutex_unlock(&buffer->lock);
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ /*
+ * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+	 * for the targeted device, but this works on the currently targeted
+ * hardware.
+ */
+ sg_dma_address(&sg) = page_to_phys(page);
+ dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+ struct list_head list;
+ struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct ion_vma_list *vma_list;
+ int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ int i;
+
+ pr_debug("%s: syncing for device %s\n", __func__,
+ dev ? dev_name(dev) : "null");
+
+ if (!ion_buffer_fault_user_mappings(buffer))
+ return;
+
+ mutex_lock(&buffer->lock);
+ for (i = 0; i < pages; i++) {
+ struct page *page = buffer->pages[i];
+
+ if (ion_buffer_page_is_dirty(page))
+ ion_pages_sync_for_device(dev, ion_buffer_page(page),
+ PAGE_SIZE, dir);
+
+ ion_buffer_page_clean(buffer->pages + i);
+ }
+ list_for_each_entry(vma_list, &buffer->vmas, list) {
+ struct vm_area_struct *vma = vma_list->vma;
+
+ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+ NULL);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ unsigned long pfn;
+ int ret;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+ BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+ pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ mutex_unlock(&buffer->lock);
+ if (ret)
+ return VM_FAULT_ERROR;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list;
+
+ vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+ if (!vma_list)
+ return;
+ vma_list->vma = vma;
+ mutex_lock(&buffer->lock);
+ list_add(&vma_list->list, &buffer->vmas);
+ mutex_unlock(&buffer->lock);
+ pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list, *tmp;
+
+ pr_debug("%s\n", __func__);
+ mutex_lock(&buffer->lock);
+ list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+ if (vma_list->vma != vma)
+ continue;
+ list_del(&vma_list->list);
+ kfree(vma_list);
+ pr_debug("%s: deleting %p\n", __func__, vma);
+ break;
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static struct vm_operations_struct ion_vma_ops = {
+ .open = ion_vm_open,
+ .close = ion_vm_close,
+ .fault = ion_vm_fault,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ int ret = 0;
+
+ if (!buffer->heap->ops->map_user) {
+		pr_err("%s: this heap does not define a method for mapping to userspace\n",
+		       __func__);
+ return -EINVAL;
+ }
+
+ if (ion_buffer_fault_user_mappings(buffer)) {
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+ VM_DONTDUMP;
+ vma->vm_private_data = buffer;
+ vma->vm_ops = &ion_vma_ops;
+ ion_vm_open(vma);
+ return 0;
+ }
+
+ if (!(buffer->flags & ION_FLAG_CACHED))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ mutex_lock(&buffer->lock);
+ /* now map it to userspace */
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
+
+ if (ret)
+ pr_err("%s: failure mapping buffer to userspace\n",
+ __func__);
+
+ return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+ int i;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ for (i = 0; i < ARRAY_SIZE(buffer->importer); i++) {
+ struct ion_importer *imp;
+
+ imp = &buffer->importer[i];
+ if (imp->dev && imp->delete)
+ imp->delete(imp->priv);
+ }
+
+ ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ if (!buffer->heap->ops->map_kernel) {
+ pr_err("%s: map kernel is not implemented by this heap.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+static int ion_dma_buf_set_private(struct dma_buf *dmabuf, struct device *dev,
+ void *priv, void (*delete)(void *))
+{
+ int i, empty = -1, err = 0;
+ struct ion_buffer *buffer = dmabuf->priv;
+ struct ion_importer *imp;
+
+ mutex_lock(&buffer->lock);
+ for (i = 0; i < ARRAY_SIZE(buffer->importer); i++) {
+ imp = &buffer->importer[i];
+ if ((empty == -1) && !imp->dev)
+ empty = i;
+
+ if (dev == imp->dev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(buffer->importer)) {
+ if (empty == -1) {
+ pr_err("ION: Needs more importer space\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ imp = &buffer->importer[empty];
+ i = empty;
+ }
+
+ imp->dev = dev;
+ imp->priv = priv;
+ imp->delete = delete;
+out:
+ mutex_unlock(&buffer->lock);
+ dev_dbg(dev, "%s() dmabuf=%p err=%d i=%d priv=%p\n",
+ __func__, dmabuf, err, i, priv);
+ return err;
+}
+
+static void *ion_dma_buf_get_private(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ int i;
+ void *priv = NULL;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ for (i = 0; i < ARRAY_SIZE(buffer->importer); i++) {
+ struct ion_importer *imp;
+
+ imp = &buffer->importer[i];
+ if (dev == imp->dev) {
+ priv = imp->priv;
+ break;
+ }
+ }
+ mutex_unlock(&buffer->lock);
+ dev_dbg(dev, "%s() dmabuf=%p i=%d priv=%p\n",
+ __func__, dmabuf, i, priv);
+ return priv;
+}
+
+static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *addr;
+
+ mutex_lock(&buffer->lock);
+ addr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ pr_debug("%s() %p\n", __func__, addr);
+ return addr;
+}
+
+static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ pr_debug("%s() %p\n", __func__, vaddr);
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = ion_map_dma_buf,
+ .unmap_dma_buf = ion_unmap_dma_buf,
+ .mmap = ion_mmap,
+ .release = ion_dma_buf_release,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
+ .kmap_atomic = ion_dma_buf_kmap,
+ .kunmap_atomic = ion_dma_buf_kunmap,
+ .kmap = ion_dma_buf_kmap,
+ .kunmap = ion_dma_buf_kunmap,
+ .vmap = ion_dma_buf_vmap,
+ .vunmap = ion_dma_buf_vunmap,
+ .set_drvdata = ion_dma_buf_set_private,
+ .get_drvdata = ion_dma_buf_get_private,
+};
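+
+/*
+ * Note: .set_drvdata/.get_drvdata (ion_dma_buf_set_private() and
+ * ion_dma_buf_get_private() above) are not part of the stock dma_buf_ops;
+ * like the importer[] array they let an importing device attach a private
+ * pointer to the buffer, with an optional destructor that is run from
+ * ion_dma_buf_release().
+ */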
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ struct ion_buffer *buffer;
+ struct dma_buf *dmabuf;
+ bool valid_handle;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to share.\n", __func__);
+ mutex_unlock(&client->lock);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = handle->buffer;
+ ion_buffer_get(buffer);
+ mutex_unlock(&client->lock);
+
+ dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+ if (IS_ERR(dmabuf)) {
+ ion_buffer_put(buffer);
+ return dmabuf;
+ }
+
+ return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+ struct dma_buf *dmabuf;
+ int fd;
+
+ dmabuf = ion_share_dma_buf(client, handle);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+ if (fd < 0)
+ dma_buf_put(dmabuf);
+
+ return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+ struct ion_handle *handle;
+ int ret;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+		return ERR_CAST(dmabuf);
+
+	/* if this memory came from ion */
+	if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not import dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return ERR_PTR(-EINVAL);
+ }
+ buffer = dmabuf->priv;
+
+ mutex_lock(&client->lock);
+ /* if a handle exists for this buffer just take a reference to it */
+ handle = ion_handle_lookup(client, buffer);
+ if (!IS_ERR(handle)) {
+ ion_handle_get(handle);
+ mutex_unlock(&client->lock);
+ goto end;
+ }
+ mutex_unlock(&client->lock);
+
+ handle = ion_handle_create(client, buffer);
+ if (IS_ERR(handle))
+ goto end;
+
+ mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
+
+end:
+ dma_buf_put(dmabuf);
+ return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
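+
+/*
+ * Share/import round trip (illustrative; how the fd travels between
+ * processes -- binder, unix socket, etc. -- is outside ion):
+ *
+ *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
+ *	...
+ *	struct ion_handle *handle_b = ion_import_dma_buf(client_b, fd);
+ *
+ * Importing a buffer a client already has a handle to just takes another
+ * reference on that handle (see ion_handle_lookup() above).
+ */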
+
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ /* if this memory came from ion */
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not sync dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
+ buffer = dmabuf->priv;
+
+ dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+ buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+ dma_buf_put(dmabuf);
+ return 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+ switch (cmd) {
+ case ION_IOC_SYNC:
+ case ION_IOC_FREE:
+ case ION_IOC_CUSTOM:
+ return _IOC_WRITE;
+ default:
+ return _IOC_DIR(cmd);
+ }
+}
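+
+/*
+ * These commands were defined as _IOWR() in the uapi header even though
+ * the kernel only reads their arguments.  Reporting them as write-only
+ * here keeps ion_ioctl() below from copying unmodified data back out to
+ * userspace.
+ */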
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ion_client *client = filp->private_data;
+ struct ion_device *dev = client->dev;
+ struct ion_handle *cleanup_handle = NULL;
+ int ret = 0;
+ unsigned int dir;
+
+ union {
+ struct ion_fd_data fd;
+ struct ion_allocation_data allocation;
+ struct ion_handle_data handle;
+ struct ion_custom_data custom;
+ } data;
+
+ dir = ion_ioctl_dir(cmd);
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (dir & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_ALLOC:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_alloc(client, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ data.allocation.handle = handle->id;
+
+ cleanup_handle = handle;
+ break;
+ }
+ case ION_IOC_FREE:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ ion_free(client, handle);
+ ion_handle_put(handle);
+ break;
+ }
+ case ION_IOC_SHARE:
+ case ION_IOC_MAP:
+ {
+ struct ion_handle *handle;
+
+ handle = ion_handle_get_by_id(client, data.handle.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ data.fd.fd = ion_share_dma_buf_fd(client, handle);
+ ion_handle_put(handle);
+ if (data.fd.fd < 0)
+ ret = data.fd.fd;
+ break;
+ }
+ case ION_IOC_IMPORT:
+ {
+ struct ion_handle *handle;
+ handle = ion_import_dma_buf(client, data.fd.fd);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ break;
+ }
+ case ION_IOC_SYNC:
+ {
+ ret = ion_sync_for_device(client, data.fd.fd);
+ break;
+ }
+ case ION_IOC_CUSTOM:
+ {
+ if (!dev->custom_ioctl)
+ return -ENOTTY;
+ ret = dev->custom_ioctl(client, data.custom.cmd,
+ data.custom.arg);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle)
+ ion_free(client, cleanup_handle);
+ return -EFAULT;
+ }
+ }
+ return ret;
+}
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+ struct ion_client *client = file->private_data;
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ ion_client_destroy(client);
+ return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+ struct ion_client *client;
+ char debug_name[64];
+
+ pr_debug("%s: %d\n", __func__, __LINE__);
+ snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
+ client = ion_client_create(dev, debug_name);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+ file->private_data = client;
+
+ return 0;
+}
+
+static const struct file_operations ion_fops = {
+ .owner = THIS_MODULE,
+ .open = ion_open,
+ .release = ion_release,
+ .unlocked_ioctl = ion_ioctl,
+ .compat_ioctl = compat_ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+ unsigned int id)
+{
+ size_t size = 0;
+ struct rb_node *n;
+
+ mutex_lock(&client->lock);
+ for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+ struct ion_handle *handle = rb_entry(n,
+ struct ion_handle,
+ node);
+ if (handle->buffer->heap->id == id)
+ size += handle->buffer->size;
+ }
+ mutex_unlock(&client->lock);
+ return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+ struct ion_heap *heap = s->private;
+ struct ion_device *dev = heap->dev;
+ struct rb_node *n;
+ size_t total_size = 0;
+ size_t total_orphaned_size = 0;
+
+	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
+ seq_printf(s, "----------------------------------------------------\n");
+
+ for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+ struct ion_client *client = rb_entry(n, struct ion_client,
+ node);
+ size_t size = ion_debug_heap_total(client, heap->id);
+ if (!size)
+ continue;
+ if (client->task) {
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, client->task);
+			seq_printf(s, "%16s %16u %16zu\n", task_comm,
+ client->pid, size);
+ } else {
+			seq_printf(s, "%16s %16u %16zu\n", client->name,
+ client->pid, size);
+ }
+ }
+ seq_printf(s, "----------------------------------------------------\n");
+	seq_puts(s, "orphaned allocations (info is from last known client):\n");
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ if (buffer->heap->id != heap->id)
+ continue;
+ total_size += buffer->size;
+ if (!buffer->handle_count) {
+			seq_printf(s, "%16s %16u %16zu %d %d\n",
+ buffer->task_comm, buffer->pid,
+ buffer->size, buffer->kmap_cnt,
+ atomic_read(&buffer->ref.refcount));
+ total_orphaned_size += buffer->size;
+ }
+ }
+ mutex_unlock(&dev->buffer_lock);
+ seq_printf(s, "----------------------------------------------------\n");
+	seq_printf(s, "%16s %16zu\n", "total orphaned",
+ total_orphaned_size);
+	seq_printf(s, "%16s %16zu\n", "total ", total_size);
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		seq_printf(s, "%16s %16zu\n", "deferred free",
+ heap->free_list_size);
+ seq_printf(s, "----------------------------------------------------\n");
+
+ if (heap->debug_show)
+ heap->debug_show(heap, s, unused);
+
+ return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+ .open = ion_debug_heap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifdef DEBUG_HEAP_SHRINKER
+static int debug_shrink_set(void *data, u64 val)
+{
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ if (!val)
+ return 0;
+
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ sc.nr_to_scan = objs;
+
+ heap->shrinker.shrink(&heap->shrinker, &sc);
+ return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+ struct ion_heap *heap = data;
+ struct shrink_control sc;
+ int objs;
+
+ sc.gfp_mask = -1;
+ sc.nr_to_scan = 0;
+
+ objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+ *val = objs;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+ debug_shrink_set, "%llu\n");
+#endif
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+ struct dentry *debug_file;
+
+ if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+ !heap->ops->unmap_dma)
+ pr_err("%s: can not add heap with invalid ops struct.\n",
+ __func__);
+
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ ion_heap_init_deferred_free(heap);
+
+ if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+ ion_heap_init_shrinker(heap);
+
+ heap->dev = dev;
+ down_write(&dev->lock);
+ /* use negative heap->id to reverse the priority -- when traversing
+ the list later attempt higher id numbers first */
+ plist_node_init(&heap->node, -heap->id);
+ plist_add(&heap->node, &dev->heaps);
+ debug_file = debugfs_create_file(heap->name, 0664,
+ dev->heaps_debug_root, heap,
+ &debug_heap_fops);
+
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+ pr_err("Failed to create heap debugfs at %s/%s\n",
+ path, heap->name);
+ }
+
+#ifdef DEBUG_HEAP_SHRINKER
+ if (heap->shrinker.shrink) {
+ char debug_name[64];
+
+ snprintf(debug_name, 64, "%s_shrink", heap->name);
+ debug_file = debugfs_create_file(
+ debug_name, 0644, dev->heaps_debug_root, heap,
+ &debug_shrink_fops);
+ if (!debug_file) {
+ char buf[256], *path;
+ path = dentry_path(dev->heaps_debug_root, buf, 256);
+ pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
+ path, debug_name);
+ }
+ }
+#endif
+ up_write(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg))
+{
+ struct ion_device *idev;
+ int ret;
+
+ idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+ if (!idev)
+ return ERR_PTR(-ENOMEM);
+
+ idev->dev.minor = MISC_DYNAMIC_MINOR;
+ idev->dev.name = "ion";
+ idev->dev.fops = &ion_fops;
+ idev->dev.parent = NULL;
+ ret = misc_register(&idev->dev);
+	if (ret) {
+		pr_err("ion: failed to register misc device.\n");
+		kfree(idev);
+		return ERR_PTR(ret);
+	}
+
+ idev->debug_root = debugfs_create_dir("ion", NULL);
+ if (!idev->debug_root) {
+ pr_err("ion: failed to create debugfs root directory.\n");
+ goto debugfs_done;
+ }
+ idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
+ if (!idev->heaps_debug_root) {
+ pr_err("ion: failed to create debugfs heaps directory.\n");
+ goto debugfs_done;
+ }
+ idev->clients_debug_root = debugfs_create_dir("clients",
+ idev->debug_root);
+ if (!idev->clients_debug_root)
+ pr_err("ion: failed to create debugfs clients directory.\n");
+
+debugfs_done:
+
+ idev->custom_ioctl = custom_ioctl;
+ idev->buffers = RB_ROOT;
+ mutex_init(&idev->buffer_lock);
+ init_rwsem(&idev->lock);
+ plist_head_init(&idev->heaps);
+ idev->clients = RB_ROOT;
+ return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+ misc_deregister(&dev->dev);
+ debugfs_remove_recursive(dev->debug_root);
+ /* XXX need to free the heaps and clients ? */
+ kfree(dev);
+}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr; i++) {
+ if (data->heaps[i].size == 0)
+ continue;
+
+ if (data->heaps[i].base == 0) {
+ phys_addr_t paddr;
+ paddr = memblock_alloc_base(data->heaps[i].size,
+ data->heaps[i].align,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!paddr) {
+				pr_err("%s: error allocating memblock for heap %d\n",
+				       __func__, i);
+ continue;
+ }
+ data->heaps[i].base = paddr;
+ } else {
+ int ret = memblock_reserve(data->heaps[i].base,
+ data->heaps[i].size);
+ if (ret)
+ pr_err("memblock reserve of %zx@%lx failed\n",
+ data->heaps[i].size,
+ data->heaps[i].base);
+ }
+ pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+ data->heaps[i].name,
+ data->heaps[i].base,
+ data->heaps[i].size);
+ }
+}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
new file mode 100644
index 000000000000..dcd2a0cdb192
--- /dev/null
+++ b/drivers/staging/android/ion/ion.h
@@ -0,0 +1,204 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "../uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+ plumbed in the kernel, and all instances of ion_phys_addr_t should
+ be converted to phys_addr_t. For the time being many kernel interfaces
+ do not accept phys_addr_t's, so those users would have to be converted
+ first. */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type: type of the heap from ion_heap_type enum
+ * @id: unique identifier for the heap. When allocating, heaps
+ * with higher ids are tried first. At allocation time ids are
+ * passed as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
+ * @name: used for debug purposes
+ * @base: base address of heap in physical memory if applicable
+ * @size: size of the heap in bytes if applicable
+ * @align: required alignment in physical memory if applicable
+ * @priv: private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+ enum ion_heap_type type;
+ unsigned int id;
+ const char *name;
+ ion_phys_addr_t base;
+ size_t size;
+ ion_phys_addr_t align;
+ void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr: number of structures in the array
+ * @heaps: array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+ int nr;
+ struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data: platform data specifying starting physical address and
+ * size
+ *
+ * Calls memblock_reserve to set aside memory for heaps that are
+ * located at specific memory addresses or have specific sizes and
+ * are not managed by the kernel page allocator.
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() - allocate a client and return it
+ * @dev: the global ion device
+ * @name: used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+ const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client: the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client: the client
+ * @len: size of the allocation
+ * @align: requested allocation alignment, lots of hardware blocks
+ * have alignment requirements of some kind
+ * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
+ * heaps will be tried in order from highest to lowest
+ * id
+ * @flags: heap flags, the low 16 bits are consumed by ion, the
+ * high 16 bits are passed on to the respective heap and
+ * can be heap custom
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client: the client
+ * @handle: the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
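+
+/*
+ * Minimal usage sketch (illustrative only): create a client, allocate
+ * 64K from the system heap, then release everything. It assumes a
+ * platform where the system heap id equals ION_HEAP_TYPE_SYSTEM, as in
+ * the dummy driver; idev is the global ion device.
+ */
+static int example_alloc_free(struct ion_device *idev)
+{
+ struct ion_client *client;
+ struct ion_handle *handle;
+
+ client = ion_client_create(idev, "example-client");
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+
+ handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
+ 1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
+ if (IS_ERR(handle)) {
+ ion_client_destroy(client);
+ return PTR_ERR(handle);
+ }
+
+ ion_free(client, handle);
+ ion_client_destroy(client);
+ return 0;
+}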
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client: the client
+ * @handle: the handle
+ * @addr: a pointer to put the address in
+ * @len: a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address. Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead. Returns -EINVAL if the handle is invalid. This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_dma - return an sg_table describing a handle
+ * @client: the client
+ * @handle: the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client: the client
+ * @handle: handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client: the client
+ * @handle: handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client: the client
+ * @handle: the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client: the client
+ * @handle: the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
+ * @client: the client
+ * @fd: the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
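+
+/*
+ * Sketch of the dma-buf round trip (illustrative only): export a handle
+ * as an fd -- normally handed to userspace -- and import it into another
+ * client. Both clients end up referencing the same underlying buffer.
+ */
+static int example_share(struct ion_client *exporter,
+ struct ion_client *importer,
+ struct ion_handle *handle)
+{
+ struct ion_handle *imported;
+ int fd;
+
+ fd = ion_share_dma_buf_fd(exporter, handle);
+ if (fd < 0)
+ return fd;
+
+ imported = ion_import_dma_buf(importer, fd);
+ if (IS_ERR(imported))
+ return PTR_ERR(imported);
+
+ ion_free(importer, imported);
+ return 0;
+}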
+
+#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
new file mode 100644
index 000000000000..3cb05b9b0e93
--- /dev/null
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -0,0 +1,194 @@
+/*
+ * drivers/staging/android/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_carveout_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+};
+
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size,
+ unsigned long align)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+ unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+ if (!offset)
+ return ION_CARVEOUT_ALLOCATE_FAIL;
+
+ return offset;
+}
+
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+ return;
+ gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
+ *len = buffer->size;
+ return 0;
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct sg_table *table;
+ ion_phys_addr_t paddr;
+ int ret;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto err_free;
+
+ paddr = ion_carveout_allocate(heap, size, align);
+ if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ goto err_free_table;
+ }
+
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+ buffer->priv_virt = table;
+
+ return 0;
+
+err_free_table:
+ sg_free_table(table);
+err_free:
+ kfree(table);
+ return ret;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ ion_heap_buffer_zero(buffer);
+
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
+ ion_carveout_free(heap, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+ .allocate = ion_carveout_heap_allocate,
+ .free = ion_carveout_heap_free,
+ .phys = ion_carveout_heap_phys,
+ .map_dma = ion_carveout_heap_map_dma,
+ .unmap_dma = ion_carveout_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_carveout_heap *carveout_heap;
+ int ret;
+
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
+
+ carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+ if (!carveout_heap)
+ return ERR_PTR(-ENOMEM);
+
+ carveout_heap->pool = gen_pool_create(12, -1);
+ if (!carveout_heap->pool) {
+ kfree(carveout_heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ carveout_heap->base = heap_data->base;
+ gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+ -1);
+ carveout_heap->heap.ops = &carveout_heap_ops;
+ carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+ carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+ return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ gen_pool_destroy(carveout_heap->pool);
+ kfree(carveout_heap);
+}
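+
+/*
+ * Illustrative sketch: wiring a reserved region into a carveout heap.
+ * The base and size would normally come from ion_reserve(); the values
+ * here are hypothetical.
+ */
+static struct ion_heap *example_carveout_heap(struct ion_device *idev)
+{
+ struct ion_platform_heap pdata = {
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .id = 1,
+ .name = "example-carveout",
+ .base = 0x90000000,
+ .size = SZ_16M,
+ };
+ struct ion_heap *heap = ion_carveout_heap_create(&pdata);
+
+ if (!IS_ERR(heap))
+ ion_device_add_heap(idev, heap);
+ return heap;
+}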
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
new file mode 100644
index 000000000000..d40f5f831808
--- /dev/null
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_chunk_heap {
+ struct ion_heap heap;
+ struct gen_pool *pool;
+ ion_phys_addr_t base;
+ unsigned long chunk_size;
+ unsigned long size;
+ unsigned long allocated;
+};
+
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret, i;
+ unsigned long num_chunks;
+ unsigned long allocated_size;
+
+ if (align > chunk_heap->chunk_size)
+ return -EINVAL;
+
+ allocated_size = ALIGN(size, chunk_heap->chunk_size);
+ num_chunks = allocated_size / chunk_heap->chunk_size;
+
+ if (allocated_size > chunk_heap->size - chunk_heap->allocated)
+ return -ENOMEM;
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+ ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+ if (ret) {
+ kfree(table);
+ return ret;
+ }
+
+ sg = table->sgl;
+ for (i = 0; i < num_chunks; i++) {
+ unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+ chunk_heap->chunk_size);
+ if (!paddr)
+ goto err;
+ sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+ chunk_heap->chunk_size, 0);
+ sg = sg_next(sg);
+ }
+
+ buffer->priv_virt = table;
+ chunk_heap->allocated += allocated_size;
+ return 0;
+err:
+ sg = table->sgl;
+ for (i -= 1; i >= 0; i--) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg->length);
+ sg = sg_next(sg);
+ }
+ sg_free_table(table);
+ kfree(table);
+ return -ENOMEM;
+}
+
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+ struct sg_table *table = buffer->priv_virt;
+ struct scatterlist *sg;
+ int i;
+ unsigned long allocated_size;
+
+ allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
+
+ ion_heap_buffer_zero(buffer);
+
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+ sg->length);
+ }
+ chunk_heap->allocated -= allocated_size;
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops chunk_heap_ops = {
+ .allocate = ion_chunk_heap_allocate,
+ .free = ion_chunk_heap_free,
+ .map_dma = ion_chunk_heap_map_dma,
+ .unmap_dma = ion_chunk_heap_unmap_dma,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_chunk_heap *chunk_heap;
+ int ret;
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
+
+ chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+ if (!chunk_heap)
+ return ERR_PTR(-ENOMEM);
+
+ chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+ chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+ PAGE_SHIFT, -1);
+ if (!chunk_heap->pool) {
+ ret = -ENOMEM;
+ goto error_gen_pool_create;
+ }
+ chunk_heap->base = heap_data->base;
+ chunk_heap->size = heap_data->size;
+ chunk_heap->allocated = 0;
+
+ gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+ chunk_heap->heap.ops = &chunk_heap_ops;
+ chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+ chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
+ heap_data->size, heap_data->align);
+
+ return &chunk_heap->heap;
+
+error_gen_pool_create:
+ kfree(chunk_heap);
+ return ERR_PTR(ret);
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_chunk_heap *chunk_heap =
+ container_of(heap, struct ion_chunk_heap, heap);
+
+ gen_pool_destroy(chunk_heap->pool);
+ kfree(chunk_heap);
+}
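+
+/*
+ * Illustrative sketch: the chunk heap takes its chunk size through the
+ * ->priv cookie, as the dummy driver does. The values are hypothetical;
+ * gen_pool_create() above effectively rounds the chunk size up to a
+ * power-of-two number of pages.
+ */
+static struct ion_platform_heap example_chunk_heap_data = {
+ .type = ION_HEAP_TYPE_CHUNK,
+ .id = 2,
+ .name = "example-chunk",
+ .base = 0x98000000,
+ .size = SZ_4M,
+ .align = SZ_64K,
+ .priv = (void *)SZ_64K, /* chunk size */
+};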
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
new file mode 100644
index 000000000000..f0f98897e4b9
--- /dev/null
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -0,0 +1,218 @@
+/*
+ * drivers/staging/android/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_cma_heap {
+ struct ion_heap heap;
+ struct device *dev;
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+struct ion_cma_buffer_info {
+ void *cpu_addr;
+ dma_addr_t handle;
+ struct sg_table *table;
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
+{
+ struct page *page = virt_to_page(cpu_addr);
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return 0;
+}
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+ unsigned long len, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info;
+
+ dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ return -EINVAL;
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(dev, "Can't allocate buffer info\n");
+ return -ENOMEM;
+ }
+
+ info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+ GFP_HIGHUSER | __GFP_ZERO);
+
+ if (!info->cpu_addr) {
+ dev_err(dev, "Fail to allocate buffer\n");
+ goto err;
+ }
+
+ info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!info->table) {
+ dev_err(dev, "Fail to allocate sg table\n");
+ goto free_mem;
+ }
+
+ if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr,
+ info->handle, len))
+ goto free_table;
+ /* keep this for memory release */
+ buffer->priv_virt = info;
+ dev_dbg(dev, "Allocate buffer %p\n", buffer);
+ return 0;
+
+free_table:
+ kfree(info->table);
+free_mem:
+ dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+ kfree(info);
+ return -ENOMEM;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Release buffer %p\n", buffer);
+ /* release memory */
+ dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ /* release sg table */
+ sg_free_table(info->table);
+ kfree(info->table);
+ kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+ &info->handle);
+
+ *addr = info->handle;
+ *len = buffer->size;
+
+ return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+ struct device *dev = cma_heap->dev;
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+ return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+ buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct ion_cma_buffer_info *info = buffer->priv_virt;
+ /* kernel memory mapping has been done at allocation time */
+ return info->cpu_addr;
+}
+
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+ .allocate = ion_cma_allocate,
+ .free = ion_cma_free,
+ .map_dma = ion_cma_heap_map_dma,
+ .unmap_dma = ion_cma_heap_unmap_dma,
+ .phys = ion_cma_phys,
+ .map_user = ion_cma_mmap,
+ .map_kernel = ion_cma_map_kernel,
+ .unmap_kernel = ion_cma_unmap_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+ struct ion_cma_heap *cma_heap;
+
+ cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+ if (!cma_heap)
+ return ERR_PTR(-ENOMEM);
+
+ cma_heap->heap.ops = &ion_cma_ops;
+ /*
+ * Get the device from the private heap data; it will later be
+ * used to make the link with the reserved CMA memory.
+ */
+ cma_heap->dev = data->priv;
+ cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+ return &cma_heap->heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+ kfree(cma_heap);
+}
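+
+/*
+ * Illustrative sketch: for ION_HEAP_TYPE_DMA the ->priv cookie is the
+ * struct device that carries the reserved CMA region, as consumed by
+ * ion_cma_heap_create() above. The device pointer is assumed to come
+ * from a platform driver's probe function.
+ */
+static struct ion_heap *example_cma_heap(struct ion_device *idev,
+ struct device *dev)
+{
+ struct ion_platform_heap pdata = {
+ .type = ION_HEAP_TYPE_DMA,
+ .id = 3,
+ .name = "example-cma",
+ .priv = dev, /* device linked to the CMA area */
+ };
+ struct ion_heap *heap = ion_cma_heap_create(&pdata);
+
+ if (!IS_ERR(heap))
+ ion_device_add_heap(idev, heap);
+ return heap;
+}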
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
new file mode 100644
index 000000000000..2bcd762dbb43
--- /dev/null
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -0,0 +1,163 @@
+/*
+ * drivers/staging/android/ion/ion_dummy_driver.c
+ *
+ * Copyright (C) 2013 Linaro, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_device *idev;
+struct ion_heap **heaps;
+
+void *carveout_ptr;
+void *chunk_ptr;
+
+struct ion_platform_heap dummy_heaps[] = {
+ {
+ .id = ION_HEAP_TYPE_SYSTEM,
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .name = "system",
+ },
+ {
+ .id = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
+ .name = "system contig",
+ },
+ {
+ .id = ION_HEAP_TYPE_CARVEOUT,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = "carveout",
+ .size = SZ_4M,
+ },
+ {
+ .id = ION_HEAP_TYPE_CHUNK,
+ .type = ION_HEAP_TYPE_CHUNK,
+ .name = "chunk",
+ .size = SZ_4M,
+ .align = SZ_16K,
+ .priv = (void *)(SZ_16K),
+ },
+ {
+ .id = ION_HEAP_TYPE_DMA,
+ .type = ION_HEAP_TYPE_DMA,
+ .name = "dma",
+ },
+};
+
+struct ion_platform_data dummy_ion_pdata = {
+ .nr = ARRAY_SIZE(dummy_heaps),
+ .heaps = dummy_heaps,
+};
+
+static int __init ion_dummy_init(void)
+{
+ int i, err;
+
+ idev = ion_device_create(NULL);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
+ heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
+ GFP_KERNEL);
+ if (!heaps)
+ return -ENOMEM;
+
+
+ /* Allocate a dummy carveout heap */
+ carveout_ptr = alloc_pages_exact(
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
+ GFP_KERNEL);
+ if (carveout_ptr)
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
+ virt_to_phys(carveout_ptr);
+ else
+ pr_err("ion_dummy: Could not allocate carveout\n");
+
+ /* Allocate a dummy chunk heap */
+ chunk_ptr = alloc_pages_exact(
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
+ GFP_KERNEL);
+ if (chunk_ptr)
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
+ else
+ pr_err("ion_dummy: Could not allocate chunk\n");
+
+ for (i = 0; i < dummy_ion_pdata.nr; i++) {
+ struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
+
+ if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
+ !heap_data->base)
+ continue;
+
+ if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
+ continue;
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ return 0;
+err:
+ for (i = 0; i < dummy_ion_pdata.nr; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+
+ if (carveout_ptr) {
+ free_pages_exact(carveout_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ carveout_ptr = NULL;
+ }
+ if (chunk_ptr) {
+ free_pages_exact(chunk_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ chunk_ptr = NULL;
+ }
+ return err;
+}
+
+static void __exit ion_dummy_exit(void)
+{
+ int i;
+
+ ion_device_destroy(idev);
+
+ for (i = 0; i < dummy_ion_pdata.nr; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+
+ if (carveout_ptr) {
+ free_pages_exact(carveout_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
+ carveout_ptr = NULL;
+ }
+ if (chunk_ptr) {
+ free_pages_exact(chunk_ptr,
+ dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
+ chunk_ptr = NULL;
+ }
+
+
+module_init(ion_dummy_init);
+module_exit(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
new file mode 100644
index 000000000000..551fe2e0bc2d
--- /dev/null
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -0,0 +1,369 @@
+/*
+ * drivers/staging/android/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+void *ion_heap_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ struct scatterlist *sg;
+ int i, j;
+ void *vaddr;
+ pgprot_t pgprot;
+ struct sg_table *table = buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+
+ if (!pages)
+ return NULL;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+ struct page *page = sg_page(sg);
+ BUG_ON(i >= npages);
+ for (j = 0; j < npages_this_entry; j++)
+ *(tmp++) = page++;
+ }
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
+ vfree(pages);
+
+ if (vaddr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table = buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ int i;
+ int ret;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg->length;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len = sg->length - offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+ void *addr = vm_map_ram(pages, num, -1, pgprot);
+ if (!addr)
+ return -ENOMEM;
+ memset(addr, 0, PAGE_SIZE * num);
+ vm_unmap_ram(addr, num);
+
+ return 0;
+}
+
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+ pgprot_t pgprot)
+{
+ int p = 0;
+ int ret = 0;
+ struct sg_page_iter piter;
+ struct page *pages[32];
+
+ for_each_sg_page(sgl, &piter, nents, 0) {
+ pages[p++] = sg_page_iter_page(&piter);
+ if (p == ARRAY_SIZE(pages)) {
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+ if (ret)
+ return ret;
+ p = 0;
+ }
+ }
+ if (p)
+ ret = ion_heap_clear_pages(pages, p, pgprot);
+
+ return ret;
+}
+
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
+ pgprot_t pgprot;
+
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ return ion_heap_sglist_zero(&sg, 1, pgprot);
+}
+
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+ spin_lock(&heap->free_lock);
+ list_add(&buffer->list, &heap->free_list);
+ heap->free_list_size += buffer->size;
+ spin_unlock(&heap->free_lock);
+ wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+ size_t size;
+
+ spin_lock(&heap->free_lock);
+ size = heap->free_list_size;
+ spin_unlock(&heap->free_lock);
+
+ return size;
+}
+
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+ bool skip_pools)
+{
+ struct ion_buffer *buffer;
+ size_t total_drained = 0;
+
+ if (ion_heap_freelist_size(heap) == 0)
+ return 0;
+
+ spin_lock(&heap->free_lock);
+ if (size == 0)
+ size = heap->free_list_size;
+
+ while (!list_empty(&heap->free_list)) {
+ if (total_drained >= size)
+ break;
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ if (skip_pools)
+ buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+ total_drained += buffer->size;
+ spin_unlock(&heap->free_lock);
+ ion_buffer_destroy(buffer);
+ spin_lock(&heap->free_lock);
+ }
+ spin_unlock(&heap->free_lock);
+
+ return total_drained;
+}
+
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+ return _ion_heap_freelist_drain(heap, size, true);
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+ struct ion_heap *heap = data;
+
+ while (true) {
+ struct ion_buffer *buffer;
+
+ wait_event_freezable(heap->waitqueue,
+ ion_heap_freelist_size(heap) > 0);
+
+ spin_lock(&heap->free_lock);
+ if (list_empty(&heap->free_list)) {
+ spin_unlock(&heap->free_lock);
+ continue;
+ }
+ buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+ list);
+ list_del(&buffer->list);
+ heap->free_list_size -= buffer->size;
+ spin_unlock(&heap->free_lock);
+ ion_buffer_destroy(buffer);
+ }
+
+ return 0;
+}
+
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ INIT_LIST_HEAD(&heap->free_list);
+ heap->free_list_size = 0;
+ spin_lock_init(&heap->free_lock);
+ init_waitqueue_head(&heap->waitqueue);
+ heap->task = kthread_run(ion_heap_deferred_free, heap,
+ "%s", heap->name);
+ if (IS_ERR(heap->task)) {
+ pr_err("%s: creating thread for deferred free failed\n",
+ __func__);
+ return PTR_RET(heap->task);
+ }
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
+ return 0;
+}
+
+static int ion_heap_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+ shrinker);
+ int total = 0;
+ int freed = 0;
+ int to_scan = sc->nr_to_scan;
+
+ if (to_scan == 0)
+ goto out;
+
+ /*
+ * shrink the free list first, no point in zeroing the memory if we're
+ * just going to reclaim it. Also, skip any possible page pooling.
+ */
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
+ PAGE_SIZE;
+
+ to_scan -= freed;
+ if (to_scan < 0)
+ to_scan = 0;
+
+out:
+ total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+ if (heap->ops->shrink)
+ total += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+ return total;
+}
+
+void ion_heap_init_shrinker(struct ion_heap *heap)
+{
+ heap->shrinker.shrink = ion_heap_shrink;
+ heap->shrinker.seeks = DEFAULT_SEEKS;
+ heap->shrinker.batch = 0;
+ register_shrinker(&heap->shrinker);
+}
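+
+/*
+ * Illustrative sketch: a custom heap opts in to deferred freeing by
+ * setting the flag (and optionally a ->shrink op) before registration;
+ * the core then calls ion_heap_init_deferred_free() and
+ * ion_heap_init_shrinker() on its behalf. example_heap_ops is
+ * hypothetical and would be filled in by the heap implementation.
+ */
+static struct ion_heap_ops example_heap_ops;
+
+static void example_register_heap(struct ion_device *idev,
+ struct ion_heap *heap)
+{
+ heap->ops = &example_heap_ops;
+ heap->flags = ION_HEAP_FLAG_DEFER_FREE;
+ ion_device_add_heap(idev, heap);
+}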
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct ion_heap *heap = NULL;
+
+ switch (heap_data->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ heap = ion_system_contig_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ heap = ion_system_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ heap = ion_carveout_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_CHUNK:
+ heap = ion_chunk_heap_create(heap_data);
+ break;
+ case ION_HEAP_TYPE_DMA:
+ heap = ion_cma_heap_create(heap_data);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap_data->type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR_OR_NULL(heap)) {
+ pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+ __func__, heap_data->name, heap_data->type,
+ heap_data->base, heap_data->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ heap->name = heap_data->name;
+ heap->id = heap_data->id;
+ return heap;
+}
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+ if (!heap)
+ return;
+
+ switch (heap->type) {
+ case ION_HEAP_TYPE_SYSTEM_CONTIG:
+ ion_system_contig_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_SYSTEM:
+ ion_system_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CARVEOUT:
+ ion_carveout_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_CHUNK:
+ ion_chunk_heap_destroy(heap);
+ break;
+ case ION_HEAP_TYPE_DMA:
+ ion_cma_heap_destroy(heap);
+ break;
+ default:
+ pr_err("%s: Invalid heap type %d\n", __func__,
+ heap->type);
+ }
+}
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
new file mode 100644
index 000000000000..e55bd6303464
--- /dev/null
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -0,0 +1,190 @@
+/*
+ * drivers/staging/android/ion/ion_page_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "ion_priv.h"
+
+struct ion_page_pool_item {
+ struct page *page;
+ struct list_head list;
+};
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+ struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+ if (!page)
+ return NULL;
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
+ return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+ struct page *page)
+{
+ __free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+ struct ion_page_pool_item *item;
+
+ item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ mutex_lock(&pool->mutex);
+ item->page = page;
+ if (PageHighMem(page)) {
+ list_add_tail(&item->list, &pool->high_items);
+ pool->high_count++;
+ } else {
+ list_add_tail(&item->list, &pool->low_items);
+ pool->low_count++;
+ }
+ mutex_unlock(&pool->mutex);
+ return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+ struct ion_page_pool_item *item;
+ struct page *page;
+
+ if (high) {
+ BUG_ON(!pool->high_count);
+ item = list_first_entry(&pool->high_items,
+ struct ion_page_pool_item, list);
+ pool->high_count--;
+ } else {
+ BUG_ON(!pool->low_count);
+ item = list_first_entry(&pool->low_items,
+ struct ion_page_pool_item, list);
+ pool->low_count--;
+ }
+
+ list_del(&item->list);
+ page = item->page;
+ kfree(item);
+ return page;
+}
+
+void *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+ struct page *page = NULL;
+
+ BUG_ON(!pool);
+
+ mutex_lock(&pool->mutex);
+ if (pool->high_count)
+ page = ion_page_pool_remove(pool, true);
+ else if (pool->low_count)
+ page = ion_page_pool_remove(pool, false);
+ mutex_unlock(&pool->mutex);
+
+ if (!page)
+ page = ion_page_pool_alloc_pages(pool);
+
+ return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
+{
+ int ret;
+
+ ret = ion_page_pool_add(pool, page);
+ if (ret)
+ ion_page_pool_free_pages(pool, page);
+}
+
+static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+{
+ int total = 0;
+
+ total += high ? (pool->high_count + pool->low_count) *
+ (1 << pool->order) :
+ pool->low_count * (1 << pool->order);
+ return total;
+}
+
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan)
+{
+ int i;
+ bool high;
+
+ high = !!(gfp_mask & __GFP_HIGHMEM);
+
+ for (i = 0; i < nr_to_scan; i++) {
+ struct page *page;
+
+ mutex_lock(&pool->mutex);
+ if (pool->low_count) {
+ page = ion_page_pool_remove(pool, false);
+ } else if (high && pool->high_count) {
+ page = ion_page_pool_remove(pool, true);
+ } else {
+ mutex_unlock(&pool->mutex);
+ break;
+ }
+ mutex_unlock(&pool->mutex);
+ ion_page_pool_free_pages(pool, page);
+ }
+
+ return ion_page_pool_total(pool, high);
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
+{
+ struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
+ GFP_KERNEL);
+ if (!pool)
+ return NULL;
+ pool->high_count = 0;
+ pool->low_count = 0;
+ INIT_LIST_HEAD(&pool->low_items);
+ INIT_LIST_HEAD(&pool->high_items);
+ pool->gfp_mask = gfp_mask;
+ pool->order = order;
+ mutex_init(&pool->mutex);
+ plist_node_init(&pool->list, order);
+
+ return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+ kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+ return 0;
+}
+
+static void __exit ion_page_pool_exit(void)
+{
+}
+
+module_init(ion_page_pool_init);
+module_exit(ion_page_pool_exit);
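+
+/*
+ * Usage sketch (illustrative only): a heap keeping one pool of order-0
+ * uncached pages. ion_page_pool_alloc() falls back to alloc_pages()
+ * when the pool is empty; ion_page_pool_free() recycles the page unless
+ * tracking it fails.
+ */
+static struct ion_page_pool *example_pool;
+
+static int example_pool_init(void)
+{
+ example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
+ return example_pool ? 0 : -ENOMEM;
+}
+
+static struct page *example_page_get(void)
+{
+ return ion_page_pool_alloc(example_pool);
+}
+
+static void example_page_put(struct page *page)
+{
+ ion_page_pool_free(example_pool, page);
+}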
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
new file mode 100644
index 000000000000..e6f84224ac37
--- /dev/null
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -0,0 +1,423 @@
+/*
+ * drivers/staging/android/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+#include "ion.h"
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+struct ion_mapping {
+ struct device *dev; /* to get a map and dma_ops */
+ struct sg_table sgt;
+ struct kref kref;
+};
+#define NUM_ION_MAPPING 5 /* FIXME: dynamically allocate more than this */
+
+struct ion_importer {
+ struct device *dev;
+ void *priv;
+ void (*delete)(void *);
+};
+#define NUM_ION_IMPORTER 5 /* FIXME: dynamically allocate more than this */
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref: reference count
+ * @node: node in the ion_device buffers tree
+ * @dev: back pointer to the ion_device
+ * @heap: back pointer to the heap the buffer came from
+ * @flags: buffer specific flags
+ * @private_flags: internal buffer specific flags
+ * @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @priv_phys: private data to the buffer representable as
+ * an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock: protects the buffers cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt: number of times the buffer is mapped for dma
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @pages: flat array of pages in the buffer -- used by fault
+ * handler and only valid for buffers that are faulted in
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
+*/
+struct ion_buffer {
+ struct kref ref;
+ union {
+ struct rb_node node;
+ struct list_head list;
+ };
+ struct ion_device *dev;
+ struct ion_heap *heap;
+ unsigned long flags;
+ unsigned long private_flags;
+ size_t size;
+ union {
+ void *priv_virt;
+ ion_phys_addr_t priv_phys;
+ };
+ struct mutex lock;
+ int kmap_cnt;
+ void *vaddr;
+ int dmap_cnt;
+ struct sg_table *sg_table;
+ struct page **pages;
+ struct list_head vmas;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
+
+ struct ion_importer importer[NUM_ION_IMPORTER];
+ struct ion_mapping mapping[NUM_ION_MAPPING];
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate: allocate memory
+ * @free: free memory
+ * @phys: get physical address of a buffer (only defined on
+ * physically contiguous heaps)
+ * @map_dma: map the memory for dma to a scatterlist
+ * @unmap_dma: unmap the memory for dma
+ * @map_kernel: map memory into the kernel
+ * @unmap_kernel: unmap memory from the kernel
+ * @map_user: map memory to userspace
+ * @shrink: reclaim memory from heap-specific caches (optional)
+ *
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being free'd must be truly free'd back to the
+ * system, not put in a page pool or otherwise cached.
+ */
+struct ion_heap_ops {
+ int (*allocate) (struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long align, unsigned long flags);
+ void (*free) (struct ion_buffer *buffer);
+ int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len);
+ struct sg_table *(*map_dma) (struct ion_heap *heap,
+ struct ion_buffer *buffer);
+ void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+ int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
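+
+/*
+ * Sketch of a free op honoring the flag (illustrative only; the
+ * single-page buffer layout and example_pool are hypothetical): pages
+ * may only be recycled into a pool when the flag is clear.
+ */
+static struct ion_page_pool *example_pool;
+
+static void example_heap_free(struct ion_buffer *buffer)
+{
+ struct page *page = buffer->priv_virt;
+
+ if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
+ __free_pages(page, 0); /* truly return it to the system */
+ else
+ ion_page_pool_free(example_pool, page);
+}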
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node: rb node to put the heap on the device's tree of heaps
+ * @dev: back pointer to the ion_device
+ * @type: type of heap
+ * @ops: ops struct as above
+ * @flags: flags
+ * @id: id of heap, also indicates priority of this heap when
+ * allocating. These are specified by platform data and
+ * MUST be unique
+ * @name: used for debugging
+ * @shrinker: a shrinker for the heap
+ * @free_list: free list head if deferred free is used
+ * @free_list_size: size of the deferred free list in bytes
+ * @lock: protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+ struct plist_node node;
+ struct ion_device *dev;
+ enum ion_heap_type type;
+ struct ion_heap_ops *ops;
+ unsigned long flags;
+ unsigned int id;
+ const char *name;
+ struct shrinker shrinker;
+ struct list_head free_list;
+ size_t free_list_size;
+ spinlock_t free_lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer: buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl: arch specific ioctl function if applicable
+ *
+ * returns a valid device or an ERR_PTR on failure
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+ (struct ion_client *client,
+ unsigned int cmd,
+ unsigned long arg));
+
+/**
+ * ion_device_destroy - frees a device and its resources
+ * @dev: the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev: the device
+ * @heap: the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+ struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap: the heap
+ * @buffer: the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ * list, skipping any heap-specific
+ * pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely free'd back to the system. If you're free'ing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+ size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap: the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+ unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+ unsigned long size);
+/**
+ * The carveout heap returns physical addresses, since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre-allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, ie any cached mappings have
+ * been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @mutex: lock protecting this struct and especially the count
+ * and item lists
+ * @gfp_mask: gfp_mask to use from alloc
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
+ *
+ * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, ie any cached mappings
+ * have been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool: the pool
+ * @gfp_mask: the memory type to reclaim
+ * @nr_to_scan: number of items to shrink in pages
+ *
+ * returns the number of items freed in pages
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan);
+
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ * device
+ * @dev: the device the pages will be used with
+ * @page: the first page to be flushed
+ * @size: size in bytes of region to be flushed
+ * @dir: direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir);
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
new file mode 100644
index 000000000000..c92363356ae1
--- /dev/null
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -0,0 +1,446 @@
+/*
+ * drivers/staging/android/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+ __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+ int i;
+ for (i = 0; i < num_orders; i++)
+ if (order == orders[i])
+ return i;
+ BUG();
+ return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+ return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+ struct ion_heap heap;
+ struct ion_page_pool **pools;
+};
+
+struct page_info {
+ struct page *page;
+ unsigned int order;
+ struct list_head list;
+};
+
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long order)
+{
+ bool cached = ion_buffer_cached(buffer);
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ struct page *page;
+
+ if (!cached) {
+ page = ion_page_pool_alloc(pool);
+ } else {
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (order > 4)
+ gfp_flags = high_order_gfp_flags;
+ page = alloc_pages(gfp_flags, order);
+ if (!page)
+ return NULL;
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ }
+ if (!page)
+ return NULL;
+
+ return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+ struct ion_buffer *buffer, struct page *page,
+ unsigned int order)
+{
+ bool cached = ion_buffer_cached(buffer);
+
+ if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
+ struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+ ion_page_pool_free(pool, page);
+ } else {
+ __free_pages(page, order);
+ }
+}
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ struct page_info *info;
+ int i;
+
+ info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ for (i = 0; i < num_orders; i++) {
+ if (size < order_to_size(orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_buffer_page(heap, buffer, orders[i]);
+ if (!page)
+ continue;
+
+ info->page = page;
+ info->order = orders[i];
+ INIT_LIST_HEAD(&info->list);
+ return info;
+ }
+ kfree(info);
+
+ return NULL;
+}
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret;
+ struct list_head pages;
+ struct page_info *info, *tmp_info;
+ int i = 0;
+ unsigned long size_remaining = PAGE_ALIGN(size);
+ unsigned int max_order = orders[0];
+
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
+ if (size / PAGE_SIZE > totalram_pages / 2)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pages);
+ while (size_remaining > 0) {
+ info = alloc_largest_available(sys_heap, buffer, size_remaining,
+ max_order);
+ if (!info)
+ goto err;
+ list_add_tail(&info->list, &pages);
+ size_remaining -= (1 << info->order) * PAGE_SIZE;
+ max_order = info->order;
+ i++;
+ }
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
+ goto err;
+
+ ret = sg_alloc_table(table, i, GFP_KERNEL);
+ if (ret)
+ goto err1;
+
+ sg = table->sgl;
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ struct page *page = info->page;
+ sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ list_del(&info->list);
+ kfree(info);
+ }
+
+ buffer->priv_virt = table;
+ return 0;
+err1:
+ kfree(table);
+err:
+ list_for_each_entry_safe(info, tmp_info, &pages, list) {
+ free_buffer_page(sys_heap, buffer, info->page, info->order);
+ kfree(info);
+ }
+ return -ENOMEM;
+}
+
+static void ion_system_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *heap = buffer->heap;
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ struct sg_table *table = buffer->sg_table;
+ bool cached = ion_buffer_cached(buffer);
+ struct scatterlist *sg;
+ LIST_HEAD(pages);
+ int i;
+
+ /*
+ * uncached pages come from the page pools, zero them before returning
+ * for security purposes (other allocations are zeroed at alloc time)
+ */
+ if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+ ion_heap_buffer_zero(buffer);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ free_buffer_page(sys_heap, buffer, sg_page(sg),
+ get_order(sg->length));
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+ int nr_to_scan)
+{
+ struct ion_system_heap *sys_heap;
+ int nr_total = 0;
+ int i;
+
+ sys_heap = container_of(heap, struct ion_system_heap, heap);
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+ }
+
+ return nr_total;
+}
+
+static struct ion_heap_ops system_heap_ops = {
+ .allocate = ion_system_heap_allocate,
+ .free = ion_system_heap_free,
+ .map_dma = ion_system_heap_map_dma,
+ .unmap_dma = ion_system_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+ .shrink = ion_system_heap_shrink,
+};
+
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+ void *unused)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool = sys_heap->pools[i];
+ seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+ pool->high_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->high_count);
+ seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+ pool->low_count, pool->order,
+ (1 << pool->order) * PAGE_SIZE * pool->low_count);
+ }
+ return 0;
+}
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_system_heap *heap;
+ int i;
+
+ heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->heap.ops = &system_heap_ops;
+ heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+ heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+ GFP_KERNEL);
+ if (!heap->pools)
+ goto err_alloc_pools;
+ for (i = 0; i < num_orders; i++) {
+ struct ion_page_pool *pool;
+ gfp_t gfp_flags = low_order_gfp_flags;
+
+ if (orders[i] > 4)
+ gfp_flags = high_order_gfp_flags;
+ pool = ion_page_pool_create(gfp_flags, orders[i]);
+ if (!pool)
+ goto err_create_pool;
+ heap->pools[i] = pool;
+ }
+
+ heap->heap.debug_show = ion_system_heap_debug_show;
+ return &heap->heap;
+err_create_pool:
+ for (i = 0; i < num_orders; i++)
+ if (heap->pools[i])
+ ion_page_pool_destroy(heap->pools[i]);
+ kfree(heap->pools);
+err_alloc_pools:
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_system_heap *sys_heap = container_of(heap,
+ struct ion_system_heap,
+ heap);
+ int i;
+
+ for (i = 0; i < num_orders; i++)
+ ion_page_pool_destroy(sys_heap->pools[i]);
+ kfree(sys_heap->pools);
+ kfree(sys_heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long len,
+ unsigned long align,
+ unsigned long flags)
+{
+ int order = get_order(len);
+ struct page *page;
+ struct sg_table *table;
+ unsigned long i;
+ int ret;
+
+ if (align > (PAGE_SIZE << order))
+ return -EINVAL;
+
+ page = alloc_pages(low_order_gfp_flags, order);
+ if (!page)
+ return -ENOMEM;
+
+ split_page(page, order);
+
+ len = PAGE_ALIGN(len);
+ for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+ __free_page(page + i);
+
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ sg_set_page(table->sgl, page, len, 0);
+
+ buffer->priv_virt = table;
+
+ ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
+ return 0;
+
+out:
+ for (i = 0; i < len >> PAGE_SHIFT; i++)
+ __free_page(page + i);
+ kfree(table);
+ return ret;
+}
+
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+ unsigned long i;
+
+ for (i = 0; i < pages; i++)
+ __free_page(page + i);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ *addr = page_to_phys(page);
+ *len = buffer->size;
+ return 0;
+}
+
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ return buffer->priv_virt;
+}
+
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+ .allocate = ion_system_contig_heap_allocate,
+ .free = ion_system_contig_heap_free,
+ .phys = ion_system_contig_heap_phys,
+ .map_dma = ion_system_contig_heap_map_dma,
+ .unmap_dma = ion_system_contig_heap_unmap_dma,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+ struct ion_heap *heap;
+
+ heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->ops = &kmalloc_ops;
+ heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+ return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}
+
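To make the allocation strategy in alloc_largest_available() concrete: with
4KiB pages, orders {8, 4, 0} correspond to 1MiB, 64KiB and 4KiB chunks, and
each round takes the biggest chunk that still fits, never exceeding the order
used in the previous round. A hypothetical 1,310,720-byte (1.25MiB) request
decomposes as:

    size_remaining = 1,310,720  max_order = 8
      order 8 (1MiB)   -> size_remaining = 262,144, max_order = 8
      order 4 (64KiB)  -> size_remaining = 196,608, max_order = 4
      order 4 (64KiB)  -> size_remaining = 131,072
      order 4 (64KiB)  -> size_remaining =  65,536
      order 4 (64KiB)  -> size_remaining =       0
    => sg_alloc_table(table, 5, GFP_KERNEL), one sg entry per chunk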
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
new file mode 100644
index 000000000000..654acb5c8eba
--- /dev/null
+++ b/drivers/staging/android/ion/ion_test.c
@@ -0,0 +1,282 @@
+/*
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "ion-test: " fmt
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ion.h"
+#include "../uapi/ion_test.h"
+
+#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
+
+struct ion_test_device {
+ struct miscdevice misc;
+};
+
+struct ion_test_data {
+ struct dma_buf *dma_buf;
+ struct device *dev;
+};
+
+static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
+ void __user *ptr, size_t offset, size_t size, bool write)
+{
+ int ret = 0;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct sg_page_iter sg_iter;
+ unsigned long offset_page;
+
+ attach = dma_buf_attach(dma_buf, dev);
+ if (IS_ERR(attach))
+ return PTR_ERR(attach);
+
+ table = dma_buf_map_attachment(attach, dir);
+ if (IS_ERR(table)) {
+ /* don't leak the attachment on failure */
+ dma_buf_detach(dma_buf, attach);
+ return PTR_ERR(table);
+ }
+
+ offset_page = offset >> PAGE_SHIFT;
+ offset %= PAGE_SIZE;
+
+ for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
+ struct page *page = sg_page_iter_page(&sg_iter);
+ void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
+ size_t to_copy = PAGE_SIZE - offset;
+
+ to_copy = min(to_copy, size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (write)
+ ret = copy_from_user(vaddr + offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + offset, to_copy);
+
+ vunmap(vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ size -= to_copy;
+ if (!size)
+ break;
+ ptr += to_copy;
+ offset = 0;
+ }
+
+err:
+ dma_buf_unmap_attachment(attach, table, dir);
+ dma_buf_detach(dma_buf, attach);
+ return ret;
+}
+
+static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
+ size_t offset, size_t size, bool write)
+{
+ int ret;
+ unsigned long page_offset = offset >> PAGE_SHIFT;
+ size_t copy_offset = offset % PAGE_SIZE;
+ size_t copy_size = size;
+ enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (offset > dma_buf->size || size > dma_buf->size - offset)
+ return -EINVAL;
+
+ ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+ if (ret)
+ return ret;
+
+ while (copy_size > 0) {
+ size_t to_copy;
+ void *vaddr = dma_buf_kmap(dma_buf, page_offset);
+
+ if (!vaddr) {
+ ret = -ENOMEM; /* don't return success when kmap fails */
+ goto err;
+ }
+
+ to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
+
+ if (write)
+ ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
+ else
+ ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
+
+ dma_buf_kunmap(dma_buf, page_offset, vaddr);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ copy_size -= to_copy;
+ ptr += to_copy;
+ page_offset++;
+ copy_offset = 0;
+ }
+err:
+ dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+ return ret;
+}
+
+static long ion_test_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct ion_test_data *test_data = filp->private_data;
+ int ret = 0;
+
+ union {
+ struct ion_test_rw_data test_rw;
+ } data;
+
+ if (_IOC_SIZE(cmd) > sizeof(data))
+ return -EINVAL;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ION_IOC_TEST_SET_FD:
+ {
+ struct dma_buf *dma_buf = NULL;
+ int fd = arg;
+
+ if (fd >= 0) {
+ dma_buf = dma_buf_get(fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+ }
+ if (test_data->dma_buf)
+ dma_buf_put(test_data->dma_buf);
+ test_data->dma_buf = dma_buf;
+ break;
+ }
+ case ION_IOC_TEST_DMA_MAPPING:
+ {
+ ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ case ION_IOC_TEST_KERNEL_MAPPING:
+ {
+ ret = ion_handle_test_kernel(test_data->dma_buf,
+ u64_to_uptr(data.test_rw.ptr),
+ data.test_rw.offset, data.test_rw.size,
+ data.test_rw.write);
+ break;
+ }
+ default:
+ return -ENOTTY;
+ }
+
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+static int ion_test_open(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data;
+ struct miscdevice *miscdev = file->private_data;
+
+ data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = miscdev->parent;
+
+ file->private_data = data;
+
+ return 0;
+}
+
+static int ion_test_release(struct inode *inode, struct file *file)
+{
+ struct ion_test_data *data = file->private_data;
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct file_operations ion_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ion_test_ioctl,
+ .compat_ioctl = ion_test_ioctl,
+ .open = ion_test_open,
+ .release = ion_test_release,
+};
+
+static int __init ion_test_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ion_test_device *testdev;
+
+ testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
+ GFP_KERNEL);
+ if (!testdev)
+ return -ENOMEM;
+
+ testdev->misc.minor = MISC_DYNAMIC_MINOR;
+ testdev->misc.name = "ion-test";
+ testdev->misc.fops = &ion_test_fops;
+ testdev->misc.parent = &pdev->dev;
+ ret = misc_register(&testdev->misc);
+ if (ret) {
+ pr_err("failed to register misc device.\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, testdev);
+
+ return 0;
+}
+
+static struct platform_driver ion_test_platform_driver = {
+ .driver = {
+ .name = "ion-test",
+ },
+};
+
+static struct platform_device *ion_test_pdev;
+
+static int __init ion_test_init(void)
+{
+ ion_test_pdev = platform_device_register_simple("ion-test", -1, NULL, 0);
+ if (IS_ERR(ion_test_pdev))
+ return PTR_ERR(ion_test_pdev);
+
+ return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
+}
+
+static void __exit ion_test_exit(void)
+{
+ platform_driver_unregister(&ion_test_platform_driver);
+ /* the device registered in init must be torn down too */
+ platform_device_unregister(ion_test_pdev);
+}
+
+module_init(ion_test_init);
+module_exit(ion_test_exit);
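A userspace sketch of how this driver is exercised, assuming a dma-buf fd
obtained elsewhere (for instance via ION_IOC_SHARE); the helper name and
buffer contents are illustrative:

    /* userspace test-harness sketch, not part of the driver */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "ion_test.h"

    int write_via_kernel_mapping(int dmabuf_fd)
    {
            char buf[64] = "hello ion";
            struct ion_test_rw_data rw = {
                    .ptr = (__u64)(unsigned long)buf,
                    .offset = 0,
                    .size = sizeof(buf),
                    .write = 1,     /* copy buf into the attached buffer */
            };
            int fd = open("/dev/ion-test", O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, ION_IOC_TEST_SET_FD, dmabuf_fd) < 0 ||
                ioctl(fd, ION_IOC_TEST_KERNEL_MAPPING, &rw) < 0) {
                    close(fd);
                    return -1;
            }
            ioctl(fd, ION_IOC_TEST_SET_FD, -1);     /* drop the dma-buf */
            close(fd);
            return 0;
    }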
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
new file mode 100644
index 000000000000..11cd003fb08f
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/Makefile
@@ -0,0 +1 @@
+obj-y += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
new file mode 100644
index 000000000000..3474c65f87fa
--- /dev/null
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -0,0 +1,84 @@
+/*
+ * drivers/gpu/tegra/tegra_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion.h"
+#include "../ion_priv.h"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+static int tegra_ion_probe(struct platform_device *pdev)
+{
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+ int i;
+
+ num_heaps = pdata->nr;
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+ if (!heaps)
+ return -ENOMEM;
+
+ idev = ion_device_create(NULL);
+ if (IS_ERR_OR_NULL(idev)) {
+ kfree(heaps);
+ return PTR_ERR(idev);
+ }
+
+ /* create the heaps as specified in the board file */
+ for (i = 0; i < num_heaps; i++) {
+ struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+ heaps[i] = ion_heap_create(heap_data);
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(idev, heaps[i]);
+ }
+ platform_set_drvdata(pdev, idev);
+ return 0;
+err:
+ for (i = 0; i < num_heaps; i++) {
+ if (heaps[i])
+ ion_heap_destroy(heaps[i]);
+ }
+ kfree(heaps);
+ return err;
+}
+
+static int tegra_ion_remove(struct platform_device *pdev)
+{
+ struct ion_device *idev = platform_get_drvdata(pdev);
+ int i;
+
+ ion_device_destroy(idev);
+ for (i = 0; i < num_heaps; i++)
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+ return 0;
+}
+
+static struct platform_driver ion_driver = {
+ .probe = tegra_ion_probe,
+ .remove = tegra_ion_remove,
+ .driver = { .name = "ion-tegra" }
+};
+
+module_platform_driver(ion_driver);
+
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index fe74494868ef..eeac2918448d 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -39,8 +39,10 @@
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/rcupdate.h>
-#include <linux/profile.h>
#include <linux/notifier.h>
+#ifdef CONFIG_TEGRA_NVMAP
+#include <linux/nvmap.h>
+#endif
static uint32_t lowmem_debug_level = 1;
static short lowmem_adj[6] = {
@@ -74,20 +76,29 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
int tasksize;
int i;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+ int minfree = 0;
int selected_tasksize = 0;
short selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
- int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+ struct sysinfo swap_info;
+ int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages
+#ifdef CONFIG_TEGRA_NVMAP
+ + nvmap_page_pool_get_unused_pages()
+#endif
+ ;
int other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM);
+ si_swapinfo(&swap_info);
+ other_free += swap_info.freeswap;
+
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
if (lowmem_minfree_size < array_size)
array_size = lowmem_minfree_size;
for (i = 0; i < array_size; i++) {
- if (other_free < lowmem_minfree[i] &&
- other_file < lowmem_minfree[i]) {
+ minfree = lowmem_minfree[i];
+ if (other_free < minfree && other_file < minfree) {
min_score_adj = lowmem_adj[i];
break;
}
@@ -144,13 +155,22 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
- lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
- p->pid, p->comm, oom_score_adj, tasksize);
+ lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+ p->comm, p->pid, oom_score_adj, tasksize);
}
if (selected) {
- lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
- selected->pid, selected->comm,
- selected_oom_score_adj, selected_tasksize);
+ lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
+ " to free %ldkB on behalf of '%s' (%d) because\n" \
+ " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
+ " Free memory is %ldkB above reserved\n",
+ selected->comm, selected->pid,
+ selected_oom_score_adj,
+ selected_tasksize * (long)(PAGE_SIZE / 1024),
+ current->comm, current->pid,
+ other_file * (long)(PAGE_SIZE / 1024),
+ minfree * (long)(PAGE_SIZE / 1024),
+ min_score_adj,
+ other_free * (long)(PAGE_SIZE / 1024));
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
@@ -178,9 +198,94 @@ static void __exit lowmem_exit(void)
unregister_shrinker(&lowmem_shrinker);
}
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
+{
+ if (oom_adj == OOM_ADJUST_MAX)
+ return OOM_SCORE_ADJ_MAX;
+ else
+ return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+}
+
+static void lowmem_autodetect_oom_adj_values(void)
+{
+ int i;
+ short oom_adj;
+ short oom_score_adj;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+
+ if (array_size <= 0)
+ return;
+
+ oom_adj = lowmem_adj[array_size - 1];
+ if (oom_adj > OOM_ADJUST_MAX)
+ return;
+
+ oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+ if (oom_score_adj <= OOM_ADJUST_MAX)
+ return;
+
+ lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
+ for (i = 0; i < array_size; i++) {
+ oom_adj = lowmem_adj[i];
+ oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+ lowmem_adj[i] = oom_score_adj;
+ lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
+ oom_adj, oom_score_adj);
+ }
+}
+
+static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_array_ops.set(val, kp);
+
+ /* HACK: Autodetect oom_adj values in lowmem_adj array */
+ lowmem_autodetect_oom_adj_values();
+
+ return ret;
+}
+
+static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
+{
+ return param_array_ops.get(buffer, kp);
+}
+
+static void lowmem_adj_array_free(void *arg)
+{
+ param_array_ops.free(arg);
+}
+
+static struct kernel_param_ops lowmem_adj_array_ops = {
+ .set = lowmem_adj_array_set,
+ .get = lowmem_adj_array_get,
+ .free = lowmem_adj_array_free,
+};
+
+static const struct kparam_array __param_arr_adj = {
+ .max = ARRAY_SIZE(lowmem_adj),
+ .num = &lowmem_adj_size,
+ .ops = &param_ops_short,
+ .elemsize = sizeof(lowmem_adj[0]),
+ .elem = lowmem_adj,
+};
+#endif
+
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+__module_param_call(MODULE_PARAM_PREFIX, adj,
+ &lowmem_adj_array_ops,
+ .arr = &__param_arr_adj,
+ S_IRUGO | S_IWUSR, -1);
+__MODULE_PARM_TYPE(adj, "array of short");
+#else
module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size,
S_IRUGO | S_IWUSR);
+#endif
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
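For reference, with OOM_ADJUST_MAX = 15, OOM_DISABLE = -17 and
OOM_SCORE_ADJ_MAX = 1000, the autodetect path above maps oom_adj to
oom_adj * 1000 / 17, special-casing oom_adj = 15 to 1000. Writing the classic
defaults {0, 1, 6, 12} to the adj parameter would therefore convert (integer
division):

    oom_adj  0 => oom_score_adj    0
    oom_adj  1 => oom_score_adj   58
    oom_adj  6 => oom_score_adj  352
    oom_adj 12 => oom_score_adj  705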
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040be5f18..1a50669ec8a9 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -18,10 +18,9 @@
#define _LINUX_SW_SYNC_H
#include <linux/types.h>
-
-#ifdef __KERNEL__
-
+#include <linux/kconfig.h>
#include "sync.h"
+#include "uapi/sw_sync.h"
struct sw_sync_timeline {
struct sync_timeline obj;
@@ -35,24 +34,26 @@ struct sw_sync_pt {
u32 value;
};
+#if IS_ENABLED(CONFIG_SW_SYNC)
struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
-
-#endif /* __KERNEL __ */
-
-struct sw_sync_create_fence_data {
- __u32 value;
- char name[32];
- __s32 fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC 'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
- struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+ return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+ u32 value)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* _LINUX_SW_SYNC_H */
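A kernel-side sketch of the API above. With CONFIG_SW_SYNC disabled, the new
inline stubs let the same code compile and simply yield NULL; the timeline
name and helper functions are illustrative:

    /* hypothetical in-kernel user of sw_sync */
    static struct sw_sync_timeline *example_tl;
    static u32 example_value;

    static struct sync_pt *example_fence_next(void)
    {
            if (!example_tl)
                    example_tl = sw_sync_timeline_create("example");
            if (!example_tl)
                    return NULL;    /* CONFIG_SW_SYNC=n or allocation failure */

            /* a point that signals once the timeline reaches this value */
            return sw_sync_pt_create(example_tl, ++example_value);
    }

    static void example_work_done(void)
    {
            /* advance the timeline, signalling any points now reached */
            sw_sync_timeline_inc(example_tl, 1);
    }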
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 3893a3574769..f0df89ec9b35 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -34,6 +34,7 @@
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
+static void sync_fence_dump(struct sync_fence *fence);
static void sync_dump(void);
static LIST_HEAD(sync_timeline_list_head);
@@ -79,27 +80,27 @@ static void sync_timeline_free(struct kref *kref)
container_of(kref, struct sync_timeline, kref);
unsigned long flags;
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_del(&obj->sync_timeline_list);
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
kfree(obj);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
obj->destroyed = true;
+ smp_wmb();
/*
- * If this is not the last reference, signal any children
- * that their parent is going away.
+ * signal any children that their parent is going away.
*/
+ sync_timeline_signal(obj);
- if (!kref_put(&obj->kref, sync_timeline_free))
- sync_timeline_signal(obj);
+ kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
@@ -613,6 +614,7 @@ int sync_fence_wait(struct sync_fence *fence, long timeout)
if (fence->status < 0) {
pr_info("fence error %d on [%p]\n", fence->status, fence);
+ sync_fence_dump(fence);
sync_dump();
return fence->status;
}
@@ -621,6 +623,7 @@ int sync_fence_wait(struct sync_fence *fence, long timeout)
if (timeout > 0) {
pr_info("fence timeout on [%p] after %dms\n", fence,
jiffies_to_msecs(timeout));
+ sync_fence_dump(fence);
sync_dump();
}
return -ETIME;
@@ -842,6 +845,21 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd,
}
}
+static void sync_fence_dump(struct sync_fence *fence)
+{
+ struct sync_pt *pt;
+ char val[32];
+
+ list_for_each_entry(pt, &fence->pt_list_head, pt_list) {
+ val[0] = '\0';
+ if (pt->parent->ops->pt_value_str)
+ pt->parent->ops->pt_value_str(pt, val, sizeof(val));
+
+ pr_info("name=%s, value=%s\n", pt->parent->name, val);
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
@@ -1002,11 +1020,11 @@ void sync_dump(void)
if ((s.count - i) > DUMP_CHUNK) {
char c = s.buf[i + DUMP_CHUNK];
s.buf[i + DUMP_CHUNK] = 0;
- pr_cont("%s", s.buf + i);
+ pr_debug("%s", s.buf + i);
s.buf[i + DUMP_CHUNK] = c;
} else {
s.buf[s.count] = 0;
- pr_cont("%s", s.buf + i);
+ pr_debug("%s", s.buf + i);
}
}
}
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index 38ea986dc70f..75da9e85ac69 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -14,14 +14,14 @@
#define _LINUX_SYNC_H
#include <linux/types.h>
-#ifdef __KERNEL__
-
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include "uapi/sync.h"
+
struct sync_timeline;
struct sync_pt;
struct sync_fence;
@@ -341,86 +341,4 @@ int sync_fence_cancel_async(struct sync_fence *fence,
*/
int sync_fence_wait(struct sync_fence *fence, long timeout);
-#endif /* __KERNEL__ */
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
- * @name: name of new fence
- * @fence: returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
- __s32 fd2; /* fd of second fence */
- char name[32]; /* name of new fence */
- __s32 fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_pt_info - detailed sync_pt information
- * @len: length of sync_pt_info including any driver_data
- * @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implmenting the parent
- * @status: status of the sync_pt 0:active 1:signaled <0:error
- * @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependant data
- */
-struct sync_pt_info {
- __u32 len;
- char obj_name[32];
- char driver_name[32];
- __s32 status;
- __u64 timestamp_ns;
-
- __u8 driver_data[0];
-};
-
-/**
- * struct sync_fence_info_data - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data reutnred to userspace
- * including pt_info.
- * @name: name of fence
- * @status: status of fence. 1: signaled 0:active <0:error
- * @pt_info: a sync_pt_info struct for every sync_pt in the fence
- */
-struct sync_fence_info_data {
- __u32 len;
- char name[32];
- __s32 status;
-
- __u8 pt_info[0];
-};
-
-#define SYNC_IOC_MAGIC '>'
-
-/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds. Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To itterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
- struct sync_fence_info_data)
-
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
index ec9e2ae2de0d..b8dde8917f39 100644
--- a/drivers/staging/android/timed_output.c
+++ b/drivers/staging/android/timed_output.c
@@ -103,8 +103,8 @@ void timed_output_dev_unregister(struct timed_output_dev *tdev)
{
tdev->enable(tdev, 0);
device_remove_file(tdev->dev, &dev_attr_enable);
- device_destroy(timed_output_class, MKDEV(0, tdev->index));
dev_set_drvdata(tdev->dev, NULL);
+ device_destroy(timed_output_class, MKDEV(0, tdev->index));
}
EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h
new file mode 100644
index 000000000000..af3d203e4d9d
--- /dev/null
+++ b/drivers/staging/android/uapi/android_alarm.h
@@ -0,0 +1,65 @@
+/* drivers/staging/android/uapi/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ANDROID_ALARM_H
+#define _UAPI_LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+ ANDROID_ALARM_WAKENUP_BY_RTCCHIP = 11,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_WAKENUP_BY_RTCCHIP_MASK =
+ 1U << ANDROID_ALARM_WAKENUP_BY_RTCCHIP,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) ((cmd) & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#endif
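To make the command encoding concrete: ALARM_IOW() packs the alarm type into
the upper nibble of the ioctl nr field, which the two helper macros above
unpack again. A hypothetical trace:

    cmd = ANDROID_ALARM_SET(ANDROID_ALARM_RTC)           /* type = 1 */
    _IOC_NR(cmd)                     = 2 | (1 << 4) = 0x12
    ANDROID_ALARM_IOCTL_TO_TYPE(cmd) = 0x12 >> 4    = 1  /* ANDROID_ALARM_RTC */
    ANDROID_ALARM_BASE_CMD(cmd)      = ANDROID_ALARM_SET(0)  /* type nibble cleared */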
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644
index 000000000000..ba4743c71d6b
--- /dev/null
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -0,0 +1,47 @@
+/*
+ * drivers/staging/android/uapi/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+
+#endif /* _UAPI_LINUX_ASHMEM_H */
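A minimal userspace sketch of the ioctl flow, assuming the conventional
/dev/ashmem device node (the helper name is illustrative and error handling
is abbreviated):

    /* userspace sketch: create a named, sized ashmem region */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "ashmem.h"

    int ashmem_create(const char *name, size_t size)
    {
            int fd = open("/dev/ashmem", O_RDWR);

            if (fd < 0)
                    return -1;
            /* optional; the name shows up in /proc/<pid>/maps */
            ioctl(fd, ASHMEM_SET_NAME, name);
            if (ioctl(fd, ASHMEM_SET_SIZE, size) < 0) {
                    close(fd);
                    return -1;
            }
            /* mmap() the fd, or keep it around for ASHMEM_PIN/UNPIN */
            return fd;
    }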
diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h
new file mode 100644
index 000000000000..4098c502fc36
--- /dev/null
+++ b/drivers/staging/android/uapi/binder.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+#ifdef BINDER_IPC_32BIT
+typedef __u32 binder_size_t;
+typedef __u32 binder_uintptr_t;
+#else
+typedef __u64 binder_size_t;
+typedef __u64 binder_uintptr_t;
+#endif
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ __u32 type;
+ __u32 flags;
+
+ /* 8 bytes of data. */
+ union {
+ binder_uintptr_t binder; /* local object */
+ __u32 handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ binder_uintptr_t cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ binder_size_t write_size; /* bytes to write */
+ binder_size_t write_consumed; /* bytes consumed by driver */
+ binder_uintptr_t write_buffer;
+ binder_size_t read_size; /* bytes to read */
+ binder_size_t read_consumed; /* bytes consumed by driver */
+ binder_uintptr_t read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ __s32 protocol_version;
+};
+
+/* This is the current protocol version. */
+#ifdef BINDER_IPC_32BIT
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+#else
+#define BINDER_CURRENT_PROTOCOL_VERSION 8
+#endif
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
+#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted. This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ __u32 handle; /* target descriptor of command transaction */
+ binder_uintptr_t ptr; /* target descriptor of return transaction */
+ } target;
+ binder_uintptr_t cookie; /* target object cookie */
+ __u32 code; /* transaction command */
+
+ /* General information about the transaction. */
+ __u32 flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ binder_size_t data_size; /* number of bytes of data */
+ binder_size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ binder_uintptr_t buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ binder_uintptr_t offsets;
+ } ptr;
+ __u8 buf[8];
+ } data;
+};
+
+struct binder_ptr_cookie {
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+};
+
+struct binder_handle_cookie {
+ __u32 handle;
+ binder_uintptr_t cookie;
+} __attribute__((packed));
+
+struct binder_pri_desc {
+ __s32 priority;
+ __u32 desc;
+};
+
+struct binder_pri_ptr_cookie {
+ __s32 priority;
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+};
+
+enum binder_driver_return_protocol {
+ BR_ERROR = _IOR('r', 0, __s32),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum binder_driver_command_protocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, __u32),
+ BC_ACQUIRE = _IOW('c', 5, __u32),
+ BC_RELEASE = _IOW('c', 6, __u32),
+ BC_DECREFS = _IOW('c', 7, __u32),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_handle_cookie),
+ /*
+ * int: handle
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
+
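A userspace sketch of the open/version handshake, following the EINTR retry
discipline from the NOTE above (the helper name is illustrative):

    /* userspace sketch: open binder and verify the protocol version */
    #include <errno.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "binder.h"

    int binder_open_checked(void)
    {
            struct binder_version vers;
            int ret, fd = open("/dev/binder", O_RDWR);

            if (fd < 0)
                    return -1;
            do {
                    ret = ioctl(fd, BINDER_VERSION, &vers);
            } while (ret < 0 && errno == EINTR);    /* EINTR: retry */
            if (ret < 0 ||
                vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
                    /* ECONNREFUSED here would mean: stop and exit */
                    close(fd);
                    return -1;
            }
            return fd;
    }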
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
new file mode 100644
index 000000000000..f09e7c154d69
--- /dev/null
+++ b/drivers/staging/android/uapi/ion.h
@@ -0,0 +1,196 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_type - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
+ * carveout heap, allocations are physically
+ * contiguous
+ * @ION_HEAP_TYPE_CHUNK: memory allocated from a prereserved heap
+ * in fixed-size chunks
+ * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
+ * @ION_HEAP_TYPE_CUSTOM: first type id reserved for device
+ * specific heaps
+ * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
+ * is used to identify the heaps, so only 32
+ * total heap types are supported
+ */
+enum ion_heap_type {
+ ION_HEAP_TYPE_SYSTEM,
+ ION_HEAP_TYPE_SYSTEM_CONTIG,
+ ION_HEAP_TYPE_CARVEOUT,
+ ION_HEAP_TYPE_CHUNK,
+ ION_HEAP_TYPE_DMA,
+ ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps are
+ always at the end of this enum */
+ ION_NUM_HEAPS = 16,
+};
+
+#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
+#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
+#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
+
+#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached; ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be
+ created at mmap time; if this is
+ set, caches must be managed
+ manually */
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * Create a client by opening /dev/ion.
+ * Most operations are handled via the following ioctls.
+ *
+ */
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len: size of the allocation
+ * @align: required alignment of the allocation
+ * @heap_id_mask: mask of heap ids to allocate from
+ * @flags: flags passed to heap
+ * @handle: pointer that will be populated with a cookie to use to
+ * refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+ size_t len;
+ size_t align;
+ unsigned int heap_id_mask;
+ unsigned int flags;
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle: a handle
+ * @fd: a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+ ion_user_handle_t handle;
+ int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle: a handle
+ */
+struct ion_handle_data {
+ ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd: the custom ioctl function to call
+ * @arg: additional data to pass to the custom ioctl, typically a user
+ * pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+ unsigned int cmd;
+ unsigned long arg;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
+ struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle. Returns the struct with the fd field set to a file
+ * descriptor open in the current address space. This file descriptor
+ * can then be passed to another process. The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the
+ * handle field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary, it should be used after touching a cached buffer from the
+ * cpu; this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
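Tying the ioctls above together, a minimal userspace sketch (the /dev/ion
node is the convention, the helper name is illustrative, and error handling
is abbreviated):

    /* userspace sketch: allocate from the system heap, export a dma-buf fd */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "ion.h"

    int ion_alloc_fd(size_t len)
    {
            struct ion_allocation_data alloc = {
                    .len = len,
                    .align = 0,
                    .heap_id_mask = ION_HEAP_SYSTEM_MASK,
                    .flags = ION_FLAG_CACHED,
            };
            struct ion_fd_data share;
            struct ion_handle_data hd;
            int fd = open("/dev/ion", O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, ION_IOC_ALLOC, &alloc) < 0)
                    goto fail;
            share.handle = alloc.handle;
            if (ioctl(fd, ION_IOC_SHARE, &share) < 0)
                    goto fail;
            /* the dma-buf fd keeps the buffer alive on its own */
            hd.handle = alloc.handle;
            ioctl(fd, ION_IOC_FREE, &hd);
            close(fd);
            return share.fd;
    fail:
            close(fd);
            return -1;
    }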
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 000000000000..ffef06f63133
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read/write a handle
+ * @ptr: a pointer to an area at least as large as size
+ * @offset: offset into the ion buffer to start reading
+ * @size: size to read or write
+ * @write: 1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+ __u64 ptr;
+ __u64 offset;
+ __u64 size;
+ int write;
+ int __padding;
+};
+
+#define ION_IOC_MAGIC 'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+ _IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping. Can be
+ * used by unit tests to emulate a DMA engine as close as possible. Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping. Can be
+ * used by unit tests to test heap map_kernel functions. Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+ _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644
index 000000000000..9b5d4869505c
--- /dev/null
+++ b/drivers/staging/android/uapi/sw_sync.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
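A userspace sketch, assuming a timeline fd obtained by opening the debug-only
/dev/sw_sync node (helper names are illustrative):

    /* userspace sketch: fences and increments on a sw_sync timeline */
    #include <string.h>
    #include <sys/ioctl.h>
    #include "sw_sync.h"

    int sw_sync_fence_at(int timeline_fd, __u32 value)
    {
            struct sw_sync_create_fence_data data;

            memset(&data, 0, sizeof(data));
            data.value = value;
            strncpy(data.name, "example", sizeof(data.name) - 1);
            if (ioctl(timeline_fd, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
                    return -1;
            return data.fence;      /* fd of the new fence */
    }

    int sw_sync_advance(int timeline_fd, __u32 count)
    {
            /* signals every fence whose value has now been reached */
            return ioctl(timeline_fd, SW_SYNC_IOC_INC, &count);
    }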
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644
index 000000000000..57fdaadc4b04
--- /dev/null
+++ b/drivers/staging/android/uapi/sync.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2: file descriptor of second fence
+ * @name: name of new fence
+ * @fence: returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+ __s32 fd2; /* fd of second fence */
+ char name[32]; /* name of new fence */
+	__s32	fence; /* fd of the newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len: length of sync_pt_info including any driver_data
+ * @obj_name: name of parent sync_timeline
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns: timestamp of status change in nanoseconds
+ * @driver_data: any driver-dependent data
+ */
+struct sync_pt_info {
+ __u32 len;
+ char obj_name[32];
+ char driver_name[32];
+ __s32 status;
+ __u64 timestamp_ns;
+
+ __u8 driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len: ioctl caller writes the size of the buffer it is passing in.
+ *	ioctl returns the length of sync_fence_info_data returned to
+ *	userspace, including pt_info.
+ * @name: name of fence
+ * @status: status of fence. 1: signaled 0:active <0:error
+ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+ __u32 len;
+ char name[32];
+ __s32 status;
+
+ __u8 pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC '>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass the timeout in milliseconds. Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data. Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
+ * new fence's fd in sync_merge_data.fence
+ */
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len. On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
+ struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
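
A minimal userspace sketch tying these ioctls together, illustration only and not part of the patch; the 4096-byte info buffer size is an arbitrary choice. Note how the variable-length sync_pt_info records are walked using each record's len field.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "sync.h"

static int fence_wait_and_dump(int fence_fd)
{
	struct sync_fence_info_data *info;
	__s32 timeout_ms = 1000;	/* a negative value waits forever */
	__u8 *p, *end;
	int ret;

	ret = ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);
	if (ret < 0)
		return ret;

	info = calloc(1, 4096);
	if (!info)
		return -1;
	info->len = 4096;		/* caller passes the buffer size in */

	ret = ioctl(fence_fd, SYNC_IOC_FENCE_INFO, info);
	if (ret < 0)
		goto out;

	printf("fence %s status %d\n", info->name, info->status);
	p = info->pt_info;
	end = (__u8 *)info + info->len;	/* len now holds the total size */
	while (p < end) {
		struct sync_pt_info *pt = (struct sync_pt_info *)p;

		printf("  pt %s/%s status %d\n",
		       pt->obj_name, pt->driver_name, pt->status);
		p += pt->len;		/* records are variable length */
	}
out:
	free(info);
	return ret;
}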
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index db4d6dc03243..49985ee1cb2c 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -16,6 +16,7 @@ source "drivers/staging/iio/magnetometer/Kconfig"
source "drivers/staging/iio/meter/Kconfig"
source "drivers/staging/iio/resolver/Kconfig"
source "drivers/staging/iio/trigger/Kconfig"
+source "drivers/staging/iio/pressure/Kconfig"
config IIO_DUMMY_EVGEN
tristate
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index d87106135b27..4b5d9c76e4d0 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -21,3 +21,4 @@ obj-y += magnetometer/
obj-y += meter/
obj-y += resolver/
obj-y += trigger/
+obj-y += pressure/
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index cabc7a367db5..609a35b253da 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -100,6 +100,24 @@ config AD7280
To compile this driver as a module, choose M here: the
module will be called ad7280a
+config MAX77660_ADC
+ bool "Maxim MAX77660 ADC sub module"
+ depends on MFD_MAX77660
+ help
+ MAX77660 is a PMIC for smartphones and tablets which provides a
+ 13-channel ADC. This IIO driver reads the different ADC channels.
+
+config AS3722_ADC_EXTCON
+ tristate "AS3722 General Purpose ADC with EXTCON"
+ depends on MFD_AS3722
+ depends on EXTCON
+ help
+ The AMS AS3722 chip, used in smartphones and tablets, provides a
+ two-channel ADC. This driver supports continuous scanning of the
+ selected channel with notification through extcon.
+
config LPC32XX_ADC
tristate "NXP LPC32XX ADC"
depends on ARCH_LPC32XX
@@ -130,4 +148,12 @@ config SPEAR_ADC
Say yes here to build support for the integrated ADC inside the
ST SPEAr SoC. Provides direct access via sysfs.
+config PALMAS_GPADC
+ tristate "TI Palmas General Purpose ADC"
+ depends on MFD_PALMAS
+ help
+ The Palmas series PMIC, used in smartphones and tablets, provides
+ a 16-channel GPADC. This IIO driver reads the different GPADC
+ channels.
+
endmenu
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 3e9fb143d25b..ea01e1e70eb4 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_AD7780) += ad7780.o
obj-$(CONFIG_AD7816) += ad7816.o
obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_AD7280) += ad7280a.o
+obj-$(CONFIG_AS3722_ADC_EXTCON) += as3722-adc-extcon.o
+obj-$(CONFIG_MAX77660_ADC) += max77660-adc.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o
+obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
diff --git a/drivers/staging/iio/adc/as3722-adc-extcon.c b/drivers/staging/iio/adc/as3722-adc-extcon.c
new file mode 100644
index 000000000000..537a03e9b658
--- /dev/null
+++ b/drivers/staging/iio/adc/as3722-adc-extcon.c
@@ -0,0 +1,317 @@
+/*
+ * as3722-adc-extcon.c -- AMS AS3722 ADC EXTCON.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Mallikarjun Kasoju <mkasoju@nvidia.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/extcon.h>
+#include <linux/mfd/as3722.h>
+
+struct as3722_adc {
+ struct device *dev;
+ struct as3722 *as3722;
+ struct extcon_dev edev;
+ int irq;
+ int hi_threshold;
+ int low_threshold;
+};
+
+static const char *as3722_adc_extcon_cable[] = {
+ [0] = "USB-Host",
+ NULL,
+};
+
+static int as3722_read_adc1_cable_update(struct as3722_adc *adc)
+{
+ struct as3722 *as3722 = adc->as3722;
+ int result;
+ int ret;
+ u32 val;
+
+ ret = as3722_read(as3722, AS3722_ADC1_MSB_RESULT_REG, &val);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1_MSB_RESULT read failed %d\n", ret);
+ return ret;
+ }
+ result = ((val & AS3722_ADC_MASK_MSB_VAL) << 3);
+
+ ret = as3722_read(as3722, AS3722_ADC1_LSB_RESULT_REG, &val);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1_LSB_RESULT read failed %d\n", ret);
+ return ret;
+ }
+ result |= val & AS3722_ADC_MASK_LSB_VAL;
+
+ if (result >= adc->hi_threshold) {
+ extcon_set_cable_state(&adc->edev, "USB-Host", false);
+ dev_info(adc->dev, "USB-Host is disconnected\n");
+ } else if (result <= adc->low_threshold) {
+ extcon_set_cable_state(&adc->edev, "USB-Host", true);
+ dev_info(adc->dev, "USB-Host is connected\n");
+ }
+ return ret;
+}
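
The conversion result and the threshold registers share the same 10-bit split: the MSB register carries bits 9:3 and the LSB register bits 2:0, which is why the read above assembles (MSB << 3) | LSB and the probe code below writes (value >> 3) & 0x7F and value & 0x7. A self-contained sketch of the round trip, illustration only (the helper names are made up; u8/u16 are the usual kernel types):

/* Illustration only: the AS3722 10-bit MSB/LSB split.
 * Example: 0x2AB -> MSB 0x55 (bits 9:3), LSB 0x3 (bits 2:0). */
static inline void as3722_split10(u16 val, u8 *msb, u8 *lsb)
{
	*msb = (val >> 3) & 0x7F;
	*lsb = val & 0x7;
}

static inline u16 as3722_join10(u8 msb, u8 lsb)
{
	return ((u16)(msb & 0x7F) << 3) | (lsb & 0x7);
}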
+
+static irqreturn_t as3722_adc_extcon_irq(int irq, void *data)
+{
+ struct as3722_adc *adc = data;
+
+ as3722_read_adc1_cable_update(adc);
+ return IRQ_HANDLED;
+}
+
+static int as3722_adc_extcon_probe(struct platform_device *pdev)
+{
+ struct as3722 *as3722 = dev_get_drvdata(pdev->dev.parent);
+ struct as3722_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
+ struct device_node *node = pdev->dev.parent->of_node;
+ struct as3722_adc_extcon_platform_data *extcon_pdata;
+ struct as3722_adc *adc;
+ int ret = 0;
+ unsigned int try_counter = 0;
+ u32 val;
+
+ if (node && !pdata) {
+ extcon_pdata = devm_kzalloc(&pdev->dev,
+ sizeof(*extcon_pdata), GFP_KERNEL);
+ if (!extcon_pdata)
+ return -ENOMEM;
+ of_property_read_string(node, "ams,extcon-name",
+ &extcon_pdata->connection_name);
+ extcon_pdata->enable_adc1_continuous_mode =
+ of_property_read_bool(node,
+ "ams,enable-adc1-continuous-mode");
+ extcon_pdata->enable_low_voltage_range =
+ of_property_read_bool(node,
+ "ams,enable-low-voltage-range");
+ of_property_read_u32(node, "ams,adc-channel",
+ &extcon_pdata->adc_channel);
+ of_property_read_u32(node, "ams,hi-threshold",
+ &extcon_pdata->hi_threshold);
+ of_property_read_u32(node, "ams,low-threshold",
+ &extcon_pdata->low_threshold);
+ } else if (pdata && pdata->extcon_pdata) {
+ extcon_pdata = pdata->extcon_pdata;
+ } else {
+ dev_err(&pdev->dev, "no platform data available\n");
+ return -ENODEV;
+ }
+
+ adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
+ if (!adc) {
+ dev_err(&pdev->dev, "Malloc adc failed\n");
+ return -ENOMEM;
+ }
+
+ adc->dev = &pdev->dev;
+ adc->as3722 = as3722;
+ dev_set_drvdata(&pdev->dev, adc);
+ adc->irq = platform_get_irq(pdev, 0);
+ adc->hi_threshold = extcon_pdata->hi_threshold;
+ adc->low_threshold = extcon_pdata->low_threshold;
+
+ if (!extcon_pdata->enable_adc1_continuous_mode)
+ goto skip_adc_config;
+
+ /* Set ADC threshold values */
+ ret = as3722_write(as3722, AS3722_ADC1_THRESHOLD_HI_MSB_REG,
+ (extcon_pdata->hi_threshold >> 3) & 0x7F);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1_THRESHOLD_HI_MSB write failed %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = as3722_write(as3722, AS3722_ADC1_THRESHOLD_HI_LSB_REG,
+ (extcon_pdata->hi_threshold) & 0x7);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1_THRESHOLD_HI_LSB write failed %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = as3722_write(as3722, AS3722_ADC1_THRESHOLD_LO_MSB_REG,
+ (extcon_pdata->low_threshold >> 3) & 0x7F);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1_THRESHOLD_LO_MSB write failed %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = as3722_write(as3722, AS3722_ADC1_THRESHOLD_LO_LSB_REG,
+ (extcon_pdata->low_threshold) & 0x7);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1_THRESHOLD_LO_LSB write failed %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Configure adc1 */
+ val = (extcon_pdata->adc_channel & 0x1F) |
+ AS3722_ADC1_INTEVAL_SCAN_MASK;
+ if (extcon_pdata->enable_low_voltage_range)
+ val |= AS3722_ADC1_LOW_VOLTAGE_RANGE_MASK;
+ ret = as3722_write(as3722, AS3722_ADC1_CONTROL_REG, val);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1_CONTROL write failed %d\n", ret);
+ return ret;
+ }
+
+ /* Start ADC */
+ ret = as3722_update_bits(as3722, AS3722_ADC1_CONTROL_REG,
+ AS3722_ADC1_CONVERSION_START_MASK,
+ AS3722_ADC1_CONVERSION_START_MASK);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1_CONTROL write failed %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for 1 conversion */
+ do {
+ ret = as3722_read(as3722, AS3722_ADC1_MSB_RESULT_REG, &val);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1_MSB_RESULT read failed %d\n",
+ ret);
+ return ret;
+ }
+ if (!(val & AS3722_ADC1_MASK_CONV_NOTREADY))
+ break;
+ udelay(500);
+ } while (try_counter++ < 10);
+
+ adc->edev.name = (extcon_pdata->connection_name) ?
+ extcon_pdata->connection_name : pdev->name;
+ adc->edev.supported_cable = as3722_adc_extcon_cable;
+ ret = extcon_dev_register(&adc->edev, NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "extcon dev register failed %d\n", ret);
+ return ret;
+ }
+
+ /* Read ADC result */
+ ret = as3722_read_adc1_cable_update(adc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "ADC read failed %d\n", ret);
+ goto scrub_edev;
+ }
+
+ ret = as3722_update_bits(as3722, AS3722_ADC1_CONTROL_REG,
+ AS3722_ADC1_INTEVAL_SCAN_MASK, AS3722_ADC1_INTEVAL_SCAN_MASK);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC1 INTEVAL_SCAN set failed: %d\n", ret);
+ goto scrub_edev;
+ }
+
+ ret = request_threaded_irq(adc->irq, NULL, as3722_adc_extcon_irq,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME, dev_name(adc->dev),
+ adc);
+ if (ret < 0) {
+ dev_err(adc->dev, "request irq %d failed: %dn", adc->irq, ret);
+ goto stop_adc1;
+ }
+
+skip_adc_config:
+ device_init_wakeup(&pdev->dev, 1);
+ return 0;
+
+stop_adc1:
+ as3722_update_bits(as3722, AS3722_ADC1_CONTROL_REG,
+ AS3722_ADC1_CONVERSION_START_MASK, 0);
+scrub_edev:
+ extcon_dev_unregister(&adc->edev);
+
+ return ret;
+}
+
+static int as3722_adc_extcon_remove(struct platform_device *pdev)
+{
+ struct as3722_adc *adc = dev_get_drvdata(&pdev->dev);
+ struct as3722 *as3722 = adc->as3722;
+
+ as3722_update_bits(as3722, AS3722_ADC1_CONTROL_REG,
+ AS3722_ADC1_CONVERSION_START_MASK, 0);
+ extcon_dev_unregister(&adc->edev);
+ free_irq(adc->irq, adc);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int as3722_adc_extcon_suspend(struct device *dev)
+{
+ struct as3722_adc *adc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(adc->irq);
+
+ return 0;
+}
+
+static int as3722_adc_extcon_resume(struct device *dev)
+{
+ struct as3722_adc *adc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(adc->irq);
+
+ return 0;
+};
+#endif
+
+static const struct dev_pm_ops as3722_adc_extcon_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(as3722_adc_extcon_suspend,
+ as3722_adc_extcon_resume)
+};
+
+static struct platform_driver as3722_adc_extcon_driver = {
+ .probe = as3722_adc_extcon_probe,
+ .remove = as3722_adc_extcon_remove,
+ .driver = {
+ .name = "as3722-adc-extcon",
+ .owner = THIS_MODULE,
+ .pm = &as3722_adc_extcon_pm_ops,
+ },
+};
+
+static int __init as3722_adc_extcon_init(void)
+{
+ return platform_driver_register(&as3722_adc_extcon_driver);
+}
+
+subsys_initcall_sync(as3722_adc_extcon_init);
+
+static void __exit as3722_adc_extcon_exit(void)
+{
+ platform_driver_unregister(&as3722_adc_extcon_driver);
+}
+module_exit(as3722_adc_extcon_exit);
+
+MODULE_DESCRIPTION("as3722 ADC extcon driver");
+MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_ALIAS("platform:as3722-adc-extcon");
+MODULE_LICENSE("GPL v2");
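
Once registered, the cable state surfaces through the extcon class in sysfs. A minimal userspace sketch, illustration only; the exact path depends on the configured "ams,extcon-name" (it falls back to the platform device name), so the path below is an assumption.

#include <stdio.h>

/* Returns 1 if USB-Host is attached, 0 if not, -1 on error. */
static int as3722_usb_host_attached(void)
{
	char line[64];
	int attached = -1;
	FILE *f = fopen("/sys/class/extcon/as3722-adc-extcon/state", "r");

	if (!f)
		return -1;
	/* The state file lists one "<cable>=<0|1>" entry per line. */
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "USB-Host=%d", &attached) == 1)
			break;
	fclose(f);
	return attached;
}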
diff --git a/drivers/staging/iio/adc/max77660-adc.c b/drivers/staging/iio/adc/max77660-adc.c
new file mode 100644
index 000000000000..19e2d7490072
--- /dev/null
+++ b/drivers/staging/iio/adc/max77660-adc.c
@@ -0,0 +1,629 @@
+/*
+ * max77660-adc.c -- MAXIM MAX77660 ADC.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/pm.h>
+#include <linux/mfd/max77660/max77660-core.h>
+#include <linux/completion.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+
+#define MOD_NAME "max77660-adc"
+#define ADC_CONVERTION_TIMEOUT (msecs_to_jiffies(1000))
+
+struct max77660_adc_info {
+ int acquisition_time_us;
+ int mult_volt_uV;
+ int div_val;
+};
+
+#define MAX77660_ADC_INFO(_chan, _adcacq, _mult, _div) \
+[MAX77660_ADC_CH_##_chan] = { \
+ .acquisition_time_us = _adcacq, \
+ .mult_volt_uV = _mult, \
+ .div_val = _div, \
+ }
+
+static struct max77660_adc_info max77660_adc_info[] = {
+ MAX77660_ADC_INFO(VBYP, 12, 10240, 4096),
+ MAX77660_ADC_INFO(TDIE, 16, 1, 1),
+ MAX77660_ADC_INFO(VBBATT, 16, 4300, 4096),
+ MAX77660_ADC_INFO(VSYS, 12, 5120, 4096),
+ MAX77660_ADC_INFO(VDCIN, 12, 10240, 4096),
+ MAX77660_ADC_INFO(VWCSNS, 12, 10240, 4096),
+ MAX77660_ADC_INFO(VTHM, 64, 2500, 4096),
+ MAX77660_ADC_INFO(VICHG, 8, 2500, 4096),
+ MAX77660_ADC_INFO(VMBATDET, 96, 2500, 4096),
+ MAX77660_ADC_INFO(VMBAT, 12, 5120, 4096),
+ MAX77660_ADC_INFO(ADC0, 16, 2500, 4096),
+ MAX77660_ADC_INFO(ADC1, 16, 2500, 4096),
+ MAX77660_ADC_INFO(ADC2, 16, 2500, 4096),
+ MAX77660_ADC_INFO(ADC3, 16, 2500, 4096),
+};
+
+struct max77660_adc {
+ struct device *dev;
+ struct device *parent;
+ u8 adc_control;
+ u8 iadc_val;
+ int irq;
+ struct max77660_adc_info *adc_info;
+ struct completion conv_completion;
+ struct max77660_adc_wakeup_property adc_wake_props;
+ bool wakeup_props_available;
+};
+
+static inline int max77660_is_valid_channel(struct max77660_adc *adc, int chan)
+{
+ /*
+ * ES1.0: Do not convert ADC0 channel otherwise it will shutdown system.
+ */
+ if (max77660_is_es_1_0(adc->parent) &&
+ (chan == MAX77660_ADC_CH_ADC0)) {
+ dev_err(adc->dev, "ES1.0 verion errata: do not convert ADC0\n");
+ return false;
+ }
+ return true;
+}
+
+static irqreturn_t max77660_adc_irq(int irq, void *data)
+{
+ struct max77660_adc *adc = data;
+ u8 status;
+ int ret;
+
+ ret = max77660_reg_read(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCINT, &status);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCINT read failed: %d\n", ret);
+ goto out;
+ }
+
+ if (status & MAX77660_ADCINT_ADCCONVINT)
+ complete(&adc->conv_completion);
+ else if (status & MAX77660_ADCINT_DTRINT)
+ dev_info(adc->dev, "DTR int occured\n");
+ else if (status & MAX77660_ADCINT_DTFINT)
+ dev_info(adc->dev, "DTF int occured\n");
+ else
+ dev_err(adc->dev, "ADC-IRQ for unknown reason, 0x%02x\n",
+ status);
+out:
+ return IRQ_HANDLED;
+}
+
+static int max77660_adc_start_mask_interrupt(struct max77660_adc *adc, int irq,
+ int mask)
+{
+ int ret;
+
+ if (mask)
+ ret = max77660_reg_set_bits(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCINTM, irq);
+ else
+ ret = max77660_reg_clr_bits(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCINTM, irq);
+ if (ret < 0)
+ dev_err(adc->dev, "ADCINTM update failed: %d\n", ret);
+ return ret;
+}
+
+static int max77660_adc_enable(struct max77660_adc *adc, int enable)
+{
+ int ret;
+
+ if (enable)
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCCTRL,
+ adc->adc_control | MAX77660_ADCCTRL_ADCEN);
+ else
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCCTRL, adc->adc_control);
+ if (ret < 0)
+ dev_err(adc->dev, "ADCCTRL write failed: %d\n", ret);
+ return ret;
+}
+
+static int max77660_adc_start_convertion(struct max77660_adc *adc, int adc_chan)
+{
+ u8 adc_l;
+ u8 adc_h;
+ int ret;
+ u8 chan0 = 0;
+ u8 chan1 = 0;
+
+ ret = max77660_adc_enable(adc, true);
+ if (ret < 0)
+ return ret;
+
+ ret = max77660_adc_start_mask_interrupt(adc,
+ MAX77660_ADCINT_ADCCONVINT, 0);
+ if (ret < 0)
+ goto out;
+
+ if (adc_chan < 8)
+ chan0 = BIT(adc_chan);
+ else
+ chan1 = BIT(adc_chan - 8);
+
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCSEL0, chan0);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCSEL0 write failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCSEL1, chan1);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCSEL1 write failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCCHSEL, adc_chan);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCCHSEL write failed: %d\n", ret);
+ goto out;
+ }
+
+ if (adc_chan >= MAX77660_ADC_CH_ADC0) {
+ int chan_num = adc_chan - MAX77660_ADC_CH_ADC0;
+ u8 iadc = adc->iadc_val;
+ iadc |= MAX77660_IADC_IADCMUX(chan_num);
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_IADC, iadc);
+ if (ret < 0) {
+ dev_err(adc->dev, "IADC write failed: %d\n", ret);
+ goto out;
+ }
+ }
+
+ if (adc->adc_info[adc_chan].acquisition_time_us)
+ udelay(adc->adc_info[adc_chan].acquisition_time_us);
+
+ INIT_COMPLETION(adc->conv_completion);
+ ret = max77660_reg_update(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCCTRL, MAX77660_ADCCTRL_ADCCONV,
+ MAX77660_ADCCTRL_ADCCONV);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCCTR write failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&adc->conv_completion,
+ ADC_CONVERTION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(adc->dev, "ADC conversion not completed\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ ret = max77660_reg_read(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCDATAL, &adc_l);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCDATAL read failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = max77660_reg_read(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCDATAH, &adc_h);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCDATAH read failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = ((adc_h & 0xF) << 8) | adc_l;
+
+out:
+ max77660_adc_start_mask_interrupt(adc, MAX77660_ADCINT_ADCCONVINT, 1);
+ max77660_adc_enable(adc, false);
+ return ret;
+}
+
+static int max77660_adc_read_channel(struct max77660_adc *adc, int adc_chan)
+{
+ int ret;
+ int val;
+
+ ret = max77660_adc_start_convertion(adc, adc_chan);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC start coversion failed\n");
+ return ret;
+ }
+
+ if (adc_chan != MAX77660_ADC_CH_TDIE)
+ val = (adc->adc_info[adc_chan].mult_volt_uV * ret) /
+ adc->adc_info[adc_chan].div_val;
+ else
+ val = (298 * ret * 250)/(163 * 4095) - 273;
+ return val;
+}
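
For reference, the die-temperature branch folds the 12-bit LSB scaling into the slope of the TDIE transfer function: a hypothetical raw code of 3000, for illustration only, converts as (298 * 3000 * 250) / (163 * 4095) - 273 ≈ 62 degrees C, whereas every other channel is simply scaled by its mult_volt_uV/div_val pair from the table above.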
+
+static int max77660_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct max77660_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ if (!max77660_is_valid_channel(adc, chan->channel))
+ return -EINVAL;
+
+ switch (mask) {
+ case 0:
+ mutex_lock(&indio_dev->mlock);
+ ret = max77660_adc_start_convertion(adc, chan->channel);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC start coversion failed\n");
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ *val = ret;
+ mutex_unlock(&indio_dev->mlock);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ mutex_lock(&indio_dev->mlock);
+ ret = max77660_adc_read_channel(adc, chan->channel);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC read failed: %d\n", ret);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ *val = ret;
+ mutex_unlock(&indio_dev->mlock);
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_info max77660_adc_iio_info = {
+ .read_raw = max77660_adc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define MAX77660_ADC_CHAN_IIO(chan) \
+{ \
+ .datasheet_name = MAX77660_DATASHEET_NAME(chan), \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = 1, \
+ .channel = MAX77660_ADC_CH_##chan, \
+}
+
+static const struct iio_chan_spec max77660_adc_iio_channel[] = {
+ MAX77660_ADC_CHAN_IIO(VBYP),
+ MAX77660_ADC_CHAN_IIO(TDIE),
+ MAX77660_ADC_CHAN_IIO(VBBATT),
+ MAX77660_ADC_CHAN_IIO(VSYS),
+ MAX77660_ADC_CHAN_IIO(VDCIN),
+ MAX77660_ADC_CHAN_IIO(VWCSNS),
+ MAX77660_ADC_CHAN_IIO(VTHM),
+ MAX77660_ADC_CHAN_IIO(VICHG),
+ MAX77660_ADC_CHAN_IIO(VMBATDET),
+ MAX77660_ADC_CHAN_IIO(VMBAT),
+ MAX77660_ADC_CHAN_IIO(ADC0),
+ MAX77660_ADC_CHAN_IIO(ADC1),
+ MAX77660_ADC_CHAN_IIO(ADC2),
+ MAX77660_ADC_CHAN_IIO(ADC3),
+};
+
+static int max77660_adc_probe(struct platform_device *pdev)
+{
+ struct max77660_adc *adc;
+ struct max77660_platform_data *pdata;
+ struct max77660_adc_platform_data *adc_pdata;
+ struct iio_dev *iodev;
+ int ret;
+
+ pdata = dev_get_platdata(pdev->dev.parent);
+ if (!pdata || !pdata->adc_pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -ENODEV;
+ }
+ adc_pdata = pdata->adc_pdata;
+
+ iodev = iio_device_alloc(sizeof(*adc));
+ if (!iodev) {
+ dev_err(&pdev->dev, "iio_device_alloc failed\n");
+ return -ENOMEM;
+ }
+ adc = iio_priv(iodev);
+ adc->dev = &pdev->dev;
+ adc->parent = pdev->dev.parent;
+ adc->adc_info = max77660_adc_info;
+ init_completion(&adc->conv_completion);
+ dev_set_drvdata(&pdev->dev, iodev);
+
+ adc->irq = platform_get_irq(pdev, 0);
+ ret = request_threaded_irq(adc->irq, NULL,
+ max77660_adc_irq,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME, dev_name(adc->dev),
+ adc);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "request irq %d failed: %dn", adc->irq, ret);
+ goto out;
+ }
+
+ if (adc_pdata->adc_ref_enabled)
+ adc->adc_control |= MAX77660_ADCCTRL_ADCREFEN;
+
+ if (adc_pdata->adc_avg_sample <= 1)
+ adc->adc_control |= MAX77660_ADCCTRL_ADCAVG(0);
+ else if (adc_pdata->adc_avg_sample <= 2)
+ adc->adc_control |= MAX77660_ADCCTRL_ADCAVG(1);
+ else if (adc_pdata->adc_avg_sample <= 16)
+ adc->adc_control |= MAX77660_ADCCTRL_ADCAVG(2);
+ else
+ adc->adc_control |= MAX77660_ADCCTRL_ADCAVG(3);
+
+ if (adc_pdata->adc_current_uA == 0)
+ adc->iadc_val = 0;
+ else if (adc_pdata->adc_current_uA <= 10)
+ adc->iadc_val = 1;
+ else if (adc_pdata->adc_current_uA <= 50)
+ adc->iadc_val = 2;
+ else
+ adc->iadc_val = 3;
+
+ iodev->name = MOD_NAME;
+ iodev->dev.parent = &pdev->dev;
+ iodev->info = &max77660_adc_iio_info;
+ iodev->modes = INDIO_DIRECT_MODE;
+ iodev->channels = max77660_adc_iio_channel;
+ iodev->num_channels = ARRAY_SIZE(max77660_adc_iio_channel);
+ ret = iio_device_register(iodev);
+ if (ret < 0) {
+ dev_err(adc->dev, "iio_device_register() failed: %d\n", ret);
+ goto out_irq_free;
+ }
+
+ if (adc_pdata->channel_mapping) {
+ ret = iio_map_array_register(iodev, adc_pdata->channel_mapping);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "iio_map_array_register() failed: %d\n", ret);
+ goto out_unregister;
+ }
+ }
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ if (adc_pdata->adc_wakeup_data) {
+ memcpy(&adc->adc_wake_props, adc_pdata->adc_wakeup_data,
+ sizeof(struct max77660_adc_wakeup_property));
+ adc->wakeup_props_available = true;
+ device_wakeup_enable(&pdev->dev);
+ }
+ return 0;
+
+out_unregister:
+ iio_device_unregister(iodev);
+out_irq_free:
+ free_irq(adc->irq, adc);
+out:
+ iio_device_free(iodev);
+ return ret;
+}
+
+static int max77660_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *iodev = dev_to_iio_dev(&pdev->dev);
+ struct max77660_adc *adc = iio_priv(iodev);
+
+ iio_device_unregister(iodev);
+ free_irq(adc->irq, adc);
+ iio_device_free(iodev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max77660_adc_wakeup_configure(struct max77660_adc *adc)
+{
+ int thres_h, thres_l, ch;
+ u8 int_mask, ch1, ch0;
+ u8 adc_avg;
+ int ret;
+
+ thres_h = 0;
+ thres_l = 0;
+ int_mask = 0xFF;
+ ch = adc->adc_wake_props.adc_channel_number;
+ if (adc->adc_wake_props.adc_high_threshold > 0) {
+ thres_h = adc->adc_wake_props.adc_high_threshold & 0xFFF;
+ int_mask &= ~MAX77660_ADCINT_DTRINT;
+ }
+ if (adc->adc_wake_props.adc_low_threshold > 0) {
+ thres_l = adc->adc_wake_props.adc_low_threshold & 0xFFF;
+ int_mask &= ~MAX77660_ADCINT_DTFINT;
+ }
+
+ ch0 = (ch > 7) ? 0 : BIT(ch);
+ ch1 = (ch > 7) ? BIT(ch - 8) : 0;
+
+ if (adc->adc_wake_props.adc_avg_sample <= 1)
+ adc_avg = MAX77660_ADCCTRL_ADCAVG(0);
+ else if (adc->adc_wake_props.adc_avg_sample <= 2)
+ adc_avg = MAX77660_ADCCTRL_ADCAVG(1);
+ else if (adc->adc_wake_props.adc_avg_sample <= 16)
+ adc_avg = MAX77660_ADCCTRL_ADCAVG(2);
+ else
+ adc_avg = MAX77660_ADCCTRL_ADCAVG(3);
+
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_DTRL, thres_h & 0xFF);
+ if (ret < 0) {
+ dev_err(adc->dev, "DTRL write failed: %d\n", ret);
+ return ret;
+ }
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_DTRH, (thres_h >> 8) & 0xF);
+ if (ret < 0) {
+ dev_err(adc->dev, "DTRH write failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_DTFL, thres_l & 0xFF);
+ if (ret < 0) {
+ dev_err(adc->dev, "DTFL write failed: %d\n", ret);
+ return ret;
+ }
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_DTFH, (thres_l >> 8) & 0xF);
+ if (ret < 0) {
+ dev_err(adc->dev, "DTFH write failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCSEL0, ch0);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCSEL0 write failed: %d\n", ret);
+ return ret;
+ }
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCSEL1, ch1);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCSEL1 write failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = max77660_reg_update(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCCTRL, adc_avg,
+ MAX77660_ADCCTRL_ADCAVG_MASK);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCCTRL update failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCINTM, int_mask);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCINTM write failed\n");
+ return ret;
+ }
+
+ ret = max77660_reg_update(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCCTRL, MAX77660_ADCCTRL_ADCCONT,
+ MAX77660_ADCCTRL_ADCCONT);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCCTR update failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int max77660_adc_wakeup_reset(struct max77660_adc *adc)
+{
+ int ret;
+
+ ret = max77660_reg_write(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCINTM, 0xFF);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCINTM write failed\n");
+ return ret;
+ }
+
+ ret = max77660_reg_update(adc->parent, MAX77660_PWR_SLAVE,
+ MAX77660_REG_ADCCTRL, 0, MAX77660_ADCCTRL_ADCCONT);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCCTR update failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int max77660_adc_suspend(struct device *dev)
+{
+ struct iio_dev *iodev = dev_to_iio_dev(dev);
+ struct max77660_adc *adc = iio_priv(iodev);
+ int ret;
+
+ if (!device_may_wakeup(dev) || !adc->wakeup_props_available)
+ goto skip_wakeup;
+
+ ret = max77660_adc_wakeup_configure(adc);
+ if (ret < 0)
+ goto skip_wakeup;
+
+ enable_irq_wake(adc->irq);
+skip_wakeup:
+ return 0;
+}
+
+static int max77660_adc_resume(struct device *dev)
+{
+ struct iio_dev *iodev = dev_to_iio_dev(dev);
+ struct max77660_adc *adc = iio_priv(iodev);
+ int ret;
+
+ if (!device_may_wakeup(dev) || !adc->wakeup_props_available)
+ goto skip_wakeup;
+
+ ret = max77660_adc_wakeup_reset(adc);
+ if (ret < 0)
+ goto skip_wakeup;
+
+ disable_irq_wake(adc->irq);
+skip_wakeup:
+ return 0;
+};
+#endif
+
+static const struct dev_pm_ops max77660_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(max77660_adc_suspend,
+ max77660_adc_resume)
+};
+
+static struct platform_driver max77660_adc_driver = {
+ .probe = max77660_adc_probe,
+ .remove = max77660_adc_remove,
+ .driver = {
+ .name = "max77660-adc",
+ .owner = THIS_MODULE,
+ .pm = &max77660_pm_ops,
+ },
+};
+
+static int __init max77660_adc_driver_init(void)
+{
+ return platform_driver_register(&max77660_adc_driver);
+}
+subsys_initcall_sync(max77660_adc_driver_init);
+
+static void __exit max77660_adc_driver_exit(void)
+{
+ platform_driver_unregister(&max77660_adc_driver);
+}
+module_exit(max77660_adc_driver_exit);
+
+
+MODULE_AUTHOR("Laxman Dewangan<ldewangan@nvidia.com>");
+MODULE_ALIAS("platform:max77660-adc");
+MODULE_LICENSE("GPL v2");
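
From userspace the channels appear through the standard IIO sysfs interface. A minimal sketch, illustration only; the iio:device0 index is an assumption, and note that this driver (somewhat unusually) returns a fully converted value for the _scale attribute, since max77660_adc_read_raw() maps IIO_CHAN_INFO_SCALE to a complete conversion.

#include <stdio.h>

/* Read one converted channel value via sysfs; channel 1 is TDIE above. */
static int max77660_read_channel(int chan, int *val)
{
	char path[96];
	FILE *f;
	int ret = -1;

	snprintf(path, sizeof(path),
		 "/sys/bus/iio/devices/iio:device0/in_voltage%d_scale", chan);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", val) == 1)
		ret = 0;
	fclose(f);
	return ret;
}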
diff --git a/drivers/staging/iio/adc/palmas_gpadc.c b/drivers/staging/iio/adc/palmas_gpadc.c
new file mode 100644
index 000000000000..dc2e03fd2a48
--- /dev/null
+++ b/drivers/staging/iio/adc/palmas_gpadc.c
@@ -0,0 +1,1306 @@
+/*
+ * palmas_gpadc.c -- TI PALMAS GPADC.
+ *
+ * Copyright (c) 2013-2014, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Pradeep Goudagunta <pgoudagunta@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/pm.h>
+#include <linux/mfd/palmas.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/driver.h>
+#include <linux/mutex.h>
+
+#define MOD_NAME "palmas-gpadc"
+#define ADC_CONVERTION_TIMEOUT (msecs_to_jiffies(5000))
+#define TO_BE_CALCULATED 0
+#define PRECISION_MULTIPLIER 1000000LL
+
+struct palmas_gpadc_info {
+ /* calibration codes and regs */
+ int x1;
+ int x2;
+ u8 trim1_reg;
+ u8 trim2_reg;
+ s64 gain;
+ s64 offset;
+ bool is_correct_code;
+};
+
+#define PALMAS_ADC_INFO(_chan, _x1, _x2, _t1, _t2, _is_correct_code) \
+[PALMAS_ADC_CH_##_chan] = { \
+ .x1 = _x1, \
+ .x2 = _x2, \
+ .gain = TO_BE_CALCULATED, \
+ .offset = TO_BE_CALCULATED, \
+ .trim1_reg = PALMAS_GPADC_TRIM##_t1, \
+ .trim2_reg = PALMAS_GPADC_TRIM##_t2, \
+ .is_correct_code = _is_correct_code \
+ }
+
+static struct palmas_gpadc_info palmas_gpadc_info[] = {
+ PALMAS_ADC_INFO(IN0, 2064, 3112, 1, 2, false),
+ PALMAS_ADC_INFO(IN1, 2064, 3112, 1, 2, false),
+ PALMAS_ADC_INFO(IN2, 2064, 3112, 3, 4, false),
+ PALMAS_ADC_INFO(IN3, 2064, 3112, 1, 2, false),
+ PALMAS_ADC_INFO(IN4, 2064, 3112, 1, 2, false),
+ PALMAS_ADC_INFO(IN5, 2064, 3112, 1, 2, false),
+ PALMAS_ADC_INFO(IN6, 2064, 3112, 5, 6, false),
+ PALMAS_ADC_INFO(IN7, 2064, 3112, 7, 8, false),
+ PALMAS_ADC_INFO(IN8, 2064, 3112, 9, 10, false),
+ PALMAS_ADC_INFO(IN9, 2064, 3112, 11, 12, false),
+ PALMAS_ADC_INFO(IN10, 2064, 3112, 13, 14, false),
+ PALMAS_ADC_INFO(IN11, 0, 0, INVALID, INVALID, true),
+ PALMAS_ADC_INFO(IN12, 0, 0, INVALID, INVALID, true),
+ PALMAS_ADC_INFO(IN13, 0, 0, INVALID, INVALID, true),
+ PALMAS_ADC_INFO(IN14, 2064, 3112, 15, 16, false),
+ PALMAS_ADC_INFO(IN15, 0, 0, INVALID, INVALID, true),
+};
+
+struct palmas_gpadc {
+ struct device *dev;
+ struct palmas *palmas;
+ u8 ch0_current;
+ u8 ch3_current;
+ bool ch3_dual_current;
+ bool extended_delay;
+ int irq;
+ int irq_auto_0;
+ int irq_auto_1;
+ struct palmas_gpadc_info *adc_info;
+ struct completion conv_completion;
+ struct palmas_adc_auto_conv_property auto_conv0_data;
+ struct palmas_adc_auto_conv_property auto_conv1_data;
+ bool auto_conv0_enable;
+ bool auto_conv1_enable;
+ int auto_conversion_period;
+
+ struct dentry *dentry;
+ bool is_shutdown;
+ struct mutex lock;
+};
+
+/*
+ * GPADC lock issue in AUTO mode.
+ * Impact: In AUTO mode, GPADC conversion can be locked after disabling AUTO
+ * mode feature.
+ * Details:
+ * When the AUTO mode is the only conversion mode enabled, if the AUTO
+ * mode feature is disabled with bit GPADC_AUTO_CTRL.AUTO_CONV1_EN = 0
+ * or bit GPADC_AUTO_CTRL.AUTO_CONV0_EN = 0 during a conversion, the
+ * conversion mechanism can be seen as locked meaning that all following
+ * conversion will give 0 as a result. Bit GPADC_STATUS.GPADC_AVAILABLE
+ * will stay at 0 meaning that GPADC is busy. An RT conversion can unlock
+ * the GPADC.
+ *
+ * Workaround(s):
+ * To avoid the lock mechanism, the workaround to follow before any stop
+ * conversion request is:
+ * Force the GPADC state machine to be ON by using the
+ * GPADC_CTRL1.GPADC_FORCE bit = 1
+ * Shutdown the GPADC AUTO conversion using
+ * GPADC_AUTO_CTRL.SHUTDOWN_CONV[01] = 0.
+ * After 100us, force the GPADC state machine to be OFF by using the
+ * GPADC_CTRL1.GPADC_FORCE bit = 0
+ */
+static int palmas_disable_auto_conversion(struct palmas_gpadc *adc)
+{
+ int ret;
+
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_CTRL1,
+ PALMAS_GPADC_CTRL1_GPADC_FORCE,
+ PALMAS_GPADC_CTRL1_GPADC_FORCE);
+ if (ret < 0) {
+ dev_err(adc->dev, "GPADC_CTRL1 update failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_AUTO_CTRL, 0);
+ if (ret < 0) {
+ dev_err(adc->dev, "AUTO_CTRL write failed: %d\n", ret);
+ return ret;
+ }
+
+ udelay(100);
+
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_CTRL1,
+ PALMAS_GPADC_CTRL1_GPADC_FORCE, 0);
+ if (ret < 0) {
+ dev_err(adc->dev, "GPADC_CTRL1 update failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static irqreturn_t palmas_gpadc_irq(int irq, void *data)
+{
+ struct palmas_gpadc *adc = data;
+
+ complete(&adc->conv_completion);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t palmas_gpadc_irq_auto(int irq, void *data)
+{
+ struct palmas_gpadc *adc = data;
+ unsigned int val = 0;
+ int ret;
+
+ ret = palmas_read(adc->palmas, PALMAS_INTERRUPT_BASE,
+ PALMAS_INT3_LINE_STATE, &val);
+ if (ret < 0)
+ dev_err(adc->dev, "%s: Failed to read INT3_LINE_STATE, %d\n",
+ __func__, ret);
+
+ if (val & PALMAS_INT3_LINE_STATE_GPADC_AUTO_0)
+ dev_info(adc->dev, "Auto0 threshold interrupt occurred\n");
+ if (val & PALMAS_INT3_LINE_STATE_GPADC_AUTO_1)
+ dev_info(adc->dev, "Auto1 threshold interrupt occurred\n");
+
+ return IRQ_HANDLED;
+}
+
+static int palmas_gpadc_auto_conv_configure(struct palmas_gpadc *adc)
+{
+ int adc_period, conv;
+ int i;
+ int ch0 = 0, ch1 = 0;
+ int thres;
+ int ret;
+
+ if (!adc->auto_conv0_enable && !adc->auto_conv1_enable)
+ return 0;
+
+ adc_period = adc->auto_conversion_period;
+ for (i = 0; i < 16; ++i) {
+ if (((1000 * (1 << i))/32) > adc_period)
+ break;
+ }
+ if (i > 0)
+ i--;
+ adc_period = i;
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_AUTO_CTRL,
+ PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_MASK,
+ adc_period);
+ if (ret < 0) {
+ dev_err(adc->dev, "AUTO_CTRL write failed: %d\n", ret);
+ return ret;
+ }
+
+ conv = 0;
+ if (adc->auto_conv0_enable) {
+ int is_high;
+
+ ch0 = adc->auto_conv0_data.adc_channel_number;
+ conv |= PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN;
+ conv |= (adc->auto_conv0_data.adc_shutdown ?
+ PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0 : 0);
+ if (adc->auto_conv0_data.adc_high_threshold > 0) {
+ thres = adc->auto_conv0_data.adc_high_threshold;
+ is_high = 0;
+ } else {
+ thres = adc->auto_conv0_data.adc_low_threshold;
+ is_high = BIT(7);
+ }
+
+ ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_THRES_CONV0_LSB, thres & 0xFF);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "THRES_CONV0_LSB write failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_THRES_CONV0_MSB,
+ ((thres >> 8) & 0xF) | is_high);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "THRES_CONV0_MSB write failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (adc->auto_conv1_enable) {
+ int is_high;
+
+ ch1 = adc->auto_conv1_data.adc_channel_number;
+ conv |= PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN;
+ conv |= (adc->auto_conv1_data.adc_shutdown ?
+ PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1 : 0);
+ if (adc->auto_conv1_data.adc_high_threshold > 0) {
+ thres = adc->auto_conv1_data.adc_high_threshold;
+ is_high = 0;
+ } else {
+ thres = adc->auto_conv1_data.adc_low_threshold;
+ is_high = BIT(7);
+ }
+
+ ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_THRES_CONV1_LSB, thres & 0xFF);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "THRES_CONV1_LSB write failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_THRES_CONV1_MSB,
+ ((thres >> 8) & 0xF) | is_high);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "THRES_CONV1_MSB write failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_AUTO_SELECT, (ch1 << 4) | ch0);
+ if (ret < 0) {
+ dev_err(adc->dev, "AUTO_SELECT write failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_AUTO_CTRL,
+ PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1 |
+ PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0 |
+ PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN |
+ PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN, conv);
+ if (ret < 0) {
+ dev_err(adc->dev, "AUTO_CTRL write failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int palmas_gpadc_auto_conv_reset(struct palmas_gpadc *adc)
+{
+ int ret;
+
+ if (!adc->auto_conv0_enable && !adc->auto_conv1_enable)
+ return 0;
+
+ ret = palmas_disable_auto_conversion(adc);
+ if (ret < 0) {
+ dev_err(adc->dev, "Disable auto conversion failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = palmas_write(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_AUTO_SELECT, 0);
+ if (ret < 0) {
+ dev_err(adc->dev, "AUTO_SELECT write failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int palmas_gpadc_check_status(struct palmas_gpadc *adc)
+{
+ int retry_cnt = 3;
+ int check_cnt = 3;
+ int loop_cnt = 3;
+ unsigned int val = 0;
+ int ret;
+
+retry:
+ do {
+ ret = palmas_read(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_STATUS, &val);
+ if (ret < 0) {
+ dev_err(adc->dev, "%s: Failed to read STATUS, %d\n",
+ __func__, ret);
+ return ret;
+ } else if (val & PALMAS_GPADC_STATUS_GPADC_AVAILABLE) {
+ if (--check_cnt == 0)
+ break;
+ } else {
+ dev_warn(adc->dev, "%s: GPADC is busy, STATUS 0x%02x\n",
+ __func__, val);
+ }
+ udelay(100);
+ } while (loop_cnt-- > 0);
+
+ if (check_cnt == 0) {
+ if (retry_cnt < 3)
+ dev_warn(adc->dev, "%s: GPADC is unlocked.\n",
+ __func__);
+ return 0;
+ }
+
+ dev_warn(adc->dev, "%s: GPADC is locked.\n", __func__);
+ dev_warn(adc->dev, "%s: Perform RT conversion to unlock GPADC.\n",
+ __func__);
+ palmas_disable_auto_conversion(adc);
+ palmas_write(adc->palmas, PALMAS_GPADC_BASE, PALMAS_GPADC_RT_SELECT,
+ PALMAS_GPADC_RT_SELECT_RT_CONV_EN);
+ palmas_write(adc->palmas, PALMAS_GPADC_BASE, PALMAS_GPADC_RT_CTRL,
+ PALMAS_GPADC_RT_CTRL_START_POLARITY);
+ udelay(100);
+ palmas_write(adc->palmas, PALMAS_GPADC_BASE, PALMAS_GPADC_RT_CTRL, 0);
+ palmas_write(adc->palmas, PALMAS_GPADC_BASE, PALMAS_GPADC_RT_SELECT, 0);
+ if (retry_cnt-- > 0) {
+ goto retry;
+ } else {
+ dev_err(adc->dev, "%s: Failed to unlock GPADC.\n", __func__);
+ return -EDEADLK;
+ }
+
+ return 0;
+}
+
+static int palmas_gpadc_start_mask_interrupt(struct palmas_gpadc *adc, int mask)
+{
+ int ret;
+
+ if (!mask)
+ ret = palmas_update_bits(adc->palmas, PALMAS_INTERRUPT_BASE,
+ PALMAS_INT3_MASK,
+ PALMAS_INT3_MASK_GPADC_EOC_SW, 0);
+ else
+ ret = palmas_update_bits(adc->palmas, PALMAS_INTERRUPT_BASE,
+ PALMAS_INT3_MASK,
+ PALMAS_INT3_MASK_GPADC_EOC_SW,
+ PALMAS_INT3_MASK_GPADC_EOC_SW);
+ if (ret < 0)
+ dev_err(adc->dev, "GPADC INT MASK update failed: %d\n", ret);
+
+ return ret;
+}
+
+static int palmas_gpadc_enable(struct palmas_gpadc *adc, int adc_chan,
+ int enable)
+{
+ unsigned int mask, val;
+ int ret;
+
+ if (enable) {
+ val = (adc->extended_delay
+ << PALMAS_GPADC_RT_CTRL_EXTEND_DELAY_SHIFT);
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_RT_CTRL,
+ PALMAS_GPADC_RT_CTRL_EXTEND_DELAY, val);
+ if (ret < 0) {
+ dev_err(adc->dev, "RT_CTRL update failed: %d\n", ret);
+ return ret;
+ }
+
+ mask = (PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_MASK |
+ PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_MASK |
+ PALMAS_GPADC_CTRL1_GPADC_FORCE);
+ val = (adc->ch0_current
+ << PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_SHIFT);
+ val |= (adc->ch3_current
+ << PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_SHIFT);
+ val |= PALMAS_GPADC_CTRL1_GPADC_FORCE;
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_CTRL1, mask, val);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "Failed to update current setting: %d\n", ret);
+ return ret;
+ }
+
+ mask = (PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_MASK |
+ PALMAS_GPADC_SW_SELECT_SW_CONV_EN);
+ val = (adc_chan | PALMAS_GPADC_SW_SELECT_SW_CONV_EN);
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_SW_SELECT, mask, val);
+ if (ret < 0) {
+ dev_err(adc->dev, "SW_SELECT update failed: %d\n", ret);
+ return ret;
+ }
+ } else {
+ mask = val = 0;
+ mask |= PALMAS_GPADC_CTRL1_GPADC_FORCE;
+
+ /* Restore CH3 current source if CH3 is dual current mode. */
+ if ((adc_chan == PALMAS_ADC_CH_IN3) && adc->ch3_dual_current) {
+ mask |= PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_MASK;
+ val |= (adc->ch3_current
+ << PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_SHIFT);
+ }
+
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_CTRL1, mask, val);
+ if (ret < 0) {
+ dev_err(adc->dev, "CTRL1 update failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int palmas_gpadc_read_prepare(struct palmas_gpadc *adc, int adc_chan)
+{
+ int ret;
+
+ ret = palmas_gpadc_enable(adc, adc_chan, true);
+ if (ret < 0)
+ return ret;
+
+ return palmas_gpadc_start_mask_interrupt(adc, 0);
+}
+
+static void palmas_gpadc_read_done(struct palmas_gpadc *adc, int adc_chan)
+{
+ palmas_gpadc_start_mask_interrupt(adc, 1);
+ palmas_gpadc_enable(adc, adc_chan, false);
+}
+
+static int palmas_gpadc_calibrate(struct palmas_gpadc *adc, int adc_chan)
+{
+ s64 k;
+ int d1;
+ int d2;
+ int ret;
+ int x1 = adc->adc_info[adc_chan].x1;
+ int x2 = adc->adc_info[adc_chan].x2;
+
+ ret = palmas_read(adc->palmas, PALMAS_TRIM_GPADC_BASE,
+ adc->adc_info[adc_chan].trim1_reg, &d1);
+ if (ret < 0) {
+ dev_err(adc->dev, "TRIM read failed: %d\n", ret);
+ goto scrub;
+ }
+
+ ret = palmas_read(adc->palmas, PALMAS_TRIM_GPADC_BASE,
+ adc->adc_info[adc_chan].trim2_reg, &d2);
+ if (ret < 0) {
+ dev_err(adc->dev, "TRIM read failed: %d\n", ret);
+ goto scrub;
+ }
+
+ /* Gain Calculation */
+ k = PRECISION_MULTIPLIER;
+ k += div64_s64(PRECISION_MULTIPLIER * (d2 - d1), x2 - x1);
+ adc->adc_info[adc_chan].gain = k;
+
+ /* Offset Calculation */
+ adc->adc_info[adc_chan].offset = (d1 * PRECISION_MULTIPLIER);
+ adc->adc_info[adc_chan].offset -= ((k - PRECISION_MULTIPLIER) * x1);
+
+scrub:
+ return ret;
+}
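
As a worked example with hypothetical trim codes, for illustration only: taking d1 = 100 and d2 = 1100 against the fixed points x1 = 2064 and x2 = 3112 from the table above gives

	gain   = 1 + (d2 - d1) / (x2 - x1) = 1 + 1000/1048, stored as 1954198
	offset = d1 - (gain - 1) * x1, stored as 100000000 - 954198 * 2064 = -1869464672

so a raw code of 2500 is later corrected by palmas_gpadc_get_calibrated_code() to (2500 * 10^6 - offset) / 1954198 ≈ 2236. Keeping every intermediate value scaled by PRECISION_MULTIPLIER lets the whole calibration stay in 64-bit integer arithmetic.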
+
+static int palmas_gpadc_start_convertion(struct palmas_gpadc *adc, int adc_chan)
+{
+ unsigned int val;
+ int ret;
+
+ INIT_COMPLETION(adc->conv_completion);
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_SW_SELECT,
+ PALMAS_GPADC_SW_SELECT_SW_START_CONV0,
+ PALMAS_GPADC_SW_SELECT_SW_START_CONV0);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADC_SW_START write failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&adc->conv_completion,
+ ADC_CONVERTION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(adc->dev, "ADC conversion not completed\n");
+ ret = -ETIMEDOUT;
+ return ret;
+ }
+
+ ret = palmas_bulk_read(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_SW_CONV0_LSB, &val, 2);
+ if (ret < 0) {
+ dev_err(adc->dev, "ADCDATA read failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = (val & 0xFFF);
+ if (ret == 0) {
+ ret = palmas_gpadc_check_status(adc);
+ if (ret < 0)
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+static int palmas_gpadc_get_calibrated_code(struct palmas_gpadc *adc,
+ int adc_chan, int val)
+{
+ s64 code = val * PRECISION_MULTIPLIER;
+
+ if ((code - adc->adc_info[adc_chan].offset) < 0) {
+ dev_err(adc->dev, "No Input Connected\n");
+ return 0;
+ }
+
+ if (!(adc->adc_info[adc_chan].is_correct_code)) {
+ code -= adc->adc_info[adc_chan].offset;
+ code = div_s64(code, adc->adc_info[adc_chan].gain);
+ return code;
+ }
+
+ return val;
+}
+
+static int palmas_gpadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct palmas_gpadc *adc = iio_priv(indio_dev);
+ int adc_chan = chan->channel;
+ int ret = 0;
+
+ if (adc_chan > PALMAS_ADC_CH_MAX)
+ return -EINVAL;
+
+ mutex_lock(&adc->lock);
+ if (adc->is_shutdown) {
+ mutex_unlock(&adc->lock);
+ return -EINVAL;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = palmas_gpadc_read_prepare(adc, adc_chan);
+ if (ret < 0)
+ goto out;
+
+ ret = palmas_gpadc_start_convertion(adc, adc_chan);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "ADC start coversion failed\n");
+ goto out;
+ }
+
+ if (mask == IIO_CHAN_INFO_PROCESSED)
+ ret = palmas_gpadc_get_calibrated_code(
+ adc, adc_chan, ret);
+
+ *val = ret;
+
+ ret = IIO_VAL_INT;
+ goto out;
+
+ case IIO_CHAN_INFO_RAW_DUAL:
+ case IIO_CHAN_INFO_PROCESSED_DUAL:
+ ret = palmas_gpadc_read_prepare(adc, adc_chan);
+ if (ret < 0)
+ goto out;
+
+ ret = palmas_gpadc_start_convertion(adc, adc_chan);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "ADC start coversion failed\n");
+ goto out;
+ }
+
+ if (mask == IIO_CHAN_INFO_PROCESSED_DUAL)
+ ret = palmas_gpadc_get_calibrated_code(
+ adc, adc_chan, ret);
+
+ *val = ret;
+
+ if ((adc_chan == PALMAS_ADC_CH_IN3)
+ && adc->ch3_dual_current && val2) {
+ unsigned int reg_mask, reg_val;
+
+ reg_mask = PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_MASK;
+ reg_val = ((adc->ch3_current + 1)
+ << PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_SHIFT);
+ ret = palmas_update_bits(adc->palmas, PALMAS_GPADC_BASE,
+ PALMAS_GPADC_CTRL1,
+ reg_mask, reg_val);
+ if (ret < 0) {
+ dev_err(adc->dev, "CTRL1 update failed\n");
+ goto out;
+ }
+
+ ret = palmas_gpadc_start_convertion(adc, adc_chan);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "ADC start coversion failed\n");
+ goto out;
+ }
+
+ if (mask == IIO_CHAN_INFO_PROCESSED_DUAL)
+ ret = palmas_gpadc_get_calibrated_code(
+ adc, adc_chan, ret);
+
+ *val2 = ret;
+ }
+
+ ret = IIO_VAL_INT;
+ goto out;
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&adc->lock);
+ return -EINVAL;
+
+out:
+ palmas_gpadc_read_done(adc, adc_chan);
+ mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&adc->lock);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int auto_conv_period_get(void *data, u64 *val)
+{
+ struct palmas_gpadc *adc = (struct palmas_gpadc *)data;
+
+ *val = adc->auto_conversion_period;
+ return 0;
+}
+
+static int auto_conv_period_set(void *data, u64 val)
+{
+ struct palmas_gpadc *adc = (struct palmas_gpadc *)data;
+ struct iio_dev *iodev = dev_get_drvdata(adc->dev);
+
+ adc->auto_conversion_period = val;
+
+ mutex_lock(&iodev->mlock);
+ palmas_gpadc_auto_conv_reset(adc);
+ palmas_gpadc_auto_conv_configure(adc);
+ mutex_unlock(&iodev->mlock);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(auto_conv_period_fops, auto_conv_period_get,
+ auto_conv_period_set, "%llu\n");
+
+static ssize_t auto_conv_data_read(struct file *file,
+ char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct palmas_gpadc *adc = file->private_data;
+ unsigned char *d_iname;
+ char buf[64] = { 0, };
+ ssize_t ret = 0;
+
+ d_iname = file->f_path.dentry->d_iname;
+
+ if (!strcmp("auto_conv0_channel", d_iname)) {
+ ret = snprintf(buf, sizeof(buf), "%d\n",
+ adc->auto_conv0_data.adc_channel_number);
+ } else if (!strcmp("auto_conv1_channel", d_iname)) {
+ ret = snprintf(buf, sizeof(buf), "%d\n",
+ adc->auto_conv1_data.adc_channel_number);
+ } else if (!strcmp("auto_conv0_high_threshold", d_iname)) {
+ ret = snprintf(buf, sizeof(buf), "%d\n",
+ adc->auto_conv0_data.adc_high_threshold);
+ } else if (!strcmp("auto_conv1_high_threshold", d_iname)) {
+ ret = snprintf(buf, sizeof(buf), "%d\n",
+ adc->auto_conv1_data.adc_high_threshold);
+ } else if (!strcmp("auto_conv0_low_threshold", d_iname)) {
+ ret = snprintf(buf, sizeof(buf), "%d\n",
+ adc->auto_conv0_data.adc_low_threshold);
+ } else if (!strcmp("auto_conv1_low_threshold", d_iname)) {
+ ret = snprintf(buf, sizeof(buf), "%d\n",
+ adc->auto_conv1_data.adc_low_threshold);
+ } else if (!strcmp("auto_conv0_shutdown", d_iname)) {
+ ret = snprintf(buf, sizeof(buf), "%d\n",
+ adc->auto_conv0_data.adc_shutdown);
+ } else if (!strcmp("auto_conv1_shutdown", d_iname)) {
+ ret = snprintf(buf, sizeof(buf), "%d\n",
+ adc->auto_conv1_data.adc_shutdown);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+}
+
+static ssize_t auto_conv_data_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct palmas_gpadc *adc = file->private_data;
+ struct iio_dev *iodev = dev_get_drvdata(adc->dev);
+ unsigned char *d_iname;
+ char buf[64] = { 0, };
+ int val;
+ ssize_t buf_size;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ d_iname = file->f_path.dentry->d_iname;
+
+ if (!strcmp("auto_conv0_channel", d_iname)) {
+ adc->auto_conv0_data.adc_channel_number = val;
+ } else if (!strcmp("auto_conv1_channel", d_iname)) {
+ adc->auto_conv1_data.adc_channel_number = val;
+ } else if (!strcmp("auto_conv0_high_threshold", d_iname)) {
+ adc->auto_conv0_data.adc_high_threshold = val;
+ if (val > 0)
+ adc->auto_conv0_data.adc_low_threshold = 0;
+ } else if (!strcmp("auto_conv1_high_threshold", d_iname)) {
+ adc->auto_conv1_data.adc_high_threshold = val;
+ if (val > 0)
+ adc->auto_conv1_data.adc_low_threshold = 0;
+ } else if (!strcmp("auto_conv0_low_threshold", d_iname)) {
+ adc->auto_conv0_data.adc_low_threshold = val;
+ if (val > 0)
+ adc->auto_conv0_data.adc_high_threshold = 0;
+ } else if (!strcmp("auto_conv1_low_threshold", d_iname)) {
+ adc->auto_conv1_data.adc_low_threshold = val;
+ if (val > 0)
+ adc->auto_conv1_data.adc_high_threshold = 0;
+ } else if (!strcmp("auto_conv0_shutdown", d_iname)) {
+ adc->auto_conv0_data.adc_shutdown = val;
+ } else if (!strcmp("auto_conv1_shutdown", d_iname)) {
+ adc->auto_conv1_data.adc_shutdown = val;
+ }
+
+ mutex_lock(&iodev->mlock);
+ palmas_gpadc_auto_conv_reset(adc);
+ palmas_gpadc_auto_conv_configure(adc);
+ mutex_unlock(&iodev->mlock);
+ return buf_size;
+}
+
+static const struct file_operations auto_conv_data_fops = {
+ .open = simple_open,
+ .write = auto_conv_data_write,
+ .read = auto_conv_data_read,
+};
+
+static void palmas_gpadc_debugfs_init(struct palmas_gpadc *adc)
+{
+ adc->dentry = debugfs_create_dir(dev_name(adc->dev), NULL);
+ if (!adc->dentry) {
+ dev_err(adc->dev, "%s: failed to create debugfs dir\n",
+ __func__);
+ return;
+ }
+
+ if (adc->auto_conv0_enable || adc->auto_conv1_enable)
+ debugfs_create_file("auto_conv_period", 0644,
+ adc->dentry, adc,
+ &auto_conv_period_fops);
+
+ if (adc->auto_conv0_enable) {
+ debugfs_create_file("auto_conv0_channel", 0644,
+ adc->dentry, adc, &auto_conv_data_fops);
+ debugfs_create_file("auto_conv0_high_threshold", 0644,
+ adc->dentry, adc, &auto_conv_data_fops);
+ debugfs_create_file("auto_conv0_low_threshold", 0644,
+ adc->dentry, adc, &auto_conv_data_fops);
+ debugfs_create_file("auto_conv0_shutdown", 0644,
+ adc->dentry, adc, &auto_conv_data_fops);
+ }
+
+ if (adc->auto_conv1_enable) {
+ debugfs_create_file("auto_conv1_channel", 0644,
+ adc->dentry, adc, &auto_conv_data_fops);
+ debugfs_create_file("auto_conv1_high_threshold", 0644,
+ adc->dentry, adc, &auto_conv_data_fops);
+ debugfs_create_file("auto_conv1_low_threshold", 0644,
+ adc->dentry, adc, &auto_conv_data_fops);
+ debugfs_create_file("auto_conv1_shutdown", 0644,
+ adc->dentry, adc, &auto_conv_data_fops);
+ }
+}
+#else
+static void palmas_gpadc_debugfs_init(struct palmas_gpadc *adc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static const struct iio_info palmas_gpadc_iio_info = {
+ .read_raw = palmas_gpadc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define PALMAS_ADC_CHAN_IIO(chan) \
+{ \
+ .datasheet_name = PALMAS_DATASHEET_NAME(chan), \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .indexed = 1, \
+ .channel = PALMAS_ADC_CH_##chan, \
+}
+
+#define PALMAS_ADC_CHAN_DUAL_IIO(chan) \
+{ \
+ .datasheet_name = PALMAS_DATASHEET_NAME(chan), \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_RAW_DUAL) | \
+ BIT(IIO_CHAN_INFO_PROCESSED_DUAL), \
+ .indexed = 1, \
+ .channel = PALMAS_ADC_CH_##chan, \
+}
+
+static const struct iio_chan_spec palmas_gpadc_iio_channel[] = {
+ PALMAS_ADC_CHAN_IIO(IN0),
+ PALMAS_ADC_CHAN_IIO(IN1),
+ PALMAS_ADC_CHAN_IIO(IN2),
+ PALMAS_ADC_CHAN_DUAL_IIO(IN3),
+ PALMAS_ADC_CHAN_IIO(IN4),
+ PALMAS_ADC_CHAN_IIO(IN5),
+ PALMAS_ADC_CHAN_IIO(IN6),
+ PALMAS_ADC_CHAN_IIO(IN7),
+ PALMAS_ADC_CHAN_IIO(IN8),
+ PALMAS_ADC_CHAN_IIO(IN9),
+ PALMAS_ADC_CHAN_IIO(IN10),
+ PALMAS_ADC_CHAN_IIO(IN11),
+ PALMAS_ADC_CHAN_IIO(IN12),
+ PALMAS_ADC_CHAN_IIO(IN13),
+ PALMAS_ADC_CHAN_IIO(IN14),
+ PALMAS_ADC_CHAN_IIO(IN15),
+};
+
+static int palmas_gpadc_get_autoconv_prop(struct device *dev,
+ struct device_node *np, const char *node_name,
+ struct palmas_adc_auto_conv_property **conv_prop)
+{
+ struct device_node *conv_node;
+ struct palmas_adc_auto_conv_property *cprop;
+ int ret;
+ u32 pval;
+ s32 thres;
+
+ conv_node = of_get_child_by_name(np, node_name);
+ if (!conv_node)
+ return -EINVAL;
+
+ cprop = devm_kzalloc(dev, sizeof(*cprop), GFP_KERNEL);
+ if (!cprop)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(conv_node, "ti,adc-channel-number", &pval);
+ if (ret < 0) {
+ dev_err(dev, "Autoconversion channel is missing\n");
+ return ret;
+ }
+ cprop->adc_channel_number = pval;
+
+ ret = of_property_read_s32(conv_node, "ti,adc-high-threshold", &thres);
+ if (!ret)
+ cprop->adc_high_threshold = thres;
+
+ ret = of_property_read_s32(conv_node, "ti,adc-low-threshold", &thres);
+ if (!ret)
+ cprop->adc_low_threshold = thres;
+
+ cprop->adc_shutdown = of_property_read_bool(conv_node,
+ "ti,enable-shutdown");
+ *conv_prop = cprop;
+ return 0;
+}
+
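+/*
+ * Illustrative device tree fragment (not taken from a real board)
+ * matching the properties parsed below and in
+ * palmas_gpadc_get_autoconv_prop():
+ *
+ * gpadc {
+ *	ti,channel0-current-microamp = <5>;
+ *	ti,channel3-current-microamp = <10>;
+ *	ti,auto-conversion-period-ms = <500>;
+ *	auto_conv0 {
+ *		ti,adc-channel-number = <1>;
+ *		ti,adc-high-threshold = <3000>;
+ *		ti,enable-shutdown;
+ *	};
+ * };
+ */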
+static int palmas_gpadc_get_adc_dt_data(struct platform_device *pdev,
+ struct palmas_gpadc_platform_data **gpadc_pdata)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct palmas_gpadc_platform_data *gp_data;
+ struct device_node *map_node;
+ struct device_node *child;
+ struct iio_map *palmas_iio_map;
+ struct palmas_adc_auto_conv_property *conv_prop;
+ int ret;
+ u32 pval;
+ int nmap, nvalid_map;
+
+ gp_data = devm_kzalloc(&pdev->dev, sizeof(*gp_data), GFP_KERNEL);
+ if (!gp_data)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "ti,channel0-current-microamp", &pval);
+ if (!ret)
+ gp_data->ch0_current = pval;
+
+ ret = of_property_read_u32(np, "ti,channel3-current-microamp", &pval);
+ if (!ret)
+ gp_data->ch3_current = pval;
+
+ gp_data->ch3_dual_current = of_property_read_bool(np,
+ "ti,enable-channel3-dual-current");
+
+ gp_data->extended_delay = of_property_read_bool(np,
+ "ti,enable-extended-delay");
+
+ ret = of_property_read_u32(np, "ti,auto-conversion-period-ms", &pval);
+ if (!ret)
+ gp_data->auto_conversion_period_ms = pval;
+
+ ret = palmas_gpadc_get_autoconv_prop(&pdev->dev, np, "auto_conv0",
+ &conv_prop);
+ if (!ret)
+ gp_data->adc_auto_conv0_data = conv_prop;
+
+ ret = palmas_gpadc_get_autoconv_prop(&pdev->dev, np, "auto_conv1",
+ &conv_prop);
+ if (!ret)
+ gp_data->adc_auto_conv1_data = conv_prop;
+
+ map_node = of_get_child_by_name(np, "iio_map");
+ if (!map_node) {
+ dev_warn(&pdev->dev, "IIO map table not found\n");
+ goto done;
+ }
+
+ nmap = of_get_child_count(map_node);
+ if (!nmap)
+ goto done;
+
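+ /* Reserve one extra entry: the iio_map array is terminated by the
+ * NULL-filled entry written just after the loop below.
+ */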
+ nmap++;
+ palmas_iio_map = devm_kzalloc(&pdev->dev,
+ sizeof(*palmas_iio_map) * nmap, GFP_KERNEL);
+ if (!palmas_iio_map)
+ goto done;
+
+ nvalid_map = 0;
+ for_each_child_of_node(map_node, child) {
+ ret = of_property_read_u32(child, "ti,adc-channel-number",
+ &pval);
+ if (!ret && pval < ARRAY_SIZE(palmas_gpadc_iio_channel))
+ palmas_iio_map[nvalid_map].adc_channel_label =
+ palmas_gpadc_iio_channel[pval].datasheet_name;
+ of_property_read_string(child, "ti,adc-consumer-device",
+ &palmas_iio_map[nvalid_map].consumer_dev_name);
+ of_property_read_string(child, "ti,adc-consumer-channel",
+ &palmas_iio_map[nvalid_map].consumer_channel);
+ dev_dbg(&pdev->dev,
+ "Channel %s consumer dev %s and consumer channel %s\n",
+ palmas_iio_map[nvalid_map].adc_channel_label,
+ palmas_iio_map[nvalid_map].consumer_dev_name,
+ palmas_iio_map[nvalid_map].consumer_channel);
+ nvalid_map++;
+ }
+ palmas_iio_map[nvalid_map].adc_channel_label = NULL;
+ palmas_iio_map[nvalid_map].consumer_dev_name = NULL;
+ palmas_iio_map[nvalid_map].consumer_channel = NULL;
+
+ gp_data->iio_maps = palmas_iio_map;
+
+done:
+ *gpadc_pdata = gp_data;
+ return 0;
+}
+
+static int palmas_gpadc_probe(struct platform_device *pdev)
+{
+ struct palmas_gpadc *adc;
+ struct palmas_platform_data *pdata;
+ struct palmas_gpadc_platform_data *gpadc_pdata = NULL;
+ struct iio_dev *iodev;
+ int ret, i;
+
+ pdata = dev_get_platdata(pdev->dev.parent);
+ if (pdata && pdata->gpadc_pdata)
+ gpadc_pdata = pdata->gpadc_pdata;
+
+ if (!gpadc_pdata && pdev->dev.of_node) {
+ ret = palmas_gpadc_get_adc_dt_data(pdev, &gpadc_pdata);
+ if (ret < 0)
+ return ret;
+ }
+ if (!gpadc_pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -ENODEV;
+ }
+
+ iodev = iio_device_alloc(sizeof(*adc));
+ if (!iodev) {
+ dev_err(&pdev->dev, "iio_device_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ if (gpadc_pdata->iio_maps) {
+ ret = iio_map_array_register(iodev, gpadc_pdata->iio_maps);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "iio_map_array_register failed\n");
+ goto out;
+ }
+ }
+
+ adc = iio_priv(iodev);
+ adc->dev = &pdev->dev;
+ adc->palmas = dev_get_drvdata(pdev->dev.parent);
+ adc->adc_info = palmas_gpadc_info;
+ init_completion(&adc->conv_completion);
+ dev_set_drvdata(&pdev->dev, iodev);
+
+ adc->is_shutdown = false;
+ mutex_init(&adc->lock);
+
+ adc->auto_conversion_period = gpadc_pdata->auto_conversion_period_ms;
+ adc->irq = palmas_irq_get_virq(adc->palmas, PALMAS_GPADC_EOC_SW_IRQ);
+ ret = request_threaded_irq(adc->irq, NULL,
+ palmas_gpadc_irq,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME, dev_name(adc->dev),
+ adc);
+ if (ret < 0) {
+ dev_err(adc->dev,
+ "request irq %d failed: %dn", adc->irq, ret);
+ goto out_unregister_map;
+ }
+
+ if (gpadc_pdata->adc_auto_conv0_data) {
+ memcpy(&adc->auto_conv0_data, gpadc_pdata->adc_auto_conv0_data,
+ sizeof(adc->auto_conv0_data));
+ adc->auto_conv0_enable = true;
+ adc->irq_auto_0 = palmas_irq_get_virq(adc->palmas,
+ PALMAS_GPADC_AUTO_0_IRQ);
+ ret = request_threaded_irq(adc->irq_auto_0, NULL,
+ palmas_gpadc_irq_auto,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ "palmas-adc-auto-0", adc);
+ if (ret < 0) {
+ dev_err(adc->dev, "request auto0 irq %d failed: %dn",
+ adc->irq_auto_0, ret);
+ goto out_irq_free;
+ }
+ }
+
+ if (gpadc_pdata->adc_auto_conv1_data) {
+ memcpy(&adc->auto_conv1_data, gpadc_pdata->adc_auto_conv1_data,
+ sizeof(adc->auto_conv1_data));
+ adc->auto_conv1_enable = true;
+ adc->irq_auto_1 = palmas_irq_get_virq(adc->palmas,
+ PALMAS_GPADC_AUTO_1_IRQ);
+ ret = request_threaded_irq(adc->irq_auto_1, NULL,
+ palmas_gpadc_irq_auto,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ "palmas-adc-auto-1", adc);
+ if (ret < 0) {
+ dev_err(adc->dev, "request auto1 irq %d failed: %dn",
+ adc->irq_auto_1, ret);
+ goto out_irq_auto0_free;
+ }
+ }
+
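+ /*
+ * Round the requested current up to the nearest supported
+ * current-source setting: 0/5/15/20 uA for ch0 and
+ * 0/10/400/800 uA for ch3.
+ */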
+ if (gpadc_pdata->ch0_current == 0)
+ adc->ch0_current = PALMAS_ADC_CH0_CURRENT_SRC_0;
+ else if (gpadc_pdata->ch0_current <= 5)
+ adc->ch0_current = PALMAS_ADC_CH0_CURRENT_SRC_5;
+ else if (gpadc_pdata->ch0_current <= 15)
+ adc->ch0_current = PALMAS_ADC_CH0_CURRENT_SRC_15;
+ else
+ adc->ch0_current = PALMAS_ADC_CH0_CURRENT_SRC_20;
+
+ if (gpadc_pdata->ch3_current == 0)
+ adc->ch3_current = PALMAS_ADC_CH3_CURRENT_SRC_0;
+ else if (gpadc_pdata->ch3_current <= 10)
+ adc->ch3_current = PALMAS_ADC_CH3_CURRENT_SRC_10;
+ else if (gpadc_pdata->ch3_current <= 400)
+ adc->ch3_current = PALMAS_ADC_CH3_CURRENT_SRC_400;
+ else
+ adc->ch3_current = PALMAS_ADC_CH3_CURRENT_SRC_800;
+
+ /*
+ * If ch3_dual_current is true, the ch3 input is measured twice:
+ * once with ch3_current and once with the next higher current
+ * setting.
+ */
+ adc->ch3_dual_current = gpadc_pdata->ch3_dual_current;
+ if (adc->ch3_dual_current &&
+ (adc->ch3_current == PALMAS_ADC_CH3_CURRENT_SRC_800)) {
+ dev_warn(adc->dev,
+ "Disabling ch3_dual_current: no higher current setting\n");
+ adc->ch3_dual_current = false;
+ }
+
+ adc->extended_delay = gpadc_pdata->extended_delay;
+
+ iodev->name = MOD_NAME;
+ iodev->dev.parent = &pdev->dev;
+ iodev->info = &palmas_gpadc_iio_info;
+ iodev->modes = INDIO_DIRECT_MODE;
+ iodev->channels = palmas_gpadc_iio_channel;
+ iodev->num_channels = ARRAY_SIZE(palmas_gpadc_iio_channel);
+
+ ret = iio_device_register(iodev);
+ if (ret < 0) {
+ dev_err(adc->dev, "iio_device_register() failed: %d\n", ret);
+ goto out_irq_auto1_free;
+ }
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ for (i = 0; i < PALMAS_ADC_CH_MAX; i++) {
+ if (!(adc->adc_info[i].is_correct_code))
+ palmas_gpadc_calibrate(adc, i);
+ }
+
+ if (adc->auto_conv0_enable || adc->auto_conv1_enable)
+ device_wakeup_enable(&pdev->dev);
+
+ ret = palmas_gpadc_check_status(adc);
+ if (ret < 0)
+ goto out_iio_unregister;
+
+ palmas_gpadc_auto_conv_reset(adc);
+ ret = palmas_gpadc_auto_conv_configure(adc);
+ if (ret < 0) {
+ dev_err(adc->dev, "auto_conv_configure() failed: %d\n", ret);
+ goto out_iio_unregister;
+ }
+
+ palmas_gpadc_debugfs_init(adc);
+ return 0;
+
+out_iio_unregister:
+ iio_device_unregister(iodev);
+out_irq_auto1_free:
+ if (gpadc_pdata->adc_auto_conv1_data)
+ free_irq(adc->irq_auto_1, adc);
+out_irq_auto0_free:
+ if (gpadc_pdata->adc_auto_conv0_data)
+ free_irq(adc->irq_auto_0, adc);
+out_irq_free:
+ free_irq(adc->irq, adc);
+out_unregister_map:
+ if (gpadc_pdata->iio_maps)
+ iio_map_array_unregister(iodev);
+out:
+ iio_device_free(iodev);
+ return ret;
+}
+
+static int palmas_gpadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *iodev = dev_get_drvdata(&pdev->dev);
+ struct palmas_gpadc *adc = iio_priv(iodev);
+ struct palmas_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
+
+ debugfs_remove_recursive(adc->dentry);
+ if (pdata && pdata->gpadc_pdata && pdata->gpadc_pdata->iio_maps)
+ iio_map_array_unregister(iodev);
+ iio_device_unregister(iodev);
+ free_irq(adc->irq, adc);
+ if (adc->auto_conv0_enable)
+ free_irq(adc->irq_auto_0, adc);
+ if (adc->auto_conv1_enable)
+ free_irq(adc->irq_auto_1, adc);
+ iio_device_free(iodev);
+ return 0;
+}
+
+static void palmas_gpadc_shutdown(struct platform_device *pdev)
+{
+ struct iio_dev *iodev = dev_get_drvdata(&pdev->dev);
+ struct palmas_gpadc *adc = iio_priv(iodev);
+
+ mutex_lock(&adc->lock);
+ adc->is_shutdown = true;
+ if (adc->auto_conv0_enable || adc->auto_conv1_enable)
+ palmas_gpadc_auto_conv_reset(adc);
+ mutex_unlock(&adc->lock);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int palmas_gpadc_suspend(struct device *dev)
+{
+ struct iio_dev *iodev = dev_get_drvdata(dev);
+ struct palmas_gpadc *adc = iio_priv(iodev);
+ int wakeup = adc->auto_conv0_enable || adc->auto_conv1_enable;
+
+ if (!device_may_wakeup(dev) || !wakeup)
+ return 0;
+
+ if (adc->auto_conv0_enable)
+ enable_irq_wake(adc->irq_auto_0);
+
+ if (adc->auto_conv1_enable)
+ enable_irq_wake(adc->irq_auto_1);
+
+ return 0;
+}
+
+static int palmas_gpadc_resume(struct device *dev)
+{
+ struct iio_dev *iodev = dev_get_drvdata(dev);
+ struct palmas_gpadc *adc = iio_priv(iodev);
+ int wakeup = adc->auto_conv0_enable || adc->auto_conv1_enable;
+
+ if (!device_may_wakeup(dev) || !wakeup)
+ return 0;
+
+ if (adc->auto_conv0_enable)
+ disable_irq_wake(adc->irq_auto_0);
+
+ if (adc->auto_conv1_enable)
+ disable_irq_wake(adc->irq_auto_1);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops palmas_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(palmas_gpadc_suspend,
+ palmas_gpadc_resume)
+};
+
+static const struct of_device_id of_palmas_gpadc_match_tbl[] = {
+ { .compatible = "ti,palmas-gpadc", },
+ { /* end */ }
+};
+MODULE_DEVICE_TABLE(of, of_palmas_gpadc_match_tbl);
+
+static struct platform_driver palmas_gpadc_driver = {
+ .probe = palmas_gpadc_probe,
+ .remove = palmas_gpadc_remove,
+ .shutdown = palmas_gpadc_shutdown,
+ .driver = {
+ .name = MOD_NAME,
+ .owner = THIS_MODULE,
+ .pm = &palmas_pm_ops,
+ .of_match_table = of_palmas_gpadc_match_tbl,
+ },
+};
+
+static int __init palmas_gpadc_init(void)
+{
+ return platform_driver_register(&palmas_gpadc_driver);
+}
+module_init(palmas_gpadc_init);
+
+static void __exit palmas_gpadc_exit(void)
+{
+ platform_driver_unregister(&palmas_gpadc_driver);
+}
+module_exit(palmas_gpadc_exit);
+
+MODULE_DESCRIPTION("palmas GPADC driver");
+MODULE_AUTHOR("Pradeep Goudagunta<pgoudagunta@nvidia.com>");
+MODULE_ALIAS("platform:palmas-gpadc");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index ca8d6e66c899..3630c68dee51 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -3,6 +3,17 @@
#
menu "Light sensors"
+config SENSORS_CM3218
+ tristate "CM3218 Ambient light sensor"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ Say Y here to enable the CM3218 Ambient Light Sensor.
+ This driver will provide the measurements of ambient light intensity
+ in its own units.
+ Data from sensor is accessible via sysfs.
+
config SENSORS_ISL29018
tristate "ISL 29018 light and proximity sensor"
depends on I2C
@@ -16,15 +27,25 @@ config SENSORS_ISL29018
Data from sensor is accessible via sysfs.
config SENSORS_ISL29028
- tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor"
+ tristate "Intersil ISL29028/ISL29028 Concurrent Light and Proximity Sensor"
depends on I2C
select REGMAP_I2C
help
- Provides driver for the Intersil's ISL29028 device.
+ Provides a driver for Intersil's ISL29028/ISL29029 devices.
This driver supports the sysfs interface to get the ALS, IR intensity,
Proximity value via iio. The ISL29028 provides the concurrent sensing
of ambient light and proximity.
+config SENSORS_JSA1127
+ tristate "JSA1127 Ambient light sensor"
+ depends on I2C
+ default n
+ help
+ Say Y here to enable the JSA1127 Ambient Light Sensor.
+ This driver provides measurements of ambient light intensity in
+ its own units.
+ Data from sensor is accessible via sysfs.
+
config TSL2583
tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
depends on I2C
@@ -40,4 +61,94 @@ config TSL2x7x
tmd2672, tsl2772, tmd2772 devices.
Provides iio_events and direct access via sysfs.
+config SENSORS_LTR558
+ tristate "LTR558 Ambient light and proximity sensor"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for ambient light sensing and
+ proximity IR sensing from the Lite-On Technology LTR558.
+
+config SENSORS_MAX44005
+ tristate "MAX44005 ALS, RGB, temperature, IR, proximity Sensor"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for ambient light, RGB, IR,
+ proximity and temperature sensing from the MAX44005.
+
+config SENSORS_STM8T143
+ tristate "STM8T143 proximity sensor"
+ default n
+ help
+ If you say yes here you get support for the ST Microelectronics
+ STM8T143 sensor driver.
+ The STM8T143 sensor driver provides support for proximity sensing.
+ Data from sensor is accessible via sysfs.
+
+config SENSORS_TCS3772
+ tristate "TCS3772 Ambient light, RGB and proximity Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ default n
+ help
+ If you say yes here you get support for ambient light, RGB and
+ proximity sensing from the TCS3772.
+
+config SENSORS_CM3217
+ tristate "CM3217 Ambient light sensor"
+ depends on I2C
+ select LS_OF
+ select LS_SYSFS
+ default n
+ help
+ Say Y here to enable the CM3217 Ambient Light Sensor.
+ This driver will provide the measurements of ambient light intensity
+ in its own units.
+ Data from sensor is accessible via sysfs.
+
+config LS_OF
+ tristate "Device Tree parsing for Light sensor specification details"
+ depends on OF
+ default n
+ help
+ Say Y for helpers to retrieve the sensor specification details like
+ vendor, power consumed, max range, resolution and integration time.
+ This information is common to most ambient light sensors as well as
+ proximity sensors. Hence, common callbacks are provided to retrieve
+ this information from the respective device tree nodes.
+
+config LS_SYSFS
+ tristate "IIO registration for sensor meta data"
+ depends on LS_OF
+ default n
+ help
+ Say Y to share sensor specification details with user space.
+ The information is retrieved from the device's platform data. If you
+ are using helpers from this file, make sure the device's platform
+ data contains all the required information.
+
+config SENSORS_IQS253
+ tristate "IQS253 capacitive sensor"
+ depends on I2C
+ select LS_OF
+ select LS_SYSFS
+ default n
+ help
+ Say Y to enable proximity detection using the IQS253 capacitive sensor.
+ This driver uses I2C to program the device.
+ It communicates proximity events to the SAR sensor through a GPIO.
+
+config SENSORS_IQS253_AS_PROXIMITY
+ tristate "Use IQS253 capacitive sensor for proximity detection"
+ depends on I2C
+ select LS_OF
+ select LS_SYSFS
+ default n
+ help
+ Say Y to enable proximity detection using the IQS253 capacitive sensor.
+ This driver exports the sensor's specifications from the DT node to user space.
+ Hence, it needs LS_OF and LS_SYSFS support.
+ This driver uses I2C to program the device.
+
endmenu
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
index 9960fdf7c15b..7afa16109514 100644
--- a/drivers/staging/iio/light/Makefile
+++ b/drivers/staging/iio/light/Makefile
@@ -1,8 +1,23 @@
#
# Makefile for industrial I/O Light sensors
#
+GCOV_PROFILE := y
+ccflags-y := -Werror
+
+obj-$(CONFIG_SENSORS_CM3218) += cm3218.o
obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
obj-$(CONFIG_TSL2583) += tsl2583.o
obj-$(CONFIG_TSL2x7x) += tsl2x7x_core.o
+obj-$(CONFIG_SENSORS_JSA1127) += jsa1127.o
+obj-$(CONFIG_SENSORS_LTR558) += ltr558als.o
+obj-$(CONFIG_SENSORS_MAX44005) += max44005.o
+obj-$(CONFIG_SENSORS_STM8T143) += stm8t143.o
+obj-$(CONFIG_SENSORS_TCS3772) += tcs3772.o
+obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
+obj-$(CONFIG_SENSORS_CM3217) += cm3217.o
+obj-$(CONFIG_LS_SYSFS) += ls_sysfs.o
+obj-$(CONFIG_LS_OF) += ls_dt.o
+obj-$(CONFIG_SENSORS_IQS253) += iqs253.o
diff --git a/drivers/staging/iio/light/cm3217.c b/drivers/staging/iio/light/cm3217.c
new file mode 100644
index 000000000000..a955bb255d7e
--- /dev/null
+++ b/drivers/staging/iio/light/cm3217.c
@@ -0,0 +1,542 @@
+/* Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/light/ls_sysfs.h>
+#include <linux/iio/light/ls_dt.h>
+
+/* IT = Integration Time. The amount of time the photons hit the sensor.
+ * STEP = the value from HW which is the photon count during IT.
+ * LUX = STEP * (CM3217_RESOLUTION_STEP / IT) / CM3217_RESOLUTION_DIVIDER
+ * The LUX value above is reported as LUX * CM3217_INPUT_LUX_DIVISOR.
+ * The final division is done in user space to get a float value of
+ * LUX / CM3217_INPUT_LUX_DIVISOR.
+ */
+#define CM3217_NAME "cm3217"
+#define CM3217_I2C_ADDR_CMD1_WR (0x10)
+#define CM3217_I2C_ADDR_CMD2_WR (0x11)
+#define CM3217_I2C_ADDR_RD (0x10)
+#define CM3217_HW_CMD1_DFLT (0x22)
+#define CM3217_HW_CMD1_BIT_SD (0)
+#define CM3217_HW_CMD1_BIT_IT_T (2)
+#define CM3217_HW_CMD2_BIT_FD_IT (5)
+#define CM3217_HW_DELAY (10)
+#define CM3217_POWER_UA (90)
+#define CM3217_RESOLUTION (1)
+#define CM3217_RESOLUTION_STEP (6000000L)
+#define CM3217_RESOLUTION_DIVIDER (10000L)
+#define CM3217_POLL_DELAY_MS_DFLT (1600)
+#define CM3217_POLL_DELAY_MS_MIN (33 + CM3217_HW_DELAY)
+#define CM3217_INPUT_LUX_DIVISOR (10)
+#define CM3217_INPUT_LUX_MIN (0)
+#define CM3217_INPUT_LUX_MAX (119156)
+#define CM3217_INPUT_LUX_FUZZ (0)
+#define CM3217_INPUT_LUX_FLAT (0)
+#define CM3217_MAX_REGULATORS (1)
+
+enum als_state {
+ CHIP_POWER_OFF,
+ CHIP_POWER_ON_ALS_OFF,
+ CHIP_POWER_ON_ALS_ON,
+};
+
+enum i2c_state {
+ I2C_XFER_NOT_ENABLED,
+ I2C_XFER_OK_REG_NOT_SYNC,
+ I2C_XFER_OK_REG_SYNC,
+};
+
+struct cm3217_inf {
+ struct i2c_client *i2c;
+ struct workqueue_struct *wq;
+ struct delayed_work dw;
+ struct regulator_bulk_data vreg[CM3217_MAX_REGULATORS];
+ int raw_illuminance_val;
+ int als_state;
+};
+
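+/*
+ * As implemented here, the 16-bit ALS result is fetched as two
+ * single-byte reads from two consecutive i2c addresses; the byte read
+ * from CM3217_I2C_ADDR_RD supplies the high byte of the result.
+ */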
+static int cm3217_i2c_rd(struct cm3217_inf *inf)
+{
+ struct i2c_msg msg[2];
+ __u8 buf[2];
+
+ msg[0].addr = CM3217_I2C_ADDR_RD + 1;
+ msg[0].flags = I2C_M_RD;
+ msg[0].len = 1;
+ msg[0].buf = &buf[0];
+ msg[1].addr = CM3217_I2C_ADDR_RD;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = &buf[1];
+ if (i2c_transfer(inf->i2c->adapter, msg, 2) != 2)
+ return -EIO;
+
+ inf->raw_illuminance_val = (__u16)((buf[1] << 8) | buf[0]);
+ return 0;
+}
+
+static int cm3217_i2c_wr(struct cm3217_inf *inf, __u8 cmd1, __u8 cmd2)
+{
+ struct i2c_msg msg[2];
+ __u8 buf[2];
+
+ buf[0] = cmd1;
+ buf[1] = cmd2;
+ msg[0].addr = CM3217_I2C_ADDR_CMD1_WR;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &buf[0];
+ msg[1].addr = CM3217_I2C_ADDR_CMD2_WR;
+ msg[1].flags = 0;
+ msg[1].len = 1;
+ msg[1].buf = &buf[1];
+ if (i2c_transfer(inf->i2c->adapter, msg, 2) != 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int cm3217_cmd_wr(struct cm3217_inf *inf, __u8 it_t, __u8 fd_it)
+{
+ __u8 cmd1;
+ __u8 cmd2;
+ int err;
+
+ cmd1 = (CM3217_HW_CMD1_DFLT);
+ if (!inf->als_state)
+ cmd1 |= (1 << CM3217_HW_CMD1_BIT_SD);
+ cmd1 |= (it_t << CM3217_HW_CMD1_BIT_IT_T);
+ cmd2 = fd_it << CM3217_HW_CMD2_BIT_FD_IT;
+ err = cm3217_i2c_wr(inf, cmd1, cmd2);
+ return err;
+}
+
+static int cm3217_vreg_dis(struct cm3217_inf *inf, unsigned int i)
+{
+ int err = 0;
+
+ if (inf->vreg[i].ret && (inf->vreg[i].consumer != NULL)) {
+ err = regulator_disable(inf->vreg[i].consumer);
+ if (!err)
+ dev_dbg(&inf->i2c->dev, "%s %s\n",
+ __func__, inf->vreg[i].supply);
+ else
+ dev_err(&inf->i2c->dev, "%s %s ERR\n",
+ __func__, inf->vreg[i].supply);
+ }
+ inf->vreg[i].ret = 0;
+ return err;
+}
+
+static int cm3217_vreg_dis_all(struct cm3217_inf *inf)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = CM3217_MAX_REGULATORS; i > 0; i--)
+ err |= cm3217_vreg_dis(inf, (i - 1));
+ return err;
+}
+
+static int cm3217_vreg_en(struct cm3217_inf *inf, unsigned int i)
+{
+ int err = 0;
+
+ if (!inf->vreg[i].ret && (inf->vreg[i].consumer != NULL)) {
+ err = regulator_enable(inf->vreg[i].consumer);
+ if (!err) {
+ inf->vreg[i].ret = 1;
+ dev_dbg(&inf->i2c->dev, "%s %s\n",
+ __func__, inf->vreg[i].supply);
+ err = 1; /* flag regulator state change */
+ } else {
+ dev_err(&inf->i2c->dev, "%s %s ERR\n",
+ __func__, inf->vreg[i].supply);
+ }
+ }
+ return err;
+}
+
+static int cm3217_vreg_en_all(struct cm3217_inf *inf)
+{
+ unsigned i;
+ int err = 0;
+
+ for (i = 0; i < CM3217_MAX_REGULATORS; i++)
+ err |= cm3217_vreg_en(inf, i);
+ return err;
+}
+
+static void cm3217_vreg_exit(struct cm3217_inf *inf)
+{
+ int i;
+
+ for (i = 0; i < CM3217_MAX_REGULATORS; i++) {
+ regulator_put(inf->vreg[i].consumer);
+ inf->vreg[i].consumer = NULL;
+ }
+}
+
+static int cm3217_vreg_init(struct cm3217_inf *inf)
+{
+ unsigned int i;
+ int err = 0;
+
+ /*
+ * regulator names in order of powering on.
+ * ARRAY_SIZE(cm3217_vregs) must be <= CM3217_MAX_REGULATORS
+ */
+ char *cm3217_vregs[] = {
+ "vdd",
+ };
+
+ for (i = 0; i < ARRAY_SIZE(cm3217_vregs); i++) {
+ inf->vreg[i].supply = cm3217_vregs[i];
+ inf->vreg[i].ret = 0;
+ inf->vreg[i].consumer = regulator_get(&inf->i2c->dev,
+ inf->vreg[i].supply);
+ if (IS_ERR(inf->vreg[i].consumer)) {
+ err = PTR_ERR(inf->vreg[i].consumer);
+ dev_err(&inf->i2c->dev, "%s err %d for %s\n",
+ __func__, err, inf->vreg[i].supply);
+ inf->vreg[i].consumer = NULL;
+ }
+ }
+ for (; i < CM3217_MAX_REGULATORS; i++)
+ inf->vreg[i].consumer = NULL;
+ return err;
+}
+
+static ssize_t cm3217_chan_regulator_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct cm3217_inf *inf = iio_priv(indio_dev);
+ unsigned int enable = 0;
+
+ if (inf->als_state != CHIP_POWER_OFF)
+ enable = 1;
+ return sprintf(buf, "%d\n", inf->als_state);
+}
+
+static ssize_t cm3217_chan_regulator_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u8 enable;
+ int ret = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct cm3217_inf *inf = iio_priv(indio_dev);
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (enable == (inf->als_state != CHIP_POWER_OFF))
+ return 1;
+
+ if (!inf->vreg[0].consumer)
+ goto success;
+
+ if (enable)
+ ret = cm3217_vreg_en_all(inf);
+ else
+ ret = cm3217_vreg_dis_all(inf);
+
+ if (ret != enable) {
+ dev_err(&inf->i2c->dev,
+ "func:%s line:%d err:%d fails\n",
+ __func__, __LINE__, ret);
+ goto fail;
+ }
+
+success:
+ inf->als_state = enable;
+fail:
+ return ret ? ret : 1;
+}
+
+static void cm3217_work(struct work_struct *ws)
+{
+ struct cm3217_inf *inf;
+ struct iio_dev *indio_dev;
+
+ inf = container_of(ws, struct cm3217_inf, dw.work);
+ indio_dev = iio_priv_to_dev(inf);
+ mutex_lock(&indio_dev->mlock);
+ cm3217_i2c_rd(inf);
+ mutex_unlock(&indio_dev->mlock);
+}
+
+static ssize_t cm3217_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct cm3217_inf *inf = iio_priv(indio_dev);
+ unsigned int enable = 0;
+
+ if (inf->als_state == CHIP_POWER_ON_ALS_ON)
+ enable = 1;
+ return sprintf(buf, "%u\n", enable);
+}
+
+static ssize_t cm3217_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct cm3217_inf *inf = iio_priv(indio_dev);
+ u8 enable;
+ int err = 0;
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
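+ /*
+ * als_state is CHIP_POWER_ON_ALS_OFF (1) or CHIP_POWER_ON_ALS_ON (2)
+ * once powered, so enable == als_state - 1 means no state change is
+ * requested.
+ */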
+ if (enable == (inf->als_state - 1))
+ goto success;
+
+ mutex_lock(&indio_dev->mlock);
+ if (enable) {
+ err = cm3217_cmd_wr(inf, 0, 0);
+ queue_delayed_work(inf->wq, &inf->dw,
+ msecs_to_jiffies(CM3217_HW_DELAY));
+ } else {
+ cancel_delayed_work_sync(&inf->dw);
+ }
+ mutex_unlock(&indio_dev->mlock);
+ if (err)
+ return err;
+
+success:
+ inf->als_state = enable + 1;
+ return count;
+}
+
+static ssize_t cm3217_raw_illuminance_val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct cm3217_inf *inf = iio_priv(indio_dev);
+
+ if (inf->als_state != CHIP_POWER_ON_ALS_ON)
+ return sprintf(buf, "-1\n");
+ queue_delayed_work(inf->wq, &inf->dw, 0);
+ return sprintf(buf, "%d\n", inf->raw_illuminance_val);
+}
+
+static IIO_DEVICE_ATTR(in_illuminance_regulator_enable,
+ S_IRUGO | S_IWUSR,
+ cm3217_chan_regulator_enable_show,
+ cm3217_chan_regulator_enable, 0);
+static IIO_DEVICE_ATTR(in_illuminance_enable,
+ S_IRUGO | S_IWUSR,
+ cm3217_enable_show, cm3217_enable_store, 0);
+static IIO_DEVICE_ATTR(in_illuminance_raw, S_IRUGO,
+ cm3217_raw_illuminance_val_show, NULL, 0);
+static IIO_CONST_ATTR(vendor, "Capella");
+/* FD_IT = 000b, IT_TIMES = 1/2T i.e., 00b nano secs */
+static IIO_CONST_ATTR(in_illuminance_integration_time, "480000");
+/* WDM = 0b, IT_TIMES = 1/2T i.e., 00b raw_illuminance_val */
+static IIO_CONST_ATTR(in_illuminance_max_range, "78643.2");
+/* WDM = 0b, IT_TIMES = 1/2T i.e., 00b mLux */
+static IIO_CONST_ATTR(in_illuminance_resolution, "307");
+static IIO_CONST_ATTR(in_illuminance_power_consumed, "1670"); /* milli Watt */
+
+static struct attribute *cm3217_attrs[] = {
+ &iio_dev_attr_in_illuminance_enable.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_regulator_enable.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_raw.dev_attr.attr,
+ &iio_const_attr_vendor.dev_attr.attr,
+ &iio_const_attr_in_illuminance_integration_time.dev_attr.attr,
+ &iio_const_attr_in_illuminance_max_range.dev_attr.attr,
+ &iio_const_attr_in_illuminance_resolution.dev_attr.attr,
+ &iio_const_attr_in_illuminance_power_consumed.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group cm3217_attr_group = {
+ .name = CM3217_NAME,
+ .attrs = cm3217_attrs
+};
+
+static const struct iio_info cm3217_iio_info = {
+ .attrs = &cm3217_attr_group,
+ .driver_module = THIS_MODULE
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int cm3217_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct cm3217_inf *inf = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (inf->als_state != CHIP_POWER_OFF)
+ ret = cm3217_vreg_dis_all(inf);
+
+ if (ret)
+ dev_err(&client->adapter->dev,
+ "%s err in reg enable\n", __func__);
+ return ret;
+}
+
+static int cm3217_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct cm3217_inf *inf = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (inf->als_state != CHIP_POWER_OFF)
+ ret = cm3217_vreg_en_all(inf);
+
+ if (ret)
+ dev_err(&client->adapter->dev,
+ "%s err in reg enable\n", __func__);
+ if (inf->als_state == CHIP_POWER_ON_ALS_ON)
+ ret = cm3217_cmd_wr(inf, 0, 0);
+ if (ret)
+ dev_err(&client->adapter->dev,
+ "%s err in cm3217 write\n", __func__);
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(cm3217_pm_ops, cm3217_suspend, cm3217_resume);
+#define CM3217_PM_OPS (&cm3217_pm_ops)
+#else
+#define CM3217_PM_OPS NULL
+#endif
+
+static int cm3217_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct cm3217_inf *inf = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ destroy_workqueue(inf->wq);
+ cm3217_vreg_exit(inf);
+ iio_device_free(indio_dev);
+ dev_dbg(&client->adapter->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int cm3217_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cm3217_inf *inf;
+ struct iio_dev *indio_dev;
+ struct lightsensor_spec *ls_spec;
+ int err;
+
+ indio_dev = iio_device_alloc(sizeof(*inf));
+ if (indio_dev == NULL) {
+ dev_err(&client->dev, "%s iio_device_alloc err\n", __func__);
+ return -ENOMEM;
+ }
+
+ inf = iio_priv(indio_dev);
+
+ ls_spec = of_get_ls_spec(&client->dev);
+ if (!ls_spec) {
+ dev_warn(&client->dev,
+ "devname:%s func:%s line:%d invalid meta data, use default\n",
+ id->name, __func__, __LINE__);
+ } else {
+ fill_ls_attrs(ls_spec, cm3217_attrs);
+ }
+
+ inf->wq = create_singlethread_workqueue(CM3217_NAME);
+ if (!inf->wq) {
+ dev_err(&client->dev, "%s workqueue err\n", __func__);
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ inf->i2c = client;
+ err = cm3217_vreg_init(inf);
+ if (err) {
+ dev_info(&client->dev,
+ "%s regulator init failed, assume always on\n", __func__);
+ err = 0;
+ }
+
+ INIT_DELAYED_WORK(&inf->dw, cm3217_work);
+ inf->als_state = 0;
+
+ i2c_set_clientdata(client, indio_dev);
+ indio_dev->info = &cm3217_iio_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ err = iio_device_register(indio_dev);
+ if (err) {
+ dev_err(&client->dev, "%s iio_device_register err\n", __func__);
+ goto err_iio_register;
+ }
+
+ dev_info(&client->dev, "%s success\n", __func__);
+ return 0;
+
+err_iio_register:
+ cm3217_vreg_exit(inf);
+ destroy_workqueue(inf->wq);
+err_wq:
+ iio_device_free(indio_dev);
+ dev_err(&client->dev, "%s err=%d\n", __func__, err);
+ return err;
+}
+
+static const struct i2c_device_id cm3217_i2c_device_id[] = {
+ {"cm3217", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cm3217_i2c_device_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id cm3217_of_match[] = {
+ { .compatible = "capella,cm3217", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cm3217_of_match);
+#endif
+
+static struct i2c_driver cm3217_driver = {
+ .probe = cm3217_probe,
+ .remove = cm3217_remove,
+ .id_table = cm3217_i2c_device_id,
+ .driver = {
+ .name = "cm3217",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cm3217_of_match),
+ .pm = CM3217_PM_OPS,
+ },
+};
+module_i2c_driver(cm3217_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CM3217 driver");
+MODULE_AUTHOR("NVIDIA Corp");
diff --git a/drivers/staging/iio/light/cm3218.c b/drivers/staging/iio/light/cm3218.c
new file mode 100644
index 000000000000..3a0b906e8939
--- /dev/null
+++ b/drivers/staging/iio/light/cm3218.c
@@ -0,0 +1,732 @@
+/*
+ * An IIO driver for the CM3218 and CM32181 light sensors.
+ *
+ * IIO light driver for monitoring ambient light intensity in lux.
+ *
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/notifier.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define CM3218_REG_CONFIGURE 0x00
+#define CM3218_REG_ALS_DATA 0x04
+#define CM3218_REG_MAX 0x04
+
+#define CONFIGURE_ALS_MASK BIT(0)
+#define CONFIGURE_ALS_EN 0x00
+
+#define CONFIGURE_SHDN_MASK CONFIGURE_ALS_MASK
+#define CONFIGURE_SHDN_EN 0x01
+
+#define I2C_MAX_TIMEOUT msecs_to_jiffies(20) /* 20 mSec */
+
+#define CM3218_CONFIGURE_DEFAULT_VAL 0x0400
+#define CM32181_CONFIGURE_DEFAULT_VAL 0x0000
+
+#define CM3218_ALS_PEAK_VAL 0xFFFF
+#define CM3218_ALS_RESOLUTION 0
+#define CM3218_ALS_RESOLUTION_MICRO 10000 /* 10 mLux/lsb */
+#define CM3218_ALS_INTEGRATION_TIME 700 /* mSec */
+#define CM3218_VENDOR "Capella"
+#define CM3218_NAME "cm3218"
+
+enum als_state {
+ CHIP_POWER_OFF,
+ CHIP_POWER_ON_ALS_OFF,
+ CHIP_POWER_ON_ALS_ON,
+};
+
+enum i2c_state {
+ I2C_XFER_NOT_ENABLED,
+ I2C_XFER_OK_REG_NOT_SYNC,
+ I2C_XFER_OK_REG_SYNC,
+};
+
+struct cm3218_chip {
+ struct i2c_client *client;
+ const struct i2c_device_id *id;
+ struct regulator_bulk_data *consumers;
+ struct workqueue_struct *wq;
+ struct delayed_work dw;
+ struct notifier_block regulator_nb;
+ int i2c_xfer_state;
+ struct regmap *regmap;
+
+ u8 als_state;
+ int shutdown_complete;
+};
+
+/* regulators used by the device
+ * vdd_1v8b is used by the cm32181 to select i2c address 0x48.
+ * Since it is always enabled, it does not require explicit
+ * handling in the driver.
+ */
+static struct regulator_bulk_data cm3218_consumers[] = {
+ {
+ .supply = "vdd",
+ },
+};
+
+/* device's regmap configuration for i2c communication */
+/* non-cacheable registers */
+static bool cm3218_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == CM3218_REG_ALS_DATA;
+}
+
+static bool cm3218_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg == CM3218_REG_CONFIGURE;
+}
+
+static bool cm3218_readable_reg(struct device *dev, unsigned int reg)
+{
+ return reg == CM3218_REG_ALS_DATA;
+}
+
+static struct reg_default cm3218_reg_defaults = {
+ .reg = CM3218_REG_CONFIGURE,
+ .def = CM3218_CONFIGURE_DEFAULT_VAL,
+};
+
+/* TODO: in linux-next we have to add
+ * val_format_endian to take care of endianness and
+ * use regmap_access_table instead of the volatile_reg callbacks
+ */
+static const struct regmap_config cm3218_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .readable_reg = cm3218_readable_reg,
+ .writeable_reg = cm3218_writeable_reg,
+ .volatile_reg = cm3218_volatile_reg,
+ .max_register = CM3218_REG_MAX,
+ .reg_defaults = &cm3218_reg_defaults,
+ .num_reg_defaults = 1,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/* device's read/write functionality and a helper */
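+/*
+ * Swap the two low-order bytes of *val in place. The regmap core and
+ * the SMBus word transfers used here disagree on byte order, and this
+ * regmap_config has no endianness hook (see the TODO above), so the
+ * swap is done by hand around every regmap access.
+ */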
+static void change_endianness_16(int *val)
+{
+ u8 *buf = (u8 *)val;
+ u8 temp = buf[0];
+ buf[0] = buf[1];
+ buf[1] = temp;
+}
+
+/* The cm3218 CONFIGURE register must be written back once after
+ * power-on so that the regmap cache and the hardware stay in sync.
+ */
+static int cm3218_configure_sync(struct cm3218_chip *chip)
+{
+ int ret;
+ unsigned int val;
+
+ if (chip->i2c_xfer_state == I2C_XFER_OK_REG_SYNC)
+ return 0;
+
+ ret = regmap_read(chip->regmap, CM3218_REG_CONFIGURE, &val);
+ if (ret != 0)
+ return ret;
+
+ change_endianness_16(&val);
+ ret = i2c_smbus_write_word_data(chip->client,
+ CM3218_REG_CONFIGURE, val);
+ if (ret)
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d i2c_write fails\n",
+ chip->id->name, __func__, __LINE__);
+ else
+ chip->i2c_xfer_state = I2C_XFER_OK_REG_SYNC;
+
+ return ret;
+}
+
+static int _cm3218_register_read(struct cm3218_chip *chip, int reg, int *val)
+{
+ int ret;
+ struct iio_dev *indio_dev = iio_priv_to_dev(chip);
+
+ if (!chip->regmap)
+ return -ENODEV;
+
+ if (chip->i2c_xfer_state == I2C_XFER_NOT_ENABLED) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d device not ready for i2c xfer\n",
+ chip->id->name, __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ ret = regmap_read(chip->regmap, reg, val);
+ if (ret)
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d regmap_read fails\n",
+ chip->id->name, __func__, __LINE__);
+
+ change_endianness_16(val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static int _cm3218_register_write(struct cm3218_chip *chip, int reg, int mask,
+ int val)
+{
+ int ret;
+ struct iio_dev *indio_dev = iio_priv_to_dev(chip);
+
+ if (!chip->regmap)
+ return -ENODEV;
+
+ if (chip->i2c_xfer_state == I2C_XFER_NOT_ENABLED) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d device not ready for i2c xfer\n",
+ chip->id->name, __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ change_endianness_16(&mask);
+ change_endianness_16(&val);
+
+ ret = cm3218_configure_sync(chip);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d cm3218_sync fails\n",
+ chip->id->name, __func__, __LINE__);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+
+ ret = regmap_update_bits(chip->regmap, reg, mask, val);
+ if (ret)
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d regmap_write fails\n",
+ chip->id->name, __func__, __LINE__);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+/* sync the device's registers with cache after power up during resume */
+static int _cm3218_register_sync(struct cm3218_chip *chip)
+{
+ int ret;
+ struct iio_dev *indio_dev = iio_priv_to_dev(chip);
+
+ if (!chip->regmap)
+ return -ENODEV;
+
+ if (chip->i2c_xfer_state == I2C_XFER_NOT_ENABLED) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d device not ready for i2c xfer\n",
+ chip->id->name, __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ /* regmap sync doesn't work for reg_defaults
+ * hence fall back to i2c write
+ */
+ ret = cm3218_configure_sync(chip);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d cm3218_sync fails\n",
+ chip->id->name, __func__, __LINE__);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+/* device's registration with iio to facilitate user operations */
+static ssize_t cm3218_chan_regulator_enable(struct iio_dev *indio_dev,
+ uintptr_t priv, struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ u8 enable;
+ int ret = 0;
+ struct cm3218_chip *chip = iio_priv(indio_dev);
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ if (enable == (chip->als_state != CHIP_POWER_OFF))
+ goto success;
+
+ if (!chip->consumers)
+ goto success;
+
+ if (enable)
+ ret = regulator_bulk_enable(1, chip->consumers);
+ else
+ ret = regulator_bulk_disable(1, chip->consumers);
+
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d _cm3218_register_read fails\n",
+ chip->id->name, __func__, __LINE__);
+ goto fail;
+ }
+
+success:
+ chip->als_state = enable;
+fail:
+ return ret ? ret : 1;
+}
+
+static ssize_t cm3218_chan_enable(struct iio_dev *indio_dev,
+ uintptr_t priv, struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ u8 enable;
+ int ret = 0;
+ struct cm3218_chip *chip = iio_priv(indio_dev);
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (chip->als_state == CHIP_POWER_OFF) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d please enable regulator first\n",
+ chip->id->name, __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ /* a small optimization
+ * chip->als_state = either CHIP_POWER_ON_ALS_OFF (val = 1)
+ * or CHIP_POWER_ON_ALS_ON (val = 2)
+ * if enable = 0 => request for
+ * CHIP_POWER_ON_ALS_ON -> CHIP_POWER_ON_ALS_OFF has come
+ * if enable = 1 => request for
+ * CHIP_POWER_ON_ALS_OFF -> CHIP_POWER_ON_ALS_ON has come
+ * if there is no state change, goto success
+ */
+ if (enable == (chip->als_state - 1))
+ goto success;
+
+ if (enable) {
+ ret = _cm3218_register_write(chip, CM3218_REG_CONFIGURE,
+ CONFIGURE_ALS_MASK,
+ CONFIGURE_ALS_EN);
+ if (ret)
+ return ret;
+ } else {
+ ret = _cm3218_register_write(chip, CM3218_REG_CONFIGURE,
+ CONFIGURE_ALS_MASK,
+ !CONFIGURE_ALS_EN);
+ if (ret)
+ return ret;
+ }
+
+success:
+ /* a small optimization*/
+ chip->als_state = enable + 1;
+ return ret ? ret : 1;
+}
+
+/* chan_regulator_enable is used to enable regulators used by
+ * particular channel.
+ * chan_enable actually configures various registers to activate
+ * a particular channel.
+ */
+static const struct iio_chan_spec_ext_info cm3218_ext_info[] = {
+ {
+ .name = "regulator_enable",
+ .write = cm3218_chan_regulator_enable,
+ },
+ {
+ .name = "enable",
+ .write = cm3218_chan_enable,
+ },
+ {
+ },
+};
+
+static const struct iio_chan_spec cm3218_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_PEAK) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .ext_info = cm3218_ext_info,
+ },
+};
+
+static int cm3218_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct cm3218_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+ int value;
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *val = CM3218_ALS_RESOLUTION;
+ *val2 = CM3218_ALS_RESOLUTION_MICRO; /* 10 mlux/lsb */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_PEAK:
+ *val = CM3218_ALS_PEAK_VAL;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = CM3218_ALS_INTEGRATION_TIME;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_RAW:
+ if (chip->als_state != CHIP_POWER_ON_ALS_ON)
+ return -EINVAL;
+ ret = _cm3218_register_read(chip, CM3218_REG_ALS_DATA, &value);
+ if (ret)
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d _cm3218_register_read fails\n",
+ chip->id->name, __func__, __LINE__);
+ if (!ret) {
+ *val = value;
+ ret = IIO_VAL_INT;
+ }
+ }
+ return ret;
+}
+
+static IIO_CONST_ATTR(vendor, CM3218_VENDOR);
+/*
+ * FIX ME!!! pass the following through Device Tree instead of here.
+ * The following properties can vary depending on the board being used
+ * These values are specific to T124 Ardbeg.
+ */
+static IIO_CONST_ATTR(in_illuminance_integration_time, "600000"); /* 600 ms */
+static IIO_CONST_ATTR(in_illuminance_max_range, "27525.1"); /* lux */
+static IIO_CONST_ATTR(in_illuminance_resolution, "102"); /* mLux/step */
+static IIO_CONST_ATTR(in_illuminance_power_consumed, "1680"); /* milli Watt */
+
+static struct attribute *cm3218_attributes[] = {
+ &iio_const_attr_vendor.dev_attr.attr,
+ &iio_const_attr_in_illuminance_integration_time.dev_attr.attr,
+ &iio_const_attr_in_illuminance_max_range.dev_attr.attr,
+ &iio_const_attr_in_illuminance_resolution.dev_attr.attr,
+ &iio_const_attr_in_illuminance_power_consumed.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group cm3218_attr_group = {
+ .attrs = cm3218_attributes,
+};
+
+/* read_raw is used to report a channel's data to user space
+ * in non-SI units
+ */
+static const struct iio_info cm3218_iio_info = {
+ .attrs = &cm3218_attr_group,
+ .driver_module = THIS_MODULE,
+ .read_raw = cm3218_read_raw,
+};
+
+/* chip's power management helpers */
+static int cm3218_activate_standby_mode(struct cm3218_chip *chip)
+{
+ return _cm3218_register_write(chip, CM3218_REG_CONFIGURE,
+ CONFIGURE_SHDN_MASK,
+ CONFIGURE_SHDN_EN);
+}
+
+static void cm3218_work(struct work_struct *ws)
+{
+ struct cm3218_chip *chip;
+ struct iio_dev *indio_dev;
+
+ chip = container_of(ws, struct cm3218_chip, dw.work);
+ if (!chip->consumers)
+ return;
+
+ indio_dev = iio_priv_to_dev(chip);
+ mutex_lock(&indio_dev->mlock);
+ if (regulator_is_enabled(chip->consumers[0].consumer) &&
+ (chip->als_state == CHIP_POWER_OFF) &&
+ (chip->i2c_xfer_state != I2C_XFER_OK_REG_SYNC)) {
+ if (chip->i2c_xfer_state == I2C_XFER_NOT_ENABLED)
+ chip->i2c_xfer_state = I2C_XFER_OK_REG_NOT_SYNC;
+ cm3218_activate_standby_mode(chip);
+ } else if (!regulator_is_enabled(chip->consumers[0].consumer)) {
+ chip->i2c_xfer_state = I2C_XFER_NOT_ENABLED;
+ }
+ mutex_unlock(&indio_dev->mlock);
+}
+
+/* This detects regulator enable/disable events and puts the device
+ * into its low-power state when this device is not using the regulator */
+static int cm3218_power_manager(struct notifier_block *regulator_nb,
+ unsigned long event, void *v)
+{
+ struct cm3218_chip *chip;
+
+ chip = container_of(regulator_nb, struct cm3218_chip, regulator_nb);
+ queue_delayed_work(chip->wq, &chip->dw, 1);
+ return NOTIFY_OK;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cm3218_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct cm3218_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (!chip->consumers)
+ return 0;
+
+ /* assumes all other devices stop using this regulator */
+ if (chip->als_state != CHIP_POWER_OFF)
+ ret = regulator_bulk_disable(1, chip->consumers);
+
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d regulator_bulk_disable fails",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+ }
+
+ if (chip->i2c_xfer_state != I2C_XFER_NOT_ENABLED)
+ ret = cm3218_activate_standby_mode(chip);
+ if (ret)
+ dev_err(&chip->client->dev,
+ "idname: %s func:%s line:%d activate_standby fails\n",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+}
+
+static int cm3218_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct cm3218_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (chip->als_state == CHIP_POWER_OFF)
+ return 0;
+
+ if (!chip->consumers)
+ return 0;
+
+ ret = regulator_bulk_enable(1, chip->consumers);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d regulator_bulk_enable fails\n",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ if (chip->als_state == CHIP_POWER_ON_ALS_ON)
+ ret = _cm3218_register_sync(chip);
+ if (ret)
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d _cm3218_register_sync fails\n",
+ chip->id->name, __func__, __LINE__);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(cm3218_pm_ops, cm3218_suspend, cm3218_resume);
+#define CM3218_PM_OPS (&cm3218_pm_ops)
+#else
+#define CM3218_PM_OPS NULL
+#endif
+
+/* device's i2c registration */
+static int cm3218_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct cm3218_chip *chip;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+
+ indio_dev = iio_device_alloc(sizeof(*chip));
+ if (indio_dev == NULL) {
+ dev_err(&client->dev,
+ "idname:%s func:%s line:%d iio_allocate_device fails\n",
+ id->name, __func__, __LINE__);
+ return -ENOMEM;
+ }
+ chip = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ chip->client = client;
+
+ indio_dev->info = &cm3218_iio_info;
+ indio_dev->channels = cm3218_channels;
+ indio_dev->num_channels = 1;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev,
+ "idname:%s func:%s line:%d iio_device_register fails\n",
+ id->name, __func__, __LINE__);
+ goto free_iio_dev;
+ }
+
+ cm3218_reg_defaults.def = id->driver_data ?
+ CM32181_CONFIGURE_DEFAULT_VAL :
+ CM3218_CONFIGURE_DEFAULT_VAL;
+ regmap = devm_regmap_init_i2c(client, &cm3218_regmap_config);
+ if (IS_ERR_OR_NULL(regmap)) {
+ dev_err(&client->dev,
+ "idname:%s func:%s line:%d devm_regmap_init_i2c fails\n",
+ id->name, __func__, __LINE__);
+ ret = -ENOMEM;
+ goto unregister_iio_dev;
+ }
+ chip->regmap = regmap;
+
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(cm3218_consumers),
+ cm3218_consumers);
+ if (ret) {
+ dev_info(&client->dev,
+ "idname:%s func:%s line:%d regulator not found.\n"
+ "Assuming regulator is not needed\n",
+ id->name, __func__, __LINE__);
+ goto finish;
+ } else {
+ chip->consumers = cm3218_consumers;
+ }
+
+ chip->wq = create_singlethread_workqueue(id->name);
+ if (!chip->wq) {
+ dev_err(&client->dev, "%s workqueue err\n", __func__);
+ ret = -ENOMEM;
+ goto unregister_iio_dev;
+ }
+
+ INIT_DELAYED_WORK(&chip->dw, cm3218_work);
+
+ chip->regulator_nb.notifier_call = cm3218_power_manager;
+ ret = regulator_register_notifier(chip->consumers[0].consumer,
+ &chip->regulator_nb);
+ if (ret) {
+ dev_err(&client->dev,
+ "idname:%s func:%s line:%d regulator_register_notifier fails\n",
+ id->name, __func__, __LINE__);
+ goto err_wq;
+ }
+
+ if (regulator_is_enabled(chip->consumers[0].consumer)) {
+ chip->i2c_xfer_state = I2C_XFER_OK_REG_NOT_SYNC;
+ ret = cm3218_activate_standby_mode(chip);
+ if (ret) {
+ dev_err(&client->dev,
+ "idname:%s func:%s line:%d regulator_register_notifier fails\n",
+ id->name, __func__, __LINE__);
+ goto unregister_regulator_notifier;
+ }
+ }
+
+finish:
+ chip->als_state = CHIP_POWER_OFF;
+ chip->id = id;
+ dev_info(&client->dev, "idname:%s func:%s line:%d probe success\n",
+ id->name, __func__, __LINE__);
+ return 0;
+
+unregister_regulator_notifier:
+ regulator_unregister_notifier(chip->consumers[0].consumer,
+ &chip->regulator_nb);
+err_wq:
+ destroy_workqueue(chip->wq);
+unregister_iio_dev:
+ iio_device_unregister(indio_dev);
+free_iio_dev:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static void cm3218_shutdown(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct cm3218_chip *chip = iio_priv(indio_dev);
+ mutex_lock(&indio_dev->mlock);
+ chip->shutdown_complete = 1;
+ mutex_unlock(&indio_dev->mlock);
+}
+
+static int cm3218_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct cm3218_chip *chip = iio_priv(indio_dev);
+
+ if (chip->consumers)
+ regulator_unregister_notifier(chip->consumers[0].consumer,
+ &chip->regulator_nb);
+ if (chip->wq)
+ destroy_workqueue(chip->wq);
+ iio_device_unregister(indio_dev);
+ iio_device_free(indio_dev);
+ return 0;
+}
+
+static const struct i2c_device_id cm3218_id[] = {
+ {"cm3218", 0},
+ {"cm32181", 1},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cm3218_id);
+
+static const struct of_device_id cm3218_of_match[] = {
+ { .compatible = "capella,cm3218", },
+ { .compatible = "capella,cm32181", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cm3218_of_match);
+
+static struct i2c_driver cm3218_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = CM3218_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cm3218_of_match),
+ .pm = CM3218_PM_OPS,
+ },
+ .id_table = cm3218_id,
+ .probe = cm3218_probe,
+ .remove = cm3218_remove,
+ .shutdown = cm3218_shutdown,
+};
+module_i2c_driver(cm3218_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CM3218 Driver");
+MODULE_AUTHOR("Sri Krishna chowdary <schowdary@nvidia.com>");
diff --git a/drivers/staging/iio/light/iqs253.c b/drivers/staging/iio/light/iqs253.c
new file mode 100644
index 000000000000..ea7480105a34
--- /dev/null
+++ b/drivers/staging/iio/light/iqs253.c
@@ -0,0 +1,856 @@
+/*
+ * An IIO driver for the IQS253 capacitive sensor.
+ *
+ * IIO driver for monitoring proximity.
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/irqchip/tegra.h>
+#include <linux/input.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/light/ls_sysfs.h>
+#include <linux/iio/light/ls_dt.h>
+
+/* registers */
+#define SYSFLAGS 0x10
+#define PROX_STATUS 0x31
+#define TOUCH_STATUS 0x35
+#define TARGET 0xC4
+#define COMP0 0xC5
+#define CH0_ATI_BASE 0xC8
+#define CH1_ATI_BASE 0xC9
+#define CH2_ATI_BASE 0xCA
+#define CH0_PTH 0xCB
+#define CH1_PTH 0xCC
+#define CH2_PTH 0xCD
+#define PROX_SETTINGS0 0xD1
+#define PROX_SETTINGS1 0xD2
+#define PROX_SETTINGS2 0xD3
+#define PROX_SETTINGS3 0xD4
+#define ACTIVE_CHAN 0xD5
+#define LOW_POWER 0xD6
+#define DYCAL_CHANS 0xD8
+#define EVENT_MODE_MASK 0xD9
+#define DEFAULT_COMMS_POINTER 0xDD
+
+#define IQS253_PROD_ID 41
+
+#define PROX_CH0 0x01
+#define PROX_CH1 0x02
+#define PROX_CH2 0x04
+
+#define STYLUS_ONLY PROX_CH0
+#define PROXIMITY_ONLY (PROX_CH1 | PROX_CH2)
+
+#define CH0_COMPENSATION 0x55
+
+#define PROX_TH_CH0 0x04 /* proximity threshold = 0.5 cm */
+#define PROX_TH_CH1 0x01 /* proximity threshold = 2 cm */
+#define PROX_TH_CH2 0x01 /* proximity threshold = 2 cm */
+
+#define DISABLE_DYCAL 0x00
+
+#define CH0_ATI_TH 0x17
+#define CH1_ATI_TH 0x17
+#define CH2_ATI_TH 0x19
+
+#define EVENT_PROX_ONLY 0x01
+
+#define PROX_SETTING_NORMAL 0x25
+#define PROX_SETTING_STYLUS 0x26
+
+#define AUTO_ATI_DISABLE BIT(7)
+#define ATI_IN_PROGRESS 0x04
+
+/* initial values */
+
+#define EVENT_MODE_DISABLE_MASK 0x04
+#define LTA_DISABLE BIT(5)
+#define ACF_DISABLE BIT(4)
+/* LTA always halt */
+#define LTA_HALT_11 (BIT(0) | BIT(1))
+
+#define ATI_ENABLED_MASK 0x80
+
+#define NUM_REG 17
+
+struct iqs253_chip {
+ struct i2c_client *client;
+ const struct i2c_device_id *id;
+ u32 rdy_gpio;
+ u32 wake_gpio;
+ u32 sar_gpio;
+ u32 mode;
+ u32 value;
+ struct regulator *vddhi;
+ u32 using_regulator;
+#if defined(CONFIG_SENSORS_IQS253_AS_PROXIMITY)
+ struct lightsensor_spec *ls_spec;
+#endif
+ struct workqueue_struct *wq;
+ struct delayed_work dw;
+#if !defined(CONFIG_SENSORS_IQS253_AS_PROXIMITY)
+ struct workqueue_struct *sar_wq;
+ struct delayed_work sar_dw;
+#endif
+ struct input_dev *idev;
+ u32 stylus_inserted;
+};
+
+enum mode {
+ MODE_NONE = -1,
+ NORMAL_MODE,
+ STYLUS_MODE,
+ INIT_MODE,
+ FORCE_ATI_MODE,
+ POST_INIT_MODE,
+ NUM_MODE
+};
+
+struct reg_val_pair {
+ u8 reg;
+ u8 val;
+};
+
+static struct reg_val_pair reg_val_map[NUM_MODE][NUM_REG] = {
+ {
+ { COMP0, CH0_COMPENSATION},
+ { CH0_ATI_BASE, CH0_ATI_TH},
+ { CH1_ATI_BASE, CH1_ATI_TH},
+ { CH2_ATI_BASE, CH2_ATI_TH},
+ { CH0_PTH, PROX_TH_CH0},
+ { CH1_PTH, PROX_TH_CH1},
+ { PROX_SETTINGS0, PROX_SETTING_NORMAL},
+ { ACTIVE_CHAN, PROXIMITY_ONLY | STYLUS_ONLY},
+ { DYCAL_CHANS, DISABLE_DYCAL},
+ { EVENT_MODE_MASK, EVENT_PROX_ONLY}
+ },
+ {
+ { COMP0, CH0_COMPENSATION},
+ { CH0_ATI_BASE, CH0_ATI_TH},
+ { CH1_ATI_BASE, CH1_ATI_TH},
+ { CH2_ATI_BASE, CH2_ATI_TH},
+ { CH2_PTH, PROX_TH_CH2},
+ { PROX_SETTINGS0, PROX_SETTING_STYLUS},
+ { ACTIVE_CHAN, STYLUS_ONLY},
+ { DYCAL_CHANS, DISABLE_DYCAL},
+ { EVENT_MODE_MASK, EVENT_PROX_ONLY}
+ },
+ { /* init settings */
+ { PROX_SETTINGS2, ACF_DISABLE | EVENT_MODE_DISABLE_MASK},
+ { ACTIVE_CHAN, PROXIMITY_ONLY},
+ { PROX_SETTINGS0, AUTO_ATI_DISABLE},
+ { CH0_PTH, 0x04},
+ { CH1_PTH, 0x04},
+ { CH2_PTH, 0x04},
+ { TARGET, 0x20},
+ },
+ { /* force on ATI */
+ { PROX_SETTINGS0, 0x50}, /* enable ATI and force auto ATI */
+ },
+ {
+ { PROX_SETTINGS0, AUTO_ATI_DISABLE}, /* turn off ATI*/
+ { PROX_SETTINGS2, ACF_DISABLE |
+ EVENT_MODE_DISABLE_MASK | LTA_HALT_11},
+ { DYCAL_CHANS, DISABLE_DYCAL},
+ /* turning on ATI is recommended but it has side effects */
+ },
+};
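+/*
+ * Each row of reg_val_map is the register/value write sequence for one
+ * mode; rows are zero-padded to NUM_REG entries and the all-zero tail
+ * is skipped by the (!reg && !val) checks in iqs253_set().
+ */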
+
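+/*
+ * Communication-window handshake: pulse the RDY gpio low, release it
+ * to tristate, and retry until the line reads low, i.e. the chip is
+ * ready for an i2c transfer.
+ */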
+static void iqs253_i2c_hand_shake(struct iqs253_chip *iqs253_chip)
+{
+ int retry_count = 10;
+ do {
+ gpio_direction_output(iqs253_chip->rdy_gpio, 0);
+ usleep_range(10 * 1000, 10 * 1000);
+ /* put to tristate */
+ gpio_direction_input(iqs253_chip->rdy_gpio);
+ } while (gpio_get_value(iqs253_chip->rdy_gpio) && retry_count--);
+}
+
+static int iqs253_i2c_read_byte(struct iqs253_chip *chip, int reg)
+{
+	int ret = 0, retry_count = 10;
+	do {
+		iqs253_i2c_hand_shake(chip);
+		ret = i2c_smbus_read_byte_data(chip->client, reg);
+	} while ((ret < 0) && retry_count--);
+	return ret;
+}
+
+static int iqs253_i2c_write_byte(struct iqs253_chip *chip, int reg, int val)
+{
+ int ret = 0, retry_count = 10;
+ do {
+ iqs253_i2c_hand_shake(chip);
+ ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+ } while (ret && retry_count--);
+ return ret;
+}
+
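+/*
+ * A mode switch runs in three phases: program the INIT_MODE register
+ * set, force an auto-ATI calibration and poll SYSFLAGS until it
+ * completes, then apply POST_INIT_MODE to disable ATI again.
+ */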
+/* must call holding lock */
+static int iqs253_set(struct iqs253_chip *iqs253_chip, int mode)
+{
+ int ret = 0, i;
+ struct reg_val_pair *reg_val_pair_map;
+
+ if ((mode != NORMAL_MODE) && (mode != STYLUS_MODE))
+ return -EINVAL;
+
+ reg_val_pair_map = reg_val_map[INIT_MODE];
+
+ for (i = 0; i < NUM_REG; i++) {
+ if (!reg_val_pair_map[i].reg && !reg_val_pair_map[i].val)
+ continue;
+
+ ret = iqs253_i2c_write_byte(iqs253_chip,
+ reg_val_pair_map[i].reg,
+ reg_val_pair_map[i].val);
+ if (ret) {
+ dev_err(&iqs253_chip->client->dev,
+ "iqs253 write val:%x to reg:%x failed\n",
+ reg_val_pair_map[i].val,
+ reg_val_pair_map[i].reg);
+ return ret;
+ }
+ }
+
+ reg_val_pair_map = reg_val_map[FORCE_ATI_MODE];
+
+ for (i = 0; i < NUM_REG; i++) {
+ if (!reg_val_pair_map[i].reg && !reg_val_pair_map[i].val)
+ continue;
+
+ ret = iqs253_i2c_write_byte(iqs253_chip,
+ reg_val_pair_map[i].reg,
+ reg_val_pair_map[i].val);
+ if (ret) {
+ dev_err(&iqs253_chip->client->dev,
+ "iqs253 write val:%x to reg:%x failed\n",
+ reg_val_pair_map[i].val,
+ reg_val_pair_map[i].reg);
+ return ret;
+ }
+ }
+ /* wait for ATI to finish */
+ do {
+ usleep_range(10 * 1000, 10 * 1000);
+ ret = iqs253_i2c_read_byte(iqs253_chip, SYSFLAGS);
+ } while (ret & ATI_IN_PROGRESS);
+
+ reg_val_pair_map = reg_val_map[POST_INIT_MODE];
+
+ for (i = 0; i < NUM_REG; i++) {
+ if (!reg_val_pair_map[i].reg && !reg_val_pair_map[i].val)
+ continue;
+
+ ret = iqs253_i2c_write_byte(iqs253_chip,
+ reg_val_pair_map[i].reg,
+ reg_val_pair_map[i].val);
+ if (ret) {
+ dev_err(&iqs253_chip->client->dev,
+ "iqs253 write val:%x to reg:%x failed\n",
+ reg_val_pair_map[i].val,
+ reg_val_pair_map[i].reg);
+ return ret;
+ }
+ }
+ iqs253_chip->mode = mode;
+ return 0;
+}
+
+#if defined(CONFIG_SENSORS_IQS253_AS_PROXIMITY)
+/* device's registration with iio to facilitate user operations */
+static ssize_t iqs253_chan_regulator_enable(
+ struct iio_dev *indio_dev, uintptr_t private,
+ struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ int ret = 0;
+ u8 enable;
+ struct iqs253_chip *chip = iio_priv(indio_dev);
+
+ if (chip->mode == STYLUS_MODE)
+ return -EINVAL;
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ if (enable == chip->using_regulator)
+ goto success;
+
+ if (enable)
+ ret = regulator_enable(chip->vddhi);
+ else
+ ret = regulator_disable(chip->vddhi);
+
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d enable:%d regulator logic failed\n",
+ chip->id->name, __func__, __LINE__, enable);
+ goto fail;
+ }
+
+success:
+ chip->using_regulator = enable;
+ chip->mode = MODE_NONE;
+fail:
+ return ret ? ret : 1;
+}
+
+static ssize_t iqs253_chan_normal_mode_enable(
+ struct iio_dev *indio_dev, uintptr_t private,
+ struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ int ret = 0;
+ u8 enable;
+ struct iqs253_chip *chip = iio_priv(indio_dev);
+
+ if (chip->mode == STYLUS_MODE)
+ return -EINVAL;
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ if (!chip->using_regulator)
+ return -EINVAL;
+
+ if (enable)
+ ret = iqs253_set(chip, NORMAL_MODE);
+ else
+ chip->mode = MODE_NONE;
+
+ return ret ? ret : 1;
+}
+
+/*
+ * chan_regulator_enable is used to enable regulators used by
+ * particular channel.
+ * chan_enable actually configures various registers to activate
+ * a particular channel.
+ */
+static const struct iio_chan_spec_ext_info iqs253_ext_info[] = {
+ {
+ .name = "regulator_enable",
+ .write = iqs253_chan_regulator_enable,
+ },
+ {
+ .name = "enable",
+ .write = iqs253_chan_normal_mode_enable,
+ },
+ {
+ },
+};
+
+static const struct iio_chan_spec iqs253_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = iqs253_ext_info,
+ },
+};
+
+static int iqs253_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct iqs253_chip *chip = iio_priv(indio_dev);
+ int ret;
+
+ if (chip->mode != NORMAL_MODE)
+ return -EINVAL;
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ ret = iqs253_i2c_read_byte(chip, PROX_STATUS);
+ chip->value = -1;
+	if ((ret >= 0) && (chip->mode == NORMAL_MODE)) {
+		ret &= PROXIMITY_ONLY;
+		/*
+		 * if both channels detect proximity => distance = 0;
+		 * if one channel detects proximity => distance = 1;
+		 * if no channel detects proximity => distance = 2;
+		 */
+		chip->value = (ret == (PROX_CH1 | PROX_CH2)) ? 0 :
+				ret ? 1 : 2;
+	}
+ if (chip->value == -1)
+ return -EINVAL;
+
+ *val = chip->value; /* cm */
+
+ /* provide input to SAR */
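+	/* drive 1 while a body is within range (0-1 cm), 0 once at 2 cm */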
+	if (chip->value >= 2)
+		gpio_direction_output(chip->sar_gpio, 0);
+	else
+		gpio_direction_output(chip->sar_gpio, 1);
+
+ return IIO_VAL_INT;
+}
+
+static IIO_CONST_ATTR(vendor, "Azoteq");
+static IIO_CONST_ATTR(in_proximity_integration_time,
+ "16000000"); /* 16 msec */
+static IIO_CONST_ATTR(in_proximity_max_range, "2"); /* cm */
+static IIO_CONST_ATTR(in_proximity_power_consumed, "1.67"); /* mA */
+
+static struct attribute *iqs253_attrs[] = {
+ &iio_const_attr_vendor.dev_attr.attr,
+ &iio_const_attr_in_proximity_integration_time.dev_attr.attr,
+ &iio_const_attr_in_proximity_max_range.dev_attr.attr,
+ &iio_const_attr_in_proximity_power_consumed.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group iqs253_attr_group = {
+ .name = "iqs253",
+ .attrs = iqs253_attrs
+};
+
+static struct iio_info iqs253_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &iqs253_read_raw,
+ .attrs = &iqs253_attr_group,
+};
+
+#else
+static void iqs253_sar_proximity_detect_work(struct work_struct *ws)
+{
+ int ret;
+ struct iqs253_chip *chip;
+
+ chip = container_of(ws, struct iqs253_chip, sar_dw.work);
+
+ if (!chip->using_regulator) {
+ ret = regulator_enable(chip->vddhi);
+ if (ret)
+ goto finish;
+ chip->using_regulator = true;
+ }
+
+ if (chip->mode != NORMAL_MODE) {
+ ret = iqs253_set(chip, NORMAL_MODE);
+ if (ret)
+ goto finish;
+ }
+
+ ret = iqs253_i2c_read_byte(chip, PROX_STATUS);
+ chip->value = -1;
+	if ((ret >= 0) && (chip->mode == NORMAL_MODE)) {
+		ret &= PROXIMITY_ONLY;
+		/*
+		 * if both channels detect proximity => distance = 0;
+		 * if one channel detects proximity => distance = 1;
+		 * if no channel detects proximity => distance = 2;
+		 */
+		chip->value = (ret == (PROX_CH1 | PROX_CH2)) ? 0 :
+				ret ? 1 : 2;
+	}
+ if (chip->value == -1)
+ goto finish;
+ /* provide input to SAR */
+	if (chip->value >= 2)
+		gpio_direction_output(chip->sar_gpio, 0);
+	else
+		gpio_direction_output(chip->sar_gpio, 1);
+
+ ret = regulator_disable(chip->vddhi);
+ if (ret)
+ goto finish;
+ chip->using_regulator = false;
+
+finish:
+ queue_delayed_work(chip->sar_wq, &chip->sar_dw, msecs_to_jiffies(1000));
+}
+
+#endif /* CONFIG_SENSORS_IQS253_AS_PROXIMITY */
+
+#ifdef CONFIG_PM_SLEEP
+static int iqs253_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct iqs253_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (!chip->using_regulator)
+ ret = regulator_enable(chip->vddhi);
+
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d regulator enable fails\n",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+ }
+
+ ret = iqs253_set(chip, STYLUS_MODE);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d can not enable stylus mode\n",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+ }
+ return tegra_pm_irq_set_wake(tegra_gpio_to_wake(chip->wake_gpio), 1);
+}
+
+static int iqs253_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct iqs253_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (chip->using_regulator) {
+ ret = iqs253_set(chip, NORMAL_MODE);
+ } else {
+ chip->mode = MODE_NONE;
+ ret = regulator_disable(chip->vddhi);
+ }
+
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d regulator enable fails\n",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(iqs253_pm_ops, iqs253_suspend, iqs253_resume);
+#define IQS253_PM_OPS (&iqs253_pm_ops)
+#else
+#define IQS253_PM_OPS NULL
+#endif
+
+static void iqs253_stylus_detect_work(struct work_struct *ws)
+{
+ int ret;
+ struct iqs253_chip *chip;
+
+ chip = container_of(ws, struct iqs253_chip, dw.work);
+
+ if (!chip->using_regulator) {
+ ret = regulator_enable(chip->vddhi);
+ if (ret)
+ goto finish;
+ chip->using_regulator = true;
+ }
+
+ if (chip->mode != NORMAL_MODE) {
+ ret = iqs253_set(chip, NORMAL_MODE);
+ if (ret)
+ goto finish;
+
+ ret = iqs253_i2c_read_byte(chip, PROX_STATUS);
+ chip->stylus_inserted = (ret & STYLUS_ONLY);
+ input_report_switch(chip->idev, SW_TABLET_MODE,
+ !chip->stylus_inserted);
+ input_sync(chip->idev);
+ }
+
+ ret = iqs253_i2c_read_byte(chip, PROX_STATUS);
+ chip->value = -1;
+ if (ret >= 0) {
+ ret &= STYLUS_ONLY;
+ if (ret && !chip->stylus_inserted) {
+ chip->stylus_inserted = true;
+ input_report_switch(chip->idev, SW_TABLET_MODE, false);
+ input_sync(chip->idev);
+ } else if (!ret && chip->stylus_inserted) {
+ chip->stylus_inserted = false;
+ input_report_switch(chip->idev, SW_TABLET_MODE, true);
+ input_sync(chip->idev);
+ }
+ }
+
+finish:
+ queue_delayed_work(chip->wq, &chip->dw, msecs_to_jiffies(2000));
+}
+
+static struct input_dev *iqs253_stylus_input_init(struct iqs253_chip *chip)
+{
+ int ret;
+ struct input_dev *idev = input_allocate_device();
+ if (!idev)
+ return NULL;
+
+ idev->name = "stylus_detect";
+ set_bit(EV_SW, idev->evbit);
+ input_set_capability(idev, EV_SW, SW_TABLET_MODE);
+ ret = input_register_device(idev);
+ if (ret) {
+ input_free_device(idev);
+ return ERR_PTR(ret);
+ }
+
+ chip->wq = create_freezable_workqueue("iqs253");
+ if (!chip->wq) {
+ dev_err(&chip->client->dev, "unable to create work queue\n");
+		/* unregister drops the last reference; no extra free needed */
+		input_unregister_device(idev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_DELAYED_WORK(&chip->dw, iqs253_stylus_detect_work);
+
+ queue_delayed_work(chip->wq, &chip->dw, 0);
+
+ return idev;
+}
+
+static int iqs253_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iqs253_chip *iqs253_chip;
+ struct iio_dev *indio_dev;
+ struct input_dev *idev;
+ int rdy_gpio = -1, wake_gpio = -1, sar_gpio = -1;
+ struct property *stylus_detect = NULL;
+
+ rdy_gpio = of_get_named_gpio(client->dev.of_node, "rdy-gpio", 0);
+ if (rdy_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (!gpio_is_valid(rdy_gpio))
+ return -EINVAL;
+
+ wake_gpio = of_get_named_gpio(client->dev.of_node, "wake-gpio", 0);
+ if (wake_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (!gpio_is_valid(wake_gpio))
+ return -EINVAL;
+
+ sar_gpio = of_get_named_gpio(client->dev.of_node, "sar-gpio", 0);
+ if (sar_gpio == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ ret = gpio_request_one(sar_gpio, GPIOF_OUT_INIT_LOW, NULL);
+	if (ret < 0)
+		return ret;
+
+ indio_dev = iio_device_alloc(sizeof(*iqs253_chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, indio_dev);
+ iqs253_chip = iio_priv(indio_dev);
+
+#if defined(CONFIG_SENSORS_IQS253_AS_PROXIMITY)
+ iqs253_chip->ls_spec = of_get_ls_spec(&client->dev);
+ if (!iqs253_chip->ls_spec) {
+ dev_err(&client->dev,
+ "devname:%s func:%s line:%d invalid meta data\n",
+ id->name, __func__, __LINE__);
+ return -ENODATA;
+ }
+
+ fill_ls_attrs(iqs253_chip->ls_spec, iqs253_attrs);
+ indio_dev->channels = iqs253_channels;
+ indio_dev->info = &iqs253_iio_info;
+ indio_dev->num_channels = 1;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev,
+ "devname:%s func:%s line:%d iio_device_register fail\n",
+ id->name, __func__, __LINE__);
+ goto err_iio_register;
+ }
+#endif
+
+ iqs253_chip->client = client;
+ iqs253_chip->id = id;
+ iqs253_chip->mode = MODE_NONE;
+ iqs253_chip->vddhi = devm_regulator_get(&client->dev, "vddhi");
+	if (IS_ERR(iqs253_chip->vddhi)) {
+		dev_err(&client->dev,
+			"devname:%s func:%s regulator vddhi not found\n",
+			id->name, __func__);
+		ret = PTR_ERR(iqs253_chip->vddhi);
+		goto err_regulator_get;
+	}
+
+ ret = gpio_request(rdy_gpio, "iqs253");
+ if (ret) {
+ dev_err(&client->dev,
+ "devname:%s func:%s regulator vddhi not found\n",
+ id->name, __func__);
+ goto err_gpio_request;
+ }
+ iqs253_chip->rdy_gpio = rdy_gpio;
+ iqs253_chip->wake_gpio = wake_gpio;
+ iqs253_chip->sar_gpio = sar_gpio;
+
+ ret = regulator_enable(iqs253_chip->vddhi);
+ if (ret) {
+ dev_err(&client->dev,
+ "devname:%s func:%s regulator enable failed\n",
+ id->name, __func__);
+ goto err_gpio_request;
+ }
+
+ ret = iqs253_i2c_read_byte(iqs253_chip, 0);
+	if (ret != IQS253_PROD_ID) {
+		dev_err(&client->dev,
+			"devname:%s func:%s device not present\n",
+			id->name, __func__);
+		ret = -ENODEV;
+		goto err_gpio_request;
+	}
+
+ ret = regulator_disable(iqs253_chip->vddhi);
+ if (ret) {
+ dev_err(&client->dev,
+ "devname:%s func:%s regulator disable failed\n",
+ id->name, __func__);
+ goto err_gpio_request;
+ }
+
+ stylus_detect = of_find_property(client->dev.of_node,
+ "stylus-detect", NULL);
+ if (!stylus_detect)
+ goto finish;
+
+ idev = iqs253_stylus_input_init(iqs253_chip);
+	if (IS_ERR_OR_NULL(idev)) {
+		ret = idev ? PTR_ERR(idev) : -ENOMEM;
+		goto err_gpio_request;
+	}
+ iqs253_chip->idev = idev;
+
+finish:
+
+#if !defined(CONFIG_SENSORS_IQS253_AS_PROXIMITY)
+ iqs253_chip->sar_wq = create_freezable_workqueue("iqs253_sar");
+	if (!iqs253_chip->sar_wq) {
+		dev_err(&iqs253_chip->client->dev,
+			"unable to create work queue\n");
+		ret = -ENOMEM;
+		goto err_gpio_request;
+	}
+
+ INIT_DELAYED_WORK(&iqs253_chip->sar_dw,
+ iqs253_sar_proximity_detect_work);
+
+ queue_delayed_work(iqs253_chip->sar_wq, &iqs253_chip->sar_dw, 0);
+#endif
+
+ dev_info(&client->dev, "devname:%s func:%s line:%d probe success\n",
+ id->name, __func__, __LINE__);
+
+ return 0;
+
+err_gpio_request:
+#if !defined(CONFIG_SENSORS_IQS253_AS_PROXIMITY)
+ if (iqs253_chip->sar_wq)
+ destroy_workqueue(iqs253_chip->sar_wq);
+#endif
+err_regulator_get:
+#if defined(CONFIG_SENSORS_IQS253_AS_PROXIMITY)
+ iio_device_unregister(indio_dev);
+err_iio_register:
+#endif
+ iio_device_free(indio_dev);
+
+ dev_err(&client->dev, "devname:%s func:%s line:%d probe failed\n",
+ id->name, __func__, __LINE__);
+ return ret;
+}
+
+static int iqs253_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct iqs253_chip *chip = iio_priv(indio_dev);
+ gpio_free(chip->rdy_gpio);
+#if !defined(CONFIG_SENSORS_IQS253_AS_PROXIMITY)
+ if (chip->sar_wq)
+ destroy_workqueue(chip->sar_wq);
+#endif
+ if (chip->wq)
+ destroy_workqueue(chip->wq);
+	if (chip->idev)
+		input_unregister_device(chip->idev);
+#if defined(CONFIG_SENSORS_IQS253_AS_PROXIMITY)
+ iio_device_unregister(indio_dev);
+#endif
+ iio_device_free(indio_dev);
+ return 0;
+}
+
+static void iqs253_shutdown(struct i2c_client *client)
+{
+}
+
+static const struct i2c_device_id iqs253_id[] = {
+ {"iqs253", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, iqs253_id);
+
+static const struct of_device_id iqs253_of_match[] = {
+ { .compatible = "azoteq,iqs253", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, iqs253_of_match);
+
+static struct i2c_driver iqs253_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "iqs253",
+ .owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(iqs253_of_match),
+		.pm = IQS253_PM_OPS,
+	},
+ .probe = iqs253_probe,
+ .remove = iqs253_remove,
+ .shutdown = iqs253_shutdown,
+ .id_table = iqs253_id,
+};
+
+module_i2c_driver(iqs253_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IQS253 Driver");
+MODULE_AUTHOR("Sri Krishna chowdary <schowdary@nvidia.com>");
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 82478a59e42e..946175ec190c 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -4,21 +4,16 @@
 * IIO driver for monitoring ambient light intensity in lux, proximity
* sensing and infrared sensing.
*
- * Copyright (c) 2010, NVIDIA Corporation.
+ * Copyright (c) 2010, NVIDIA CORPORATION. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
+ * This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/module.h>
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 8bb0d03627f2..ff8c25443689 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -2,7 +2,7 @@
* IIO driver for the light sensor ISL29028.
* ISL29028 is Concurrent Ambient Light and Proximity Sensor
*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
@@ -392,7 +389,8 @@ static const struct iio_chan_spec isl29028_channels[] = {
{
.type = IIO_LIGHT,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_RAW),
}, {
.type = IIO_INTENSITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
diff --git a/drivers/staging/iio/light/jsa1127.c b/drivers/staging/iio/light/jsa1127.c
new file mode 100644
index 000000000000..4141f7f55363
--- /dev/null
+++ b/drivers/staging/iio/light/jsa1127.c
@@ -0,0 +1,617 @@
+/*
+ * An IIO driver for the light sensor JSA-1127.
+ *
+ * IIO light driver for monitoring ambient light intensity in lux and
+ * proximity IR.
+ *
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/notifier.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/iio/light/jsa1127.h>
+#include <linux/random.h>
+
+#define DEV_ERR(err_string) \
+ dev_err(&chip->client->dev, \
+ "idname:%s func:%s line:%d %s\n", \
+ chip->id->name, __func__, __LINE__, err_string)
+
+#define JSA1127_VENDOR "Solteam-opto"
+
+enum als_state {
+ CHIP_POWER_OFF,
+ CHIP_POWER_ON_ALS_OFF,
+ CHIP_POWER_ON_ALS_ON,
+};
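+/*
+ * State transitions: the "regulator_enable" channel attribute moves
+ * the chip between CHIP_POWER_OFF and CHIP_POWER_ON_ALS_OFF, and the
+ * "enable" attribute then toggles CHIP_POWER_ON_ALS_ON, arming the
+ * polling work that refreshes als_raw_value.
+ */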
+
+#define JSA1127_OPMODE_CONTINUOUS 0x0C
+#define JSA1127_ONE_TIME_INTEGRATION_OPMODE 0x04
+#define JSA1127_CMD_START_INTEGRATION 0x08
+#define JSA1127_CMD_STOP_INTEGRATION 0x30
+#define JSA1127_CMD_STANDBY 0x8C
+#define JSA1127_POWER_ON_DELAY 60 /* msec */
+
+struct jsa1127_chip {
+ struct i2c_client *client;
+ const struct i2c_device_id *id;
+ struct regulator *regulator;
+
+ int rint;
+ int integration_time;
+ int noisy;
+
+ struct workqueue_struct *wq;
+ struct delayed_work dw;
+
+ bool use_internal_integration_timing;
+ u8 als_state;
+	int als_raw_value; /* last reading, or -EINVAL after a failed read */
+ u16 tint_coeff;
+};
+
+#define N_DATA_BYTES 2
+#define RETRY_COUNT 3
+#define JSA1127_RETRY_TIME 100 /* msec */
+#define JSA1127_VALID_MASK BIT(15)
+#define JSA1127_DATA_MASK (JSA1127_VALID_MASK - 1)
+#define JSA1127_IS_DATA_VALID(val) (val & JSA1127_VALID_MASK)
+#define JSA1127_CONV_TO_DATA(val) (val & JSA1127_DATA_MASK)
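+/*
+ * Samples are two bytes, LSB first: bit 15 flags a valid conversion
+ * and bits 14:0 carry the raw count.
+ */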
+/* returns 0 on success, -errno on failure*/
+static int jsa1127_try_update_als_reading_locked(struct jsa1127_chip *chip)
+{
+ int ret = 0;
+ u16 val = 0;
+ char buf[N_DATA_BYTES] = {0, 0};
+ int retry_count = RETRY_COUNT;
+ struct iio_dev *indio_dev = iio_priv_to_dev(chip);
+ unsigned char rndnum;
+
+ mutex_lock(&indio_dev->mlock);
+ do {
+ ret = i2c_master_recv(chip->client, buf, N_DATA_BYTES);
+ if (ret != N_DATA_BYTES) {
+ DEV_ERR("i2c_master_recv failed");
+ } else {
+ val = buf[1];
+ val = (val << 8) | buf[0];
+ if (JSA1127_IS_DATA_VALID(val)) {
+ chip->als_raw_value = JSA1127_CONV_TO_DATA(val);
+ if (chip->noisy) {
+ get_random_bytes(&rndnum, 1);
+ if (rndnum < 128)
+ chip->als_raw_value++;
+ }
+ break;
+ } else {
+ msleep(JSA1127_RETRY_TIME);
+ DEV_ERR("data invalid");
+ ret = -EINVAL;
+ }
+ }
+ } while (!JSA1127_IS_DATA_VALID(val) && (--retry_count));
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret == N_DATA_BYTES ? 0 : ret;
+}
+#undef N_DATA_BYTES
+#undef RETRY_COUNT
+#undef JSA1127_RETRY_TIME
+#undef JSA1127_VALID_MASK
+#undef JSA1127_DATA_MASK
+#undef JSA1127_IS_DATA_VALID
+#undef JSA1127_CONV_TO_DATA
+
+#define N_MSG_SEND 1
+static int jsa1127_send_cmd_locked(struct jsa1127_chip *chip, char command)
+{
+ int ret = -EAGAIN;
+ struct iio_dev *indio_dev = iio_priv_to_dev(chip);
+ char cmd[N_MSG_SEND];
+ cmd[0] = command;
+ mutex_lock(&indio_dev->mlock);
+ while (ret == -EAGAIN)
+ ret = i2c_master_send(chip->client, cmd, N_MSG_SEND);
+ if (ret != N_MSG_SEND)
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d i2c_master_send fails\n",
+ chip->id->name, __func__, __LINE__);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret == N_MSG_SEND ? 0 : ret;
+}
+#undef N_MSG_SEND
+
+/* device's registration with iio to facilitate user operations */
+static ssize_t jsa1127_chan_regulator_enable(struct iio_dev *indio_dev,
+ uintptr_t priv, struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ u8 enable;
+ int ret = 0;
+ struct jsa1127_chip *chip = iio_priv(indio_dev);
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ if (enable == (chip->als_state != CHIP_POWER_OFF))
+ goto success;
+
+ if (!chip->regulator)
+ goto success;
+
+ if (enable)
+ ret = regulator_enable(chip->regulator);
+ else
+ ret = regulator_disable(chip->regulator);
+
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d _jsa1127_register_read fails\n",
+ chip->id->name, __func__, __LINE__);
+ goto fail;
+ }
+
+success:
+ chip->als_state = enable;
+fail:
+ return ret ? ret : 1;
+}
+
+static ssize_t jsa1127_chan_enable(struct iio_dev *indio_dev,
+ uintptr_t priv, struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ u8 enable;
+ int ret = 0;
+ struct jsa1127_chip *chip = iio_priv(indio_dev);
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (chip->als_state == CHIP_POWER_OFF) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d please enable regulator first\n",
+ chip->id->name, __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ if (enable) {
+ chip->als_raw_value = 0;
+ chip->als_state = CHIP_POWER_ON_ALS_ON;
+		queue_delayed_work(chip->wq, &chip->dw,
+				msecs_to_jiffies(JSA1127_POWER_ON_DELAY));
+ } else {
+ cancel_delayed_work_sync(&chip->dw);
+ chip->als_state = CHIP_POWER_ON_ALS_OFF;
+ }
+
+ return ret ? ret : 1;
+}
+
+/* chan_regulator_enable is used to enable regulators used by
+ * particular channel.
+ * chan_enable actually configures various registers to activate
+ * a particular channel.
+ */
+static const struct iio_chan_spec_ext_info jsa1127_ext_info[] = {
+ {
+ .name = "regulator_enable",
+ .write = jsa1127_chan_regulator_enable,
+ },
+ {
+ .name = "enable",
+ .write = jsa1127_chan_enable,
+ },
+ {
+ },
+};
+
+static const struct iio_chan_spec jsa1127_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = jsa1127_ext_info,
+ },
+};
+
+static int jsa1127_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct jsa1127_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chip->als_state != CHIP_POWER_ON_ALS_ON)
+ return -EINVAL;
+
+ if (chip->als_raw_value != -EINVAL) {
+ *val = chip->als_raw_value;
+ ret = IIO_VAL_INT;
+ }
+
+ queue_delayed_work(chip->wq, &chip->dw, 0);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+
+/* integration time in msec corresponding to resistor RINT */
+static ssize_t jsa1127_integration_time(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct jsa1127_chip *chip = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", chip->integration_time);
+}
+
+/* max detection range in lux corresponding to resistor RINT,
+ * units = lux */
+static ssize_t jsa1127_max_range(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct jsa1127_chip *chip = iio_priv(indio_dev);
+ int max_range = 0;
+
+ if (chip->rint == 50)
+ max_range = 109000;
+ else if (chip->rint == 100)
+ max_range = 54000;
+ else if (chip->rint == 200)
+ max_range = 27000;
+ else if (chip->rint == 400)
+ max_range = 13000;
+ else if (chip->rint == 800)
+ max_range = 6500;
+ else
+ DEV_ERR("invalid RINT");
+ return sprintf(buf, "%d\n", max_range);
+}
+
+
+/* resolution in lux/count corresponding to resistor RINT,
+ * units = mLux/lsb */
+static ssize_t jsa1127_resolution(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct jsa1127_chip *chip = iio_priv(indio_dev);
+ int resolution = 0;
+
+ if (chip->rint == 50)
+ resolution = 3330;
+ else if (chip->rint == 100)
+ resolution = 1670;
+ else if (chip->rint == 200)
+ resolution = 830;
+ else if (chip->rint == 400)
+ resolution = 420;
+ else if (chip->rint == 800)
+ resolution = 210;
+ else
+ DEV_ERR("invalid RINT");
+ return sprintf(buf, "%d\n", resolution * chip->tint_coeff);
+}
+
+#define JSA1127_POWER_CONSUMED "1.65" /* mWatt */
+
+static IIO_CONST_ATTR(vendor, JSA1127_VENDOR);
+static IIO_DEVICE_ATTR(in_illuminance_resolution, S_IRUGO,
+ jsa1127_resolution, NULL, 0);
+static IIO_DEVICE_ATTR(in_illuminance_integration_time, S_IRUGO,
+ jsa1127_integration_time, NULL, 0);
+static IIO_CONST_ATTR(in_illuminance_power_consumed, JSA1127_POWER_CONSUMED);
+static IIO_DEVICE_ATTR(in_illuminance_max_range, S_IRUGO,
+ jsa1127_max_range, NULL, 0);
+
+static struct attribute *jsa1127_attributes[] = {
+ &iio_const_attr_vendor.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_resolution.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_integration_time.dev_attr.attr,
+ &iio_const_attr_in_illuminance_power_consumed.dev_attr.attr,
+ &iio_dev_attr_in_illuminance_max_range.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group jsa1127_attr_group = {
+ .attrs = jsa1127_attributes,
+};
+
+/* read_raw is used to report a channel's data to user
+ * in non SI units
+ */
+static const struct iio_info jsa1127_iio_info = {
+ .attrs = &jsa1127_attr_group,
+ .driver_module = THIS_MODULE,
+ .read_raw = jsa1127_read_raw,
+};
+
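+/*
+ * One sampling pass: with internal timing the chip free-runs in
+ * continuous mode for integration_time ms; otherwise the integration
+ * window is framed explicitly with START/STOP commands. Either way
+ * the reading is fetched and the chip is parked back in standby.
+ */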
+static void jsa1127_work_func(struct work_struct *ws)
+{
+ struct jsa1127_chip *chip = container_of(ws,
+ struct jsa1127_chip, dw.work);
+ int ret = 0;
+
+ if (chip->als_state != CHIP_POWER_ON_ALS_ON)
+ return;
+
+ if (chip->use_internal_integration_timing) {
+ ret = jsa1127_send_cmd_locked(chip, JSA1127_OPMODE_CONTINUOUS);
+ if (ret)
+ goto fail;
+ msleep(chip->integration_time);
+ } else {
+ ret = jsa1127_send_cmd_locked(chip,
+ JSA1127_ONE_TIME_INTEGRATION_OPMODE);
+ if (ret)
+ goto fail;
+		ret = jsa1127_send_cmd_locked(chip,
+				JSA1127_CMD_START_INTEGRATION);
+ if (ret)
+ goto fail;
+ msleep(chip->integration_time);
+		ret = jsa1127_send_cmd_locked(chip,
+				JSA1127_CMD_STOP_INTEGRATION);
+ if (ret)
+ goto fail;
+ }
+ ret = jsa1127_try_update_als_reading_locked(chip);
+ if (ret)
+ goto fail;
+ jsa1127_send_cmd_locked(chip, JSA1127_CMD_STANDBY);
+ return;
+fail:
+ chip->als_raw_value = -EINVAL;
+}
+
+#if 0
+static int jsa1127_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct jsa1127_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (chip->regulator && (chip->als_state != CHIP_POWER_OFF))
+ ret = regulator_disable(chip->regulator);
+
+ if (ret) {
+ DEV_ERR("regulator_disable fails");
+ return ret;
+ }
+
+ if (!chip->regulator || regulator_is_enabled(chip->regulator))
+ jsa1127_send_cmd_locked(chip, JSA1127_CMD_STANDBY);
+ return ret;
+}
+
+static int jsa1127_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct jsa1127_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (chip->als_state == CHIP_POWER_OFF)
+ return 0;
+
+ ret = regulator_enable(chip->regulator);
+ if (ret) {
+ DEV_ERR("regulator_bulk_enable fails");
+ return ret;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ queue_delayed_work(chip->wq, &chip->dw, 0);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(jsa1127_pm_ops, jsa1127_suspend, jsa1127_resume);
+#define JSA1127_PM_OPS (&jsa1127_pm_ops)
+#else
+#define JSA1127_PM_OPS NULL
+#endif
+
+/* device's i2c registration */
+static int jsa1127_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct jsa1127_chip *chip;
+ struct iio_dev *indio_dev;
+ struct jsa1127_platform_data *jsa1127_platform_data;
+ u32 rint = UINT_MAX, use_internal_integration_timing = UINT_MAX;
+ u32 integration_time = UINT_MAX;
+ u32 tint = UINT_MAX;
+ u32 noisy = UINT_MAX;
+
+ if (client->dev.of_node) {
+ of_property_read_u32(client->dev.of_node,
+ "solteam-opto,rint", &rint);
+ of_property_read_u32(client->dev.of_node,
+ "solteam-opto,integration-time",
+ &integration_time);
+ of_property_read_u32(client->dev.of_node,
+ "solteam-opto,use-internal-integration-timing",
+ &use_internal_integration_timing);
+ of_property_read_u32(client->dev.of_node,
+ "solteam-opto,tint-coeff", &tint);
+ of_property_read_u32(client->dev.of_node,
+ "solteam-opto,noisy", &noisy);
+	} else {
+		jsa1127_platform_data = client->dev.platform_data;
+		if (!jsa1127_platform_data)
+			return -EINVAL;
+		rint = jsa1127_platform_data->rint;
+		integration_time = jsa1127_platform_data->integration_time;
+		use_internal_integration_timing =
+			jsa1127_platform_data->use_internal_integration_timing;
+		tint = jsa1127_platform_data->tint_coeff;
+		noisy = jsa1127_platform_data->noisy;
+	}
+
+ if ((rint == UINT_MAX) ||
+ (use_internal_integration_timing == UINT_MAX) ||
+ (rint%50 != 0) || (tint == UINT_MAX)) {
+ pr_err("func:%s failed due to invalid platform data", __func__);
+ return -EINVAL;
+ }
+
+ indio_dev = iio_device_alloc(sizeof(*chip));
+ if (indio_dev == NULL) {
+ dev_err(&client->dev,
+ "idname:%s func:%s line:%d iio_allocate_device fails\n",
+ id->name, __func__, __LINE__);
+ return -ENOMEM;
+ }
+ chip = iio_priv(indio_dev);
+
+ chip->rint = rint;
+ chip->integration_time = integration_time;
+ chip->use_internal_integration_timing = use_internal_integration_timing;
+ chip->tint_coeff = tint;
+ chip->noisy = noisy;
+
+ i2c_set_clientdata(client, indio_dev);
+ chip->client = client;
+
+ chip->regulator = devm_regulator_get(&client->dev, "vdd");
+	if (IS_ERR(chip->regulator)) {
+		dev_info(&client->dev,
+			"idname:%s func:%s line:%d regulator not found, assuming it is not needed\n",
+			id->name, __func__, __LINE__);
+		/* fall through so the IIO device is still registered */
+		chip->regulator = NULL;
+	}
+
+ indio_dev->info = &jsa1127_iio_info;
+ indio_dev->channels = jsa1127_channels;
+ indio_dev->num_channels = 1;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev,
+ "idname:%s func:%s line:%d iio_device_register fails\n",
+ id->name, __func__, __LINE__);
+ goto free_iio_dev;
+ }
+
+	chip->wq = alloc_workqueue(id->name, WQ_FREEZABLE |
+			WQ_NON_REENTRANT | WQ_UNBOUND, 1);
+	if (!chip->wq) {
+		ret = -ENOMEM;
+		goto unregister_iio_dev;
+	}
+	INIT_DELAYED_WORK(&chip->dw, jsa1127_work_func);
+
+	if (!chip->regulator || regulator_is_enabled(chip->regulator))
+		ret = jsa1127_send_cmd_locked(chip, JSA1127_CMD_STANDBY);
+	if (ret)
+		goto destroy_wq;
+
+ chip->als_state = CHIP_POWER_OFF;
+ chip->id = id;
+ dev_info(&client->dev, "idname:%s func:%s line:%d probe success\n",
+ id->name, __func__, __LINE__);
+ return 0;
+
+destroy_wq:
+	destroy_workqueue(chip->wq);
+unregister_iio_dev:
+	iio_device_unregister(indio_dev);
+free_iio_dev:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int jsa1127_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct jsa1127_chip *chip = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ destroy_workqueue(chip->wq);
+ if (chip->regulator && (chip->als_state != CHIP_POWER_OFF))
+ regulator_disable(chip->regulator);
+
+	if (!chip->regulator || regulator_is_enabled(chip->regulator))
+		jsa1127_send_cmd_locked(chip, JSA1127_CMD_STANDBY);
+
+ iio_device_free(indio_dev);
+ return 0;
+}
+
+static const struct i2c_device_id jsa1127_id[] = {
+ {"jsa1127", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, jsa1127_id);
+
+static const struct of_device_id jsa1127_of_match[] = {
+ { .compatible = "solteam-opto,jsa1127", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, jsa1127_of_match);
+
+static struct i2c_driver jsa1127_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = JSA1127_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(jsa1127_of_match),
+ .pm = JSA1127_PM_OPS,
+ },
+ .id_table = jsa1127_id,
+ .probe = jsa1127_probe,
+ .remove = jsa1127_remove,
+};
+module_i2c_driver(jsa1127_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("jsa1127 Driver");
+MODULE_AUTHOR("Sri Krishna chowdary <schowdary@nvidia.com>");
diff --git a/drivers/staging/iio/light/ls_dt.c b/drivers/staging/iio/light/ls_dt.c
new file mode 100644
index 000000000000..1d95a38455dc
--- /dev/null
+++ b/drivers/staging/iio/light/ls_dt.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/iio/light/ls_sysfs.h>
+
+static const char *propname[NUM_PROP] = {
+ "vendor"
+};
+
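+/*
+ * NULL-padded rows: the proximity row has no resolution property, so
+ * its last slot stays NULL and is skipped when the table is walked.
+ */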
+static const char *dt_chan_sysfs_lut[MAX_CHAN][MAX_CHAN_PROP] = {
+ {
+ "illuminance,max-range",
+ "illuminance,integration-time",
+ "illuminance,resolution",
+ "illuminance,power-consumed"
+ },
+ {
+ "proximity,max-range",
+ "proximity,integration-time",
+ "proximity,power-consumed"
+ }
+};
+
+/*
+ * 1. create an instance of lightsensor_spec,
+ * 2. parse the DT node of the device and fill the lightsensor_spec,
+ * 3. return the filled lightsensor_spec on success.
+ */
+struct lightsensor_spec *of_get_ls_spec(struct device *dev)
+{
+ const char *prop_value;
+ int i, j, ret;
+ struct lightsensor_spec *ls_spec;
+ bool is_empty = true;
+
+ if (!dev->of_node)
+ return NULL;
+
+ ls_spec = devm_kzalloc(dev,
+ sizeof(struct lightsensor_spec), GFP_KERNEL);
+ if (!ls_spec)
+ return NULL;
+
+ /* fill values of dt properties in propname -> ls_spec->propname */
+ for (i = 0; i < NUM_PROP; i++) {
+ ret = of_property_read_string(dev->of_node, propname[i],
+ &prop_value);
+ if (!ret) {
+ ls_spec->prop[i] = prop_value;
+ is_empty = false;
+ }
+ }
+
+ /*
+ * fill values of dt properties in dt_chan_sysfs_lut to
+ * ls_spec->chan_prop
+ */
+ for (i = 0; i < MAX_CHAN; i++)
+ for (j = 0; j < MAX_CHAN_PROP; j++)
+ if (dt_chan_sysfs_lut[i][j]) {
+ ret = of_property_read_string(dev->of_node,
+ dt_chan_sysfs_lut[i][j], &prop_value);
+ if (!ret) {
+ ls_spec->chan_prop[i][j] = prop_value;
+ is_empty = false;
+ }
+ }
+
+ if (is_empty) {
+ devm_kfree(dev, ls_spec);
+ ls_spec = NULL;
+ }
+
+ return ls_spec;
+}
diff --git a/drivers/staging/iio/light/ls_sysfs.c b/drivers/staging/iio/light/ls_sysfs.c
new file mode 100644
index 000000000000..0bab6dad2702
--- /dev/null
+++ b/drivers/staging/iio/light/ls_sysfs.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/iio/light/ls_sysfs.h>
+
+static const char *propname[NUM_PROP] = {
+ "vendor"
+};
+
+/* look up table for light sensor's per channel sysfs */
+static const char *sysfs_ls_lut[MAX_CHAN][MAX_CHAN_PROP] = {
+ {
+ "in_illuminance_max_range",
+ "in_illuminance_integration_time",
+ "in_illuminance_resolution",
+ "in_illuminance_power_consumed"
+ },
+ {
+ "in_proximity_max_range",
+ "in_proximity_integration_time",
+ "in_proximity_power_consumed"
+ }
+};
+
+static const char *get_ls_spec_val(struct lightsensor_spec *ls_spec,
+ const char *sysfs_name)
+{
+ int i, j;
+
+ if (sysfs_name == NULL)
+ return NULL;
+
+ if (!ls_spec)
+ return NULL;
+
+ for (i = 0; i < NUM_PROP; i++)
+ if (strstr(sysfs_name, propname[i]))
+ return ls_spec->prop[i];
+
+ for (i = 0; i < MAX_CHAN; i++)
+ for (j = 0; j < MAX_CHAN_PROP; j++)
+ if (sysfs_ls_lut[i][j] &&
+ strstr(sysfs_name, sysfs_ls_lut[i][j]))
+ return ls_spec->chan_prop[i][j];
+
+ return NULL;
+}
+
+void fill_ls_attrs(struct lightsensor_spec *ls_spec,
+ struct attribute **attrs)
+{
+ struct attribute *attr;
+ struct device_attribute *dev_attr;
+ struct iio_const_attr *iio_const_attr;
+ int i;
+ const char *val;
+
+ if (!ls_spec || !attrs)
+ return;
+
+	for (i = 0; attrs[i]; i++) {
+		attr = attrs[i];
+		dev_attr = container_of(attr, struct device_attribute, attr);
+ iio_const_attr = to_iio_const_attr(dev_attr);
+ val = get_ls_spec_val(ls_spec, attr->name);
+ if (val)
+ iio_const_attr->string = val;
+ }
+}
diff --git a/drivers/staging/iio/light/ltr558als.c b/drivers/staging/iio/light/ltr558als.c
new file mode 100644
index 000000000000..e1c79a7b9a55
--- /dev/null
+++ b/drivers/staging/iio/light/ltr558als.c
@@ -0,0 +1,934 @@
+/* Lite-On LTR-558ALS Linux Driver
+ *
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/wakelock.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <asm/uaccess.h>
+
+#include "ltr558als.h"
+
+#define DRIVER_VERSION "1.0"
+#define DEVICE_NAME "ltr558"
+
+enum {
+ VDD = 0,
+ LED
+};
+
+struct ltr558_chip {
+ struct i2c_client *client;
+ struct mutex lock;
+ int irq;
+
+ struct regulator *supply[2];
+
+ bool is_als_enable;
+ bool als_enabled_before_suspend;
+ int als_gainrange;
+ int als_persist;
+ int als_reading;
+ int als_high_thres;
+ int als_low_thres;
+
+ bool is_prox_enable;
+ bool prox_enabled_before_suspend;
+ int ps_gainrange;
+ int prox_persist;
+ int prox_reading;
+ int prox_low_thres;
+ int prox_high_thres;
+};
+
+static int ltr558_i2c_read_reg(struct i2c_client *client, u8 regnum)
+{
+ return i2c_smbus_read_byte_data(client, regnum);
+}
+
+static int ltr558_i2c_write_reg(struct i2c_client *client, u8 regnum, u8 value)
+{
+ int writeerror;
+
+ writeerror = i2c_smbus_write_byte_data(client, regnum, value);
+ if (writeerror < 0)
+ return writeerror;
+ else
+ return 0;
+}
+
+static int ltr558_ps_enable(struct i2c_client *client, int gainrange)
+{
+ int error;
+ int setgain;
+
+ switch (gainrange) {
+ case PS_RANGE4:
+ setgain = MODE_PS_ON_Gain4;
+ break;
+
+ case PS_RANGE8:
+ setgain = MODE_PS_ON_Gain8;
+ break;
+
+ case PS_RANGE16:
+ setgain = MODE_PS_ON_Gain16;
+ break;
+
+ case PS_RANGE1:
+ default:
+ setgain = MODE_PS_ON_Gain1;
+ break;
+ }
+
+ /*
+ * Per HW Suggestion, LED Current: 100mA, Duty Cycle: 100%, PMF: 30KHz
+ * LED Pulse Count: 5, Measurement Repeat Rate: 200ms
+ */
+ error = ltr558_i2c_write_reg(client, LTR558_PS_LED,
+ PS_LED_PMF_30KHZ | PS_LED_CUR_DUTY_100 |
+ PS_LED_CUR_LEVEL_100);
+ if (!error)
+ error = ltr558_i2c_write_reg(client, LTR558_PS_N_PULSES,
+ PS_N_PULSES_5);
+ if (!error)
+ error = ltr558_i2c_write_reg(client, LTR558_PS_MEAS_RATE,
+ PS_MEAS_RATE_200MS);
+ if (!error) {
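+		/* BIT(5) presumably selects active PS mode on top of setgain */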
+ error = ltr558_i2c_write_reg(client,
+ LTR558_PS_CONTR, BIT(5) | setgain);
+ mdelay(WAKEUP_DELAY);
+ }
+ return error;
+}
+
+static int ltr558_ps_disable(struct i2c_client *client)
+{
+ return ltr558_i2c_write_reg(client, LTR558_PS_CONTR, MODE_PS_StdBy);
+}
+
+static int ltr558_ps_read(struct i2c_client *client)
+{
+ int psval_lo = 0, psval_hi = 0, psdata = 0;
+ psval_lo = ltr558_i2c_read_reg(client, LTR558_PS_DATA_0);
+	if (psval_lo < 0) {
+ psdata = psval_lo;
+ goto out;
+ }
+
+ psval_hi = ltr558_i2c_read_reg(client, LTR558_PS_DATA_1);
+	if (psval_hi < 0) {
+ psdata = psval_hi;
+ goto out;
+ }
+
+ /* PS should never saturate */
+	/* FIXME: enable WARN_ON once sensor is calibrated */
+ /* WARN_ON(psval_hi & BIT(7)); */
+
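+	/* 11-bit sample: DATA_1[2:0] are the high bits, DATA_0 the low byte */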
+ psdata = ((psval_hi & 0x07) * 256) + psval_lo;
+out:
+ return psdata;
+}
+
+static int ltr558_als_enable(struct i2c_client *client, int gainrange)
+{
+ int error = -1;
+
+ if (gainrange == ALS_RANGE1_320)
+ error = ltr558_i2c_write_reg(client, LTR558_ALS_CONTR,
+ MODE_ALS_ON_Range1);
+ else if (gainrange == ALS_RANGE2_64K)
+ error = ltr558_i2c_write_reg(client, LTR558_ALS_CONTR,
+ MODE_ALS_ON_Range2);
+
+ msleep(WAKEUP_DELAY);
+ return error;
+}
+
+static int ltr558_als_disable(struct i2c_client *client)
+{
+ return ltr558_i2c_write_reg(client, LTR558_ALS_CONTR, MODE_ALS_StdBy);
+}
+
+static int ltr558_als_read(struct i2c_client *client)
+{
+ int alsval_ch0_lo, alsval_ch0_hi;
+ int alsval_ch1_lo, alsval_ch1_hi;
+ unsigned int alsval_ch0 = 0, alsval_ch1 = 0;
+ int luxdata = 0, ratio = 0;
+ long ch0_coeff = 0, ch1_coeff = 0;
+
+ alsval_ch1_lo = ltr558_i2c_read_reg(client, LTR558_ALS_DATA_CH1_0);
+ alsval_ch1_hi = ltr558_i2c_read_reg(client, LTR558_ALS_DATA_CH1_1);
+ alsval_ch1 = (alsval_ch1_hi * 256) + alsval_ch1_lo;
+
+ alsval_ch0_lo = ltr558_i2c_read_reg(client, LTR558_ALS_DATA_CH0_0);
+ alsval_ch0_hi = ltr558_i2c_read_reg(client, LTR558_ALS_DATA_CH0_1);
+ alsval_ch0 = (alsval_ch0_hi * 256) + alsval_ch0_lo;
+
+ if (alsval_ch0 == 0 && alsval_ch1 == 0)
+ return 0;
+
+ /* lux formula */
+ ratio = (100 * alsval_ch1)/(alsval_ch1 + alsval_ch0);
+
+	if (ratio < 45) {
+		ch0_coeff = 17743;
+		ch1_coeff = -11059;
+	} else if (ratio < 64) {
+		ch0_coeff = 37725;
+		ch1_coeff = 13363;
+	} else if (ratio < 85) {
+		ch0_coeff = 16900;
+		ch1_coeff = 1690;
+	} else {
+		ch0_coeff = 0;
+		ch1_coeff = 0;
+	}
+
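+	/*
+	 * Worked example: alsval_ch0 = 1000, alsval_ch1 = 300 gives
+	 * ratio = 23, so ch0_coeff = 17743, ch1_coeff = -11059 and
+	 * lux = (1000 * 17743 - 300 * -11059) / 10000 ~= 2106.
+	 */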
+ luxdata = ((alsval_ch0 * ch0_coeff) - (alsval_ch1 * ch1_coeff))/10000;
+ return luxdata;
+}
+
+static bool ltr558_set_proxim_high_threshold(struct i2c_client *client,
+ u32 thresh)
+{
+ bool st;
+ st = ltr558_i2c_write_reg(client, LTR558_PS_THRES_UP_0,
+ thresh & 0xFF);
+ if (!st)
+ st = ltr558_i2c_write_reg(client, LTR558_PS_THRES_UP_1,
+ (thresh >> 8) & 0x07);
+ return st;
+}
+
+static bool ltr558_set_proxim_low_threshold(struct i2c_client *client,
+ u32 thresh)
+{
+ bool st;
+ st = ltr558_i2c_write_reg(client, LTR558_PS_THRES_LOW_0,
+ thresh & 0xFF);
+ if (!st)
+ st = ltr558_i2c_write_reg(client, LTR558_PS_THRES_LOW_1,
+ (thresh >> 8) & 0x07);
+ return st;
+}
+
+static bool ltr558_set_als_high_threshold(struct i2c_client *client, u32 thresh)
+{
+ bool st;
+ st = ltr558_i2c_write_reg(client, LTR558_ALS_THRES_UP_0,
+ thresh & 0xFF);
+ if (!st)
+ st = ltr558_i2c_write_reg(client, LTR558_ALS_THRES_UP_1,
+ (thresh >> 8) & 0xFF);
+ return st;
+}
+
+static bool ltr558_set_als_low_threshold(struct i2c_client *client, u32 thresh)
+{
+ bool st;
+ st = ltr558_i2c_write_reg(client, LTR558_ALS_THRES_LOW_0,
+ thresh & 0xFF);
+ if (!st)
+ st = ltr558_i2c_write_reg(client, LTR558_ALS_THRES_LOW_1,
+ ((thresh >> 8) & 0xFF));
+ return st;
+}
+
+/* Sysfs interface */
+
+/* proximity enable/disable */
+static ssize_t show_prox_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ if (chip->is_prox_enable)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_prox_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+ int err = 0;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+ if ((lval != 1) && (lval != 0)) {
+ dev_err(dev, "illegal value %lu\n", lval);
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ if (lval == 1) {
+ if (chip->supply[VDD]) {
+ err = regulator_enable(chip->supply[VDD]);
+ if (err)
+ dev_err(dev, "vdd regulator enable failed\n");
+ }
+ if (chip->supply[LED]) {
+ err = regulator_enable(chip->supply[LED]);
+ if (err)
+ dev_err(dev, "led regulator enable failed\n");
+ }
+ err = ltr558_ps_enable(client, PS_RANGE1);
+ } else {
+ err = ltr558_ps_disable(client);
+ if (chip->supply[VDD]) {
+ err = regulator_disable(chip->supply[VDD]);
+ if (err)
+ dev_err(dev, "vdd regulator disable failed\n");
+ }
+ if (chip->supply[LED]) {
+ err = regulator_disable(chip->supply[LED]);
+ if (err)
+ dev_err(dev, "led regulator disable failed\n");
+ }
+ }
+
+ if (err < 0)
+ dev_err(dev, "Error in enabling proximity\n");
+ else
+ chip->is_prox_enable = (lval) ? true : false;
+
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Proximity low thresholds */
+static ssize_t show_proxim_low_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->prox_low_thres);
+}
+
+static ssize_t store_proxim_low_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+	if (lval > 0x7FF) {
+ dev_err(dev, "The threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+	st = ltr558_set_proxim_low_threshold(client, lval);
+ if (!st)
+ chip->prox_low_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting proximity low threshold\n");
+
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Proximity high thresholds */
+static ssize_t show_proxim_high_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->prox_high_thres);
+}
+
+static ssize_t store_proxim_high_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+	if (lval > 0x7FF) {
+ dev_err(dev, "The threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ st = ltr558_set_proxim_high_threshold(client, lval);
+ if (!st)
+ chip->prox_high_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting proximity high threshold\n");
+
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* als enable/disable */
+static ssize_t show_als_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ if (chip->is_als_enable)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_als_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+ int err = 0;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+ if ((lval != 1) && (lval != 0)) {
+ dev_err(dev, "illegal value %lu\n", lval);
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ if (lval == 1)
+ err = ltr558_als_enable(client, chip->als_gainrange);
+ else
+ err = ltr558_als_disable(client);
+
+ if (err < 0)
+ dev_err(dev, "Error in enabling ALS\n");
+ else
+ chip->is_als_enable = (lval) ? true : false;
+
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* als low thresholds */
+static ssize_t show_als_low_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->als_low_thres);
+}
+
+static ssize_t store_als_low_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+	if (lval > 0xFFFF) {
+ dev_err(dev, "The ALS threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ st = ltr558_set_als_low_threshold(client, (int)lval);
+ if (!st)
+ chip->als_low_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting als low threshold\n");
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Als high thresholds */
+static ssize_t show_als_high_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->als_high_thres);
+}
+
+static ssize_t store_als_high_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+ bool st;
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+	if (lval > 0xFFFF) {
+ dev_err(dev, "The als threshold is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ st = ltr558_set_als_high_threshold(client, (int)lval);
+ if (!st)
+ chip->als_high_thres = (int)lval;
+ else
+ dev_err(dev, "Error in setting als high threshold\n");
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Proximity persist */
+static ssize_t show_proxim_persist(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->prox_persist);
+}
+
+static ssize_t store_proxim_persist(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+	if (lval > 16) {
+ dev_err(dev, "The proximity persist is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ chip->prox_persist = (int)lval;
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* als/ir persist */
+static ssize_t show_als_persist(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ return sprintf(buf, "%d\n", chip->als_persist);
+}
+
+static ssize_t store_als_persist(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ unsigned long lval;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ if (strict_strtoul(buf, 10, &lval))
+ return -EINVAL;
+
+	if (lval > 16) {
+ dev_err(dev, "The als persist is not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&chip->lock);
+ chip->als_persist = (int)lval;
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+/* Display proxim data */
+static ssize_t show_proxim_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ int prox_data = 0;
+ ssize_t buf_count = 0;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+ mutex_lock(&chip->lock);
+
+ if (chip->is_prox_enable) {
+ prox_data = ltr558_ps_read(chip->client);
+ chip->prox_reading = prox_data;
+ buf_count = sprintf(buf, "%d\n", prox_data);
+	} else
+ buf_count = sprintf(buf, "%d\n", chip->prox_reading);
+ mutex_unlock(&chip->lock);
+ return buf_count;
+}
+
+/* Display als data */
+static ssize_t show_als_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ ssize_t buf_count = 0;
+ int als_data = 0;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ mutex_lock(&chip->lock);
+ if (chip->is_als_enable) {
+ als_data = ltr558_als_read(chip->client);
+ buf_count = sprintf(buf, "%d\n", als_data);
+ chip->als_reading = als_data;
+ } else {
+ buf_count = sprintf(buf, "%d\n", chip->als_reading);
+ }
+ mutex_unlock(&chip->lock);
+
+ return buf_count;
+}
+
+/* Read name */
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", DEVICE_NAME);
+}
+
+static IIO_DEVICE_ATTR(proximity_low_threshold, S_IRUGO | S_IWUSR,
+ show_proxim_low_threshold, store_proxim_low_threshold, 0);
+static IIO_DEVICE_ATTR(proximity_high_threshold, S_IRUGO | S_IWUSR,
+ show_proxim_high_threshold, store_proxim_high_threshold, 0);
+static IIO_DEVICE_ATTR(proximity_persist, S_IRUGO | S_IWUSR,
+ show_proxim_persist, store_proxim_persist, 0);
+static IIO_DEVICE_ATTR(proximity_enable, S_IRUGO | S_IWUSR,
+ show_prox_enable, store_prox_enable, 0);
+static IIO_DEVICE_ATTR(proximity_value, S_IRUGO,
+ show_proxim_data, NULL, 0);
+
+static IIO_DEVICE_ATTR(als_low_threshold, S_IRUGO | S_IWUSR,
+ show_als_low_threshold, store_als_low_threshold, 0);
+static IIO_DEVICE_ATTR(als_high_threshold, S_IRUGO | S_IWUSR,
+ show_als_high_threshold, store_als_high_threshold, 0);
+static IIO_DEVICE_ATTR(als_persist, S_IRUGO | S_IWUSR,
+ show_als_persist, store_als_persist, 0);
+static IIO_DEVICE_ATTR(als_enable, S_IRUGO | S_IWUSR,
+ show_als_enable, store_als_enable, 0);
+static IIO_DEVICE_ATTR(als_value, S_IRUGO,
+ show_als_data, NULL, 0);
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+
+static struct attribute *ltr558_attributes[] = {
+ &iio_dev_attr_name.dev_attr.attr,
+
+ &iio_dev_attr_als_low_threshold.dev_attr.attr,
+ &iio_dev_attr_als_high_threshold.dev_attr.attr,
+ &iio_dev_attr_als_enable.dev_attr.attr,
+ &iio_dev_attr_als_persist.dev_attr.attr,
+ &iio_dev_attr_als_value.dev_attr.attr,
+
+ &iio_dev_attr_proximity_low_threshold.dev_attr.attr,
+ &iio_dev_attr_proximity_high_threshold.dev_attr.attr,
+ &iio_dev_attr_proximity_enable.dev_attr.attr,
+ &iio_dev_attr_proximity_persist.dev_attr.attr,
+ &iio_dev_attr_proximity_value.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ltr558_group = {
+ .attrs = ltr558_attributes,
+};
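+
+/*
+ * Example user-space usage of the attributes above (illustrative; the
+ * iio device index is system dependent):
+ *   echo 1 > /sys/bus/iio/devices/iio:device0/als_enable
+ *   cat /sys/bus/iio/devices/iio:device0/als_value
+ */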
+
+static int ltr558_chip_init(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+ int error = 0;
+
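+ /* wait for the part to finish power-on reset before programming it */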
+ msleep(PON_DELAY);
+
+ chip->is_prox_enable = 0;
+ chip->prox_low_thres = 0;
+ chip->prox_high_thres = 0x7FF;
+ chip->prox_reading = 0;
+
+ chip->als_low_thres = 0;
+ chip->als_high_thres = 0xFFFF;
+ chip->als_reading = 0;
+
+ chip->is_als_enable = 0;
+ chip->prox_persist = 0;
+ chip->als_persist = 0;
+
+ /* Enable PS to Gain1 at startup */
+ chip->ps_gainrange = PS_RANGE1;
+ error = ltr558_ps_enable(client, chip->ps_gainrange);
+ if (error < 0)
+ goto out;
+
+ /* Enable ALS to Full Range at startup */
+ chip->als_gainrange = ALS_RANGE2_64K;
+ error = ltr558_als_enable(client, chip->als_gainrange);
+ if (error < 0)
+ goto out;
+
+out:
+ return error;
+}
+
+static const struct iio_info ltr558_info = {
+ .attrs = &ltr558_group,
+ .driver_module = THIS_MODULE,
+};
+
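+/*
+ * Threaded interrupt handler: read the ALS/PS status register and, when
+ * a threshold interrupt fired with fresh data, cache the new reading so
+ * the *_value sysfs attributes can report it.
+ */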
+static irqreturn_t threshold_isr(int irq, void *irq_data)
+{
+ struct ltr558_chip *chip = (struct ltr558_chip *)irq_data;
+ s32 int_reg;
+ struct i2c_client *client = chip->client;
+
+ int_reg = i2c_smbus_read_byte_data(client, LTR558_ALS_PS_STATUS);
+ if (int_reg < 0) {
+ dev_err(&client->dev, "Error in reading register %d, error %d\n",
+ LTR558_ALS_PS_STATUS, int_reg);
+ return IRQ_HANDLED;
+ }
+
+ if (int_reg & STATUS_ALS_INT_TRIGGER) {
+ if (int_reg & STATUS_ALS_NEW_DATA)
+ chip->als_reading = ltr558_als_read(client);
+ }
+
+ if (int_reg & STATUS_PS_INT_TRIGGER) {
+ if (int_reg & STATUS_PS_NEW_DATA)
+ chip->prox_reading = ltr558_ps_read(client);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ltr558_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct ltr558_chip *chip;
+ struct iio_dev *indio_dev;
+
+ /* data memory allocation */
+ indio_dev = iio_device_alloc(sizeof(*chip));
+ if (indio_dev == NULL) {
+ dev_err(&client->dev, "iio allocation fails\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ chip = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ chip->client = client;
+ chip->irq = client->irq;
+
+ if (chip->irq > 0) {
+ ret = request_threaded_irq(chip->irq, NULL, threshold_isr,
+ IRQF_SHARED, "LTR558_ALS", chip);
+ if (ret) {
+ dev_err(&client->dev, "Unable to register irq %d; "
+ "ret %d\n", chip->irq, ret);
+ goto exit_iio_free;
+ }
+ }
+
+ mutex_init(&chip->lock);
+
+ ret = ltr558_chip_init(client);
+ if (ret)
+ goto exit_irq;
+
+ indio_dev->info = &ltr558_info;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "iio registration fails\n");
+ goto exit_irq;
+ }
+
+ chip->supply[VDD] = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(chip->supply[VDD])) {
+ dev_err(&client->dev, "could not get vdd regulator\n");
+ ret = PTR_ERR(chip->supply[VDD]);
+ goto exit_iio_unregister;
+ }
+
+ chip->supply[LED] = regulator_get(&client->dev, "vled");
+ if (IS_ERR(chip->supply[LED])) {
+ dev_err(&client->dev, "could not get vled regulator\n");
+ ret = PTR_ERR(chip->supply[LED]);
+ goto exit_iio_unregister;
+ }
+
+ ret = regulator_enable(chip->supply[VDD]);
+ if (ret) {
+ dev_err(&client->dev,
+ "func:%s regulator enable failed\n", __func__);
+ goto exit_iio_unregister;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, LTR558_MANUFACTURER_ID);
+ if (ret < 0) {
+ dev_err(&client->dev, "Err in reading register %d, error %d\n",
+ LTR558_MANUFACTURER_ID, ret);
+ goto exit_iio_unregister;
+ }
+
+ if (ret != LTR_MANUFACTURER_ID) {
+ dev_err(&client->dev, "sensor not found\n");
+ ret = -ENODEV;
+ goto exit_iio_unregister;
+ }
+
+ ret = regulator_disable(chip->supply[VDD]);
+ if (ret) {
+ dev_err(&client->dev,
+ "func:%s regulator disable failed\n", __func__);
+ goto exit_iio_unregister;
+ }
+
+ dev_dbg(&client->dev, "%s() success\n", __func__);
+ return 0;
+
+exit_iio_unregister:
+ iio_device_unregister(indio_dev);
+exit_irq:
+ if (chip->irq > 0)
+ free_irq(chip->irq, chip);
+exit_iio_free:
+ iio_device_free(indio_dev);
+exit:
+ return ret;
+}
+
+static int ltr558_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ltr558_chip *chip = iio_priv(indio_dev);
+
+ dev_dbg(&client->dev, "%s()\n", __func__);
+ if (chip->irq > 0)
+ free_irq(chip->irq, chip);
+ ltr558_ps_disable(client);
+ ltr558_als_disable(client);
+ iio_device_unregister(indio_dev);
+ regulator_put(chip->supply[LED]);
+ regulator_put(chip->supply[VDD]);
+ iio_device_free(indio_dev);
+ return 0;
+}
+
+static const struct i2c_device_id ltr558_id[] = {
+ { DEVICE_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltr558_id);
+
+static const struct of_device_id ltr558_of_match[] = {
+ { .compatible = "lite-on,ltr558", },
+ { .compatible = "lite-on,ltr659", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ltr558_of_match);
+
+static struct i2c_driver ltr558_driver = {
+ .class = I2C_CLASS_HWMON,
+ .probe = ltr558_probe,
+ .remove = ltr558_remove,
+ .id_table = ltr558_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVICE_NAME,
+ .of_match_table = of_match_ptr(ltr558_of_match),
+ },
+};
+
+static int __init ltr558_driverinit(void)
+{
+ return i2c_add_driver(&ltr558_driver);
+}
+
+static void __exit ltr558_driverexit(void)
+{
+ i2c_del_driver(&ltr558_driver);
+}
+
+module_init(ltr558_driverinit);
+module_exit(ltr558_driverexit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LTR558 ambient light and proximity sensor driver");
diff --git a/drivers/staging/iio/light/ltr558als.h b/drivers/staging/iio/light/ltr558als.h
new file mode 100644
index 000000000000..8ffabb013df6
--- /dev/null
+++ b/drivers/staging/iio/light/ltr558als.h
@@ -0,0 +1,126 @@
+/* Lite-On LTR-558ALS Linux Driver
+ *
+ * Copyright (c) 2012-2014, NVIDIA Corporation. All Rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _LTR558_H
+#define _LTR558_H
+
+
+/* LTR-558 Registers */
+#define LTR558_ALS_CONTR 0x80
+#define LTR558_PS_CONTR 0x81
+#define LTR558_PS_LED 0x82
+#define LTR558_PS_N_PULSES 0x83
+#define LTR558_PS_MEAS_RATE 0x84
+#define LTR558_ALS_MEAS_RATE 0x85
+#define LTR558_MANUFACTURER_ID 0x87
+
+#define LTR558_INTERRUPT 0x8F
+#define LTR558_PS_THRES_UP_0 0x90
+#define LTR558_PS_THRES_UP_1 0x91
+#define LTR558_PS_THRES_LOW_0 0x92
+#define LTR558_PS_THRES_LOW_1 0x93
+
+#define LTR558_ALS_THRES_UP_0 0x97
+#define LTR558_ALS_THRES_UP_1 0x98
+#define LTR558_ALS_THRES_LOW_0 0x99
+#define LTR558_ALS_THRES_LOW_1 0x9A
+
+#define LTR558_INTERRUPT_PERSIST 0x9E
+
+/* 558's Read Only Registers */
+#define LTR558_ALS_DATA_CH1_0 0x88
+#define LTR558_ALS_DATA_CH1_1 0x89
+#define LTR558_ALS_DATA_CH0_0 0x8A
+#define LTR558_ALS_DATA_CH0_1 0x8B
+#define LTR558_ALS_PS_STATUS 0x8C
+#define LTR558_PS_DATA_0 0x8D
+#define LTR558_PS_DATA_1 0x8E
+
+/* ALS PS STATUS 0x8C */
+#define STATUS_ALS_GAIN_RANGE1 0x10
+#define STATUS_ALS_INT_TRIGGER 0x08
+#define STATUS_ALS_NEW_DATA 0x04
+#define STATUS_PS_INT_TRIGGER 0x02
+#define STATUS_PS_NEW_DATA 0x01
+
+/* Basic Operating Modes */
+#define MODE_ALS_ON_Range1 0x0B
+#define MODE_ALS_ON_Range2 0x03
+#define MODE_ALS_StdBy 0x00
+
+#define MODE_PS_ON_Gain1 0x03
+#define MODE_PS_ON_Gain4 0x07
+#define MODE_PS_ON_Gain8 0x0B
+#define MODE_PS_ON_Gain16 0x0F
+#define MODE_PS_StdBy 0x00
+
+#define PS_LED_CUR_LEVEL_5 0x00
+#define PS_LED_CUR_LEVEL_10 0x01
+#define PS_LED_CUR_LEVEL_20 0x02
+#define PS_LED_CUR_LEVEL_50 0x03
+#define PS_LED_CUR_LEVEL_100 0x07
+
+#define PS_LED_CUR_DUTY_25 0x00
+#define PS_LED_CUR_DUTY_50 0x08
+#define PS_LED_CUR_DUTY_75 0x10
+#define PS_LED_CUR_DUTY_100 0x18
+
+#define PS_LED_PMF_30KHZ 0x0
+#define PS_LED_PMF_40KHZ 0x20
+#define PS_LED_PMF_50KHZ 0x40
+#define PS_LED_PMF_60KHZ 0x60
+#define PS_LED_PMF_70KHZ 0x80
+#define PS_LED_PMF_80KHZ 0xA0
+#define PS_LED_PMF_90KHZ 0xC0
+#define PS_LED_PMF_100KHZ 0xE0
+
+#define PS_N_PULSES_1 0x01
+#define PS_N_PULSES_2 0x02
+#define PS_N_PULSES_3 0x03
+#define PS_N_PULSES_4 0x04
+#define PS_N_PULSES_5 0x05
+#define PS_N_PULSES_6 0x06
+#define PS_N_PULSES_7 0x07
+#define PS_N_PULSES_8 0x08
+#define PS_N_PULSES_9 0x09
+#define PS_N_PULSES_10 0x0A
+#define PS_N_PULSES_11 0x0B
+#define PS_N_PULSES_12 0x0C
+#define PS_N_PULSES_13 0x0D
+#define PS_N_PULSES_14 0x0E
+#define PS_N_PULSES_15 0x0F
+
+#define PS_MEAS_RATE_50MS 0x00
+#define PS_MEAS_RATE_70MS 0x01
+#define PS_MEAS_RATE_100MS 0x02
+#define PS_MEAS_RATE_200MS 0x03
+#define PS_MEAS_RATE_500MS 0x04
+#define PS_MEAS_RATE_1000MS 0x05
+#define PS_MEAS_RATE_2000MS 0x07
+
+#define PS_RANGE1 1
+#define PS_RANGE4 2
+#define PS_RANGE8 4
+#define PS_RANGE16 8
+
+#define ALS_RANGE1_320 1
+#define ALS_RANGE2_64K 2
+
+#define LTR_MANUFACTURER_ID 0x05
+
+/* Power On response time in ms */
+#define PON_DELAY 600
+#define WAKEUP_DELAY 10
+
+#endif
diff --git a/drivers/staging/iio/light/max44005.c b/drivers/staging/iio/light/max44005.c
new file mode 100644
index 000000000000..de11f26c1607
--- /dev/null
+++ b/drivers/staging/iio/light/max44005.c
@@ -0,0 +1,717 @@
+/*
+ * An IIO driver for the MAX44005 light sensor.
+ *
+ * IIO light driver for monitoring ambient light intensity (in lux),
+ * color and proximity/IR.
+ *
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define INT_STATUS_REG_ADDR 0x00
+#define MAIN_CONF_REG_ADDR 0x01
+#define AMB_CONF_REG_ADDR 0x02
+#define PROX_CONF_REG_ADDR 0x03
+#define AMB_CLEAR_HIGH_ADDR 0x04
+#define AMB_RED_HIGH_ADDR 0x06
+#define AMB_GREEN_HIGH_ADDR 0x08
+#define AMB_BLUE_HIGH_ADDR 0x0A
+#define IR_HIGH_ADDR 0x0C
+#define PROX_HIGH_ADDR 0x10
+
+#define MAX_SHDN_ENABLE 0x08
+#define MAX_SHDN_DISABLE 0x00
+
+#define MODE_SHIFT 4
+#define MODE_CLEAR_ONLY 0x0
+#define MODE_CLEAR_IR 0x1
+#define MODE_CRGB 0x2
+#define MODE_CLEAR_PROX 0x3
+#define MODE_ALL 0x4
+#define MODE_PROX_ONLY 0x5
+
+#define COMP_ENABLE 0x40
+
+#define AMB_PGA_1x 0x00
+#define AMB_PGA_4x 0x01
+#define AMB_PGA_16x 0x02
+#define AMB_PGA_256x 0x03
+
+#define LED_DRV_SHIFT 4
+#define LED_DRV_STRENGTH 110 /* mA */
+
+#define POWER_ON_DELAY 20 /* 20ms */
+
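+/*
+ * Expands to the body of a sysfs show() handler: returns the masked
+ * (18-bit, 0x3FFFF) register value while the channel is enabled, or
+ * "-1" when the channel is off or the i2c read fails.
+ */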
+#define MAX44005_SYSFS_SHOW(en, reg_addr, nbytes) \
+ do { \
+ int ret; \
+ int value; \
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev); \
+ struct max44005_chip *chip = iio_priv(indio_dev); \
+ if (!en) \
+ return sprintf(buf, "-1"); \
+ mutex_lock(&chip->lock); \
+ ret = max44005_read(chip, &value, reg_addr, nbytes); \
+ if (ret < 0) { \
+ mutex_unlock(&chip->lock); \
+ return sprintf(buf, "-1"); \
+ } \
+ mutex_unlock(&chip->lock); \
+ value &= 0x3FFFF; \
+ return sprintf(buf, "%d", value); \
+ } while (0)
+
+#define CLEAR_ENABLED (chip->using_als)
+
+#define PROXIMITY_ENABLED (chip->using_proximity)
+
+
+enum {
+ CHIP = 0,
+ LED
+};
+
+static struct class *sensor_class;
+
+struct max44005_chip {
+ struct i2c_client *client;
+ struct mutex lock;
+
+ struct regulator *supply[2];
+ bool power_utilization[2];
+
+ bool using_als;
+ bool using_proximity;
+
+ bool is_standby;
+ int shutdown_complete;
+
+ u32 gain;
+ const char *als_resolution;
+};
+
+static int max44005_read(struct max44005_chip *chip, int *rval, u8 reg_addr,
+ int nbytes)
+{
+ u8 val[2];
+ int ret;
+
+ if (chip->supply[CHIP] && !regulator_is_enabled(chip->supply[CHIP]))
+ return -EINVAL;
+
+ if (chip->shutdown_complete)
+ return -EINVAL;
+
+ ret = i2c_smbus_read_i2c_block_data(chip->client, reg_addr,
+ nbytes, val);
+
+ if (ret != nbytes) {
+ dev_err(&chip->client->dev, "[MAX44005] i2c_read_failed" \
+ "in func: %s\n", __func__);
+ if (ret < 0)
+ return ret;
+ return -EINVAL;
+ }
+
+ *rval = val[0];
+ if (nbytes == 2)
+ *rval = ((*rval) << 8) | val[1];
+ return 0;
+}
+
+static int max44005_write(struct max44005_chip *chip, u8 val, u8 reg_addr)
+{
+ int ret;
+
+ if (chip->supply[CHIP] && !regulator_is_enabled(chip->supply[CHIP]))
+ return -EINVAL;
+
+ if (chip->shutdown_complete)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(chip->client, reg_addr, val);
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "[MAX44005] i2c_write_failed" \
+ "in func: %s\n", __func__);
+ }
+ return ret;
+}
+
+/* assumes chip is power on */
+static void max44005_standby(struct max44005_chip *chip, bool shutdown)
+{
+ int ret = 0;
+
+ if (chip->is_standby == shutdown)
+ return;
+
+ if (shutdown == chip->power_utilization[CHIP])
+ return;
+
+ if (shutdown == false) {
+ ret = max44005_write(chip, MAX_SHDN_DISABLE,
+ INT_STATUS_REG_ADDR);
+ if (!ret)
+ chip->is_standby = false;
+ } else {
+ ret = max44005_write(chip, MAX_SHDN_ENABLE,
+ INT_STATUS_REG_ADDR);
+ if (!ret)
+ chip->is_standby = true;
+ }
+}
+
+static bool set_main_conf(struct max44005_chip *chip, int mode)
+{
+ return max44005_write(chip, mode << MODE_SHIFT,
+ MAIN_CONF_REG_ADDR) == 0;
+}
+
+/* current is in mA */
+static bool set_led_drive_strength(struct max44005_chip *chip, int cur)
+{
+ int ret = 0;
+ if (!chip->supply[LED])
+ goto finish;
+
+ if (cur && !chip->power_utilization[LED])
+ ret = regulator_enable(chip->supply[LED]);
+ else if (!cur && chip->power_utilization[LED])
+ ret = regulator_disable(chip->supply[LED]);
+
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "%s: regulator %s failed\n", __func__,
+ cur ? "enable" : "disable");
+ return false;
+ }
+finish:
+ chip->power_utilization[LED] = cur ? 1 : 0;
+ return max44005_write(chip, 0xA1, PROX_CONF_REG_ADDR) == 0;
+}
+
+static bool max44005_power(struct max44005_chip *chip, int power_on)
+{
+ bool was_regulator_already_on = false;
+
+ if (power_on && chip->power_utilization[CHIP])
+ return true;
+
+ if (power_on) {
+ if (chip->supply[CHIP]) {
+ was_regulator_already_on =
+ regulator_is_enabled(chip->supply[CHIP]);
+ if (regulator_enable(chip->supply[CHIP]))
+ return false;
+ if (!was_regulator_already_on)
+ msleep(POWER_ON_DELAY);
+ }
+ chip->power_utilization[CHIP] = 1;
+
+ /* wakeup if still in shutdown state */
+ max44005_standby(chip, false);
+ return true;
+ }
+
+ /* power off request */
+ /* disable the power source as the chip does not need it anymore */
+ if (chip->supply[CHIP] && chip->power_utilization[CHIP] &&
+ regulator_disable(chip->supply[CHIP]))
+ return false;
+ chip->power_utilization[CHIP] = 0;
+ /* the chip no longer draws useful power; put it into standby so the
+ * supply, if still enabled, is not wasted */
+ max44005_standby(chip, true);
+
+ return true;
+}
+
+/* assumes power is on */
+static bool max44005_restore_state(struct max44005_chip *chip)
+{
+ bool ret = false;
+
+ if (PROXIMITY_ENABLED)
+ ret = set_led_drive_strength(chip, LED_DRV_STRENGTH);
+ else
+ ret = set_led_drive_strength(chip, 0);
+
+ if (!ret)
+ return false;
+
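+ /*
+ * Two-bit channel state: bit 1 = ALS (clear) enabled, bit 0 =
+ * proximity enabled; 0 means no channel is active, so power down.
+ */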
+ switch ((CLEAR_ENABLED << 1) | PROXIMITY_ENABLED) {
+ case 0:
+ ret = max44005_power(chip, false);
+ break;
+ case 1:
+ ret = set_main_conf(chip, MODE_PROX_ONLY);
+ break;
+ case 2:
+ ret = set_main_conf(chip, MODE_CRGB);
+ break;
+ case 3:
+ ret = set_main_conf(chip, MODE_CLEAR_PROX);
+ break;
+ }
+
+ return ret;
+}
+
+/* sysfs name begin */
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max44005_chip *chip = iio_priv(indio_dev);
+ return sprintf(buf, "%s\n", chip->client->name);
+}
+/* sysfs name end */
+
+/* amb clear begin */
+static ssize_t show_amb_clear_value(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ MAX44005_SYSFS_SHOW(CLEAR_ENABLED, AMB_CLEAR_HIGH_ADDR, 2);
+}
+
+static ssize_t show_amb_red_value(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ MAX44005_SYSFS_SHOW(CLEAR_ENABLED, AMB_RED_HIGH_ADDR, 2);
+}
+
+static ssize_t show_amb_green_value(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ MAX44005_SYSFS_SHOW(CLEAR_ENABLED, AMB_GREEN_HIGH_ADDR, 2);
+}
+
+static ssize_t show_amb_blue_value(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ MAX44005_SYSFS_SHOW(CLEAR_ENABLED, AMB_BLUE_HIGH_ADDR, 2);
+}
+
+static ssize_t show_ir_value(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ MAX44005_SYSFS_SHOW(CLEAR_ENABLED, IR_HIGH_ADDR, 2);
+}
+
+static ssize_t amb_clear_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ u32 lval;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max44005_chip *chip = iio_priv(indio_dev);
+
+ if (kstrtou32(buf, 10, &lval))
+ return -EINVAL;
+
+ if (lval && (lval != 1))
+ return -EINVAL;
+
+ if (lval == chip->using_als)
+ return count;
+
+ mutex_lock(&chip->lock);
+
+ if (lval) {
+ if (!max44005_power(chip, true))
+ goto fail;
+
+ if (max44005_write(chip, chip->gain, AMB_CONF_REG_ADDR))
+ goto fail;
+
+ if (!PROXIMITY_ENABLED &&
+ set_main_conf(chip, MODE_CRGB))
+ goto success;
+
+ /* if proximity is already enabled, switch the mode to
+ * CLEAR + proximity */
+ if (PROXIMITY_ENABLED &&
+ set_main_conf(chip, MODE_CLEAR_PROX))
+ goto success;
+ /* mode register unchanged because the i2c write failed */
+ goto fail;
+ } else {
+ if (PROXIMITY_ENABLED && set_main_conf(chip, MODE_PROX_ONLY))
+ goto success;
+
+ if (!PROXIMITY_ENABLED && max44005_power(chip, false))
+ goto success;
+
+ goto fail;
+ }
+
+success:
+ chip->using_als = lval;
+ mutex_unlock(&chip->lock);
+ return count;
+fail:
+ mutex_unlock(&chip->lock);
+ return -EBUSY;
+}
+/* amb clear end */
+
+/* amb LED begin */
+static ssize_t show_prox_value(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ MAX44005_SYSFS_SHOW(PROXIMITY_ENABLED, PROX_HIGH_ADDR, 2);
+}
+
+static ssize_t prox_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ u32 lval;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max44005_chip *chip = iio_priv(indio_dev);
+
+ if (kstrtou32(buf, 10, &lval))
+ return -EINVAL;
+
+ if (lval && (lval != 1))
+ return -EINVAL;
+
+ if (lval == PROXIMITY_ENABLED)
+ return count;
+
+ mutex_lock(&chip->lock);
+ if (lval) {
+ if (!max44005_power(chip, true))
+ goto fail;
+
+ if (!set_led_drive_strength(chip, LED_DRV_STRENGTH))
+ goto fail;
+
+ if (CLEAR_ENABLED && set_main_conf(chip, MODE_CLEAR_PROX))
+ goto success;
+
+ if (!CLEAR_ENABLED && set_main_conf(chip, MODE_PROX_ONLY))
+ goto success;
+
+ goto fail;
+ } else {
+ /* power off if no other channel is active */
+ if (!CLEAR_ENABLED && max44005_power(chip, false))
+ goto success;
+
+ if (CLEAR_ENABLED && set_led_drive_strength(chip, 0) &&
+ set_main_conf(chip, MODE_CLEAR_ONLY))
+ goto success;
+
+ goto fail;
+ }
+
+success:
+ chip->using_proximity = lval;
+ mutex_unlock(&chip->lock);
+ return count;
+fail:
+ mutex_unlock(&chip->lock);
+ return -EBUSY;
+}
+/* amb LED end */
+
+/* als resolution begin */
+static ssize_t show_resolution(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct max44005_chip *chip = iio_priv(indio_dev);
+ if (chip->als_resolution)
+ return sprintf(buf, "%s", chip->als_resolution);
+ return sprintf(buf, "1.75");
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+static IIO_DEVICE_ATTR(amb_clear, S_IRUGO | S_IWUSR, show_amb_clear_value,
+ amb_clear_enable, 0);
+static IIO_DEVICE_ATTR(red, S_IRUGO, show_amb_red_value,
+ NULL, 0);
+static IIO_DEVICE_ATTR(green, S_IRUGO, show_amb_green_value,
+ NULL, 0);
+static IIO_DEVICE_ATTR(blue, S_IRUGO, show_amb_blue_value,
+ NULL, 0);
+static IIO_DEVICE_ATTR(ir, S_IRUGO, show_ir_value,
+ NULL, 0);
+static IIO_DEVICE_ATTR(proximity, S_IRUGO | S_IWUSR, show_prox_value,
+ prox_enable, 0);
+static IIO_DEVICE_ATTR(als_resolution, S_IRUGO, show_resolution,
+ NULL, 0);
+
+/* sysfs attr */
+static struct attribute *max44005_iio_attr[] = {
+ &iio_dev_attr_name.dev_attr.attr,
+ &iio_dev_attr_amb_clear.dev_attr.attr,
+ &iio_dev_attr_proximity.dev_attr.attr,
+ &iio_dev_attr_red.dev_attr.attr,
+ &iio_dev_attr_green.dev_attr.attr,
+ &iio_dev_attr_blue.dev_attr.attr,
+ &iio_dev_attr_ir.dev_attr.attr,
+ &iio_dev_attr_als_resolution.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group max44005_iio_attr_group = {
+ .attrs = max44005_iio_attr,
+};
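+
+/*
+ * Example user-space usage (illustrative; the iio device index is
+ * system dependent):
+ *   echo 1 > /sys/bus/iio/devices/iio:device0/amb_clear
+ *   cat /sys/bus/iio/devices/iio:device0/red
+ */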
+
+static const struct iio_info max44005_iio_info = {
+ .attrs = &max44005_iio_attr_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int max44005_sysfs_init(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct device *class_device;
+
+ sensor_class = class_create(THIS_MODULE, "sensors");
+ if (IS_ERR(sensor_class)) {
+ dev_err(&client->dev, "create /sys/class/sensors fails\n");
+ return PTR_ERR(sensor_class);
+ }
+
+ class_device = device_create(sensor_class, &indio_dev->dev,
+ 0, NULL, "%s", "light");
+ if (IS_ERR(class_device)) {
+ dev_err(&client->dev, "create ...sensors/light fails\n");
+ class_destroy(sensor_class);
+ return PTR_ERR(class_device);
+ }
+
+ return 0;
+}
+
+static int max44005_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct max44005_chip *chip;
+ int err;
+ const char *prop_value = NULL;
+ u32 gain = 0;
+
+ if (client->dev.of_node) {
+ of_property_read_u32(client->dev.of_node,
+ "maxim,gain", &gain);
+ err = of_property_read_string(client->dev.of_node,
+ "maxim,als-resolution", &prop_value);
+ }
+
+ indio_dev = iio_device_alloc(sizeof(struct max44005_chip));
+ if (indio_dev == NULL) {
+ dev_err(&client->dev, "iio allocation fails\n");
+ return -ENOMEM;
+ }
+
+ chip = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ chip->client = client;
+ mutex_init(&chip->lock);
+
+ indio_dev->info = &max44005_iio_info;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ err = iio_device_register(indio_dev);
+ if (err) {
+ dev_err(&client->dev, "iio registration fails\n");
+ goto free_iio_dev;
+ }
+
+ err = max44005_sysfs_init(client);
+ if (err) {
+ dev_err(&client->dev, "max44005 sysfs init fails\n");
+ goto unregister_iio_dev;
+ }
+
+ chip->supply[CHIP] = regulator_get(&client->dev, "vcc");
+
+ if (IS_ERR(chip->supply[CHIP])) {
+ dev_err(&client->dev, "could not get vcc regulator\n");
+ err = PTR_ERR(chip->supply[CHIP]);
+ goto unregister_sysfs;
+ }
+
+ /* MAX44006 ALS does not use vled. */
+ if (of_device_is_compatible(client->dev.of_node, "maxim,max44006"))
+ goto finish;
+
+ chip->supply[LED] = regulator_get(&client->dev, "vled");
+
+ if (IS_ERR(chip->supply[LED])) {
+ dev_err(&client->dev, "could not get vled regulator\n");
+ err = PTR_ERR(chip->supply[LED]);
+ goto release_regulator;
+ }
+
+finish:
+ switch (gain) {
+ case AMB_PGA_1x:
+ case AMB_PGA_4x:
+ case AMB_PGA_16x:
+ case AMB_PGA_256x:
+ break;
+ default:
+ gain = AMB_PGA_256x;
+ }
+ chip->gain = gain;
+ chip->als_resolution = prop_value;
+
+ mutex_lock(&chip->lock);
+ max44005_power(chip, false);
+ mutex_unlock(&chip->lock);
+
+ chip->using_als = false;
+ chip->using_proximity = false;
+ chip->shutdown_complete = 0;
+ dev_info(&client->dev, "%s() success\n", __func__);
+ return 0;
+
+release_regulator:
+ regulator_put(chip->supply[CHIP]);
+unregister_sysfs:
+ device_destroy(sensor_class, 0);
+ class_destroy(sensor_class);
+unregister_iio_dev:
+ iio_device_unregister(indio_dev);
+free_iio_dev:
+ iio_device_free(indio_dev);
+ mutex_destroy(&chip->lock);
+ return err;
+
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max44005_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct max44005_chip *chip = iio_priv(indio_dev);
+ mutex_lock(&chip->lock);
+ max44005_power(chip, false);
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+static int max44005_resume(struct device *dev)
+{
+ int ret = 0;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct max44005_chip *chip = iio_priv(indio_dev);
+ mutex_lock(&chip->lock);
+ max44005_power(chip, true);
+ max44005_restore_state(chip);
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(max44005_pm_ops, max44005_suspend, max44005_resume);
+#define MAX44005_PM_OPS (&max44005_pm_ops)
+#else
+#define MAX44005_PM_OPS NULL
+#endif
+
+static int max44005_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct max44005_chip *chip = iio_priv(indio_dev);
+ dev_dbg(&client->dev, "%s()\n", __func__);
+ if (chip->supply[CHIP])
+ regulator_put(chip->supply[CHIP]);
+ mutex_destroy(&chip->lock);
+ device_destroy(sensor_class, 0);
+ class_destroy(sensor_class);
+ iio_device_unregister(indio_dev);
+ iio_device_free(indio_dev);
+ return 0;
+}
+
+static void max44005_shutdown(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct max44005_chip *chip = iio_priv(indio_dev);
+ mutex_lock(&chip->lock);
+ if (chip->supply[CHIP])
+ regulator_put(chip->supply[CHIP]);
+ chip->shutdown_complete = 1;
+ mutex_unlock(&chip->lock);
+ mutex_destroy(&chip->lock);
+ device_destroy(sensor_class, 0);
+ class_destroy(sensor_class);
+ iio_device_unregister(indio_dev);
+ iio_device_free(indio_dev);
+}
+
+static const struct i2c_device_id max44005_id[] = {
+ {"max44005", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max44005_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id max44005_of_match[] = {
+ {.compatible = "maxim,max44005", },
+ {.compatible = "maxim,max44006", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max44005_of_match);
+#endif
+
+static struct i2c_driver max44005_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max44005",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(max44005_of_match),
+ .pm = MAX44005_PM_OPS,
+ },
+ .probe = max44005_probe,
+ .shutdown = max44005_shutdown,
+ .remove = max44005_remove,
+ .id_table = max44005_id,
+};
+
+static int __init max44005_init(void)
+{
+ return i2c_add_driver(&max44005_driver);
+}
+
+static void __exit max44005_exit(void)
+{
+ i2c_del_driver(&max44005_driver);
+}
+
+module_init(max44005_init);
+module_exit(max44005_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MAX44005 Driver");
+MODULE_AUTHOR("Sri Krishna chowdary <schowdary@nvidia.com>");
diff --git a/drivers/staging/iio/light/stm8t143.c b/drivers/staging/iio/light/stm8t143.c
new file mode 100644
index 000000000000..b144cdee2a8d
--- /dev/null
+++ b/drivers/staging/iio/light/stm8t143.c
@@ -0,0 +1,392 @@
+/*
+ * An IIO driver for the STM8T143 sensor.
+ *
+ * IIO light driver for monitoring proximity.
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/of_gpio.h>
+
+
+#define NUM_REGULATORS 1
+#define NUM_CHANNELS 1
+
+enum channels {
+ PROX,
+};
+
+enum channel_state {
+ CHIP_POWER_OFF,
+ CHIP_POWER_ON,
+};
+
+struct stm8t143_chip {
+ struct platform_device *pdev;
+ int pout_gpio;
+ int tout_gpio;
+ struct regulator_bulk_data consumers[NUM_REGULATORS];
+
+ u8 state[NUM_CHANNELS];
+};
+
+
+/* device's registration with iio to facilitate user operations */
+static ssize_t stm8t143_chan_regulator_enable(
+ struct iio_dev *indio_dev, uintptr_t private,
+ struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ u8 enable;
+ int ret = 0;
+ struct stm8t143_chip *chip = iio_priv(indio_dev);
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ if (enable == (chip->state[PROX] != CHIP_POWER_OFF))
+ return 1;
+
+ if (chip->consumers[0].supply == NULL)
+ goto success;
+
+ if (enable)
+ ret = regulator_bulk_enable(1, chip->consumers);
+ else
+ ret = regulator_bulk_disable(1, chip->consumers);
+
+ if (ret) {
+ dev_err(&chip->pdev->dev,
+ "devname:%s func:%s line:%d err:_stm8t143_register_read fails\n",
+ chip->pdev->name, __func__, __LINE__);
+ goto fail;
+ }
+
+success:
+ chip->state[PROX] = enable;
+fail:
+ return ret ? ret : 1;
+}
+
+/*
+ * chan_regulator_enable is used to enable regulators used by
+ * particular channel.
+ * chan_enable actually configures various registers to activate
+ * a particular channel.
+ */
+static const struct iio_chan_spec_ext_info stm8t143_ext_info[] = {
+ {
+ .name = "regulator_enable",
+ .shared = true,
+ .write = stm8t143_chan_regulator_enable,
+ },
+ {
+ },
+};
+
+static const struct iio_chan_spec stm8t143_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = stm8t143_ext_info,
+ },
+};
+
+static int stm8t143_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct stm8t143_chip *chip = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ if (chip->state[PROX] != CHIP_POWER_ON)
+ return -EINVAL;
+
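+ /* TOUT is treated as active-low: report 1 while the pin reads 0 */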
+ *val = !gpio_get_value(chip->tout_gpio);
+ pr_debug("proximity value:%d\n", *val);
+
+ return IIO_VAL_INT;
+}
+
+static IIO_CONST_ATTR(vendor, "STMicroelectronics");
+static IIO_CONST_ATTR(in_proximity_integration_time,
+ "500000000"); /* 500 msec */
+static IIO_CONST_ATTR(in_proximity_max_range, "20"); /* cm */
+static IIO_CONST_ATTR(in_proximity_power_consumed,
+ "38"); /* milli Watt */
+
+static struct attribute *stm8t143_attrs[] = {
+ &iio_const_attr_vendor.dev_attr.attr,
+ &iio_const_attr_in_proximity_integration_time.dev_attr.attr,
+ &iio_const_attr_in_proximity_max_range.dev_attr.attr,
+ &iio_const_attr_in_proximity_power_consumed.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group stm8t143_attr_group = {
+ .name = "stm8t143",
+ .attrs = stm8t143_attrs
+};
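+
+/*
+ * The constant attributes appear in an "stm8t143" subdirectory of the
+ * iio device, e.g. (path illustrative):
+ *   cat /sys/bus/iio/devices/iio:device0/stm8t143/in_proximity_max_range
+ */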
+
+/*
+ * read_raw is used to report a channel's data to user
+ * in non SI units
+ */
+static const struct iio_info stm8t143_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &stm8t143_read_raw,
+ .attrs = &stm8t143_attr_group,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int stm8t143_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct stm8t143_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (chip->consumers[0].supply && chip->state[PROX])
+ ret |= regulator_bulk_disable(1, chip->consumers);
+
+ if (ret) {
+ dev_err(&chip->pdev->dev,
+ "devname:%s func:%s line:%d err:regulator_disable fails\n",
+ chip->pdev->name, __func__, __LINE__);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int stm8t143_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct stm8t143_chip *chip = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (chip->consumers[0].supply && chip->state[PROX])
+ ret |= regulator_bulk_enable(1, chip->consumers);
+
+ if (ret) {
+ dev_err(&chip->pdev->dev,
+ "devname:%s func:%s line:%d err:regulator_enable fails\n",
+ chip->pdev->name, __func__, __LINE__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm8t143_pm_ops, stm8t143_suspend, stm8t143_resume);
+#define STM8T143_PM_OPS (&stm8t143_pm_ops)
+#else
+#define STM8T143_PM_OPS NULL
+#endif
+
+/* parses DT for gpio information */
+static int stm8t143_gpio(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct stm8t143_chip *chip = iio_priv(indio_dev);
+ int *pout_gpio = &chip->pout_gpio;
+ int *tout_gpio = &chip->tout_gpio;
+ int ret;
+
+ *pout_gpio = of_get_named_gpio(np, "pout-gpio", 0);
+ if (*pout_gpio < 0) {
+ pr_err("could not get pout gpio from DT\n");
+ return *pout_gpio;
+ }
+
+ *tout_gpio = of_get_named_gpio(np, "tout-gpio", 0);
+ if (*tout_gpio < 0) {
+ pr_err("could not get tout gpio from DT\n");
+ return *tout_gpio;
+ }
+
+ /* essentially we are not considering the pout as an input at all */
+ ret = gpio_request_one(*pout_gpio,
+ GPIOF_OUT_INIT_LOW,
+ "pout_gpio");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "GPIO request for gpio %d failed %d\n",
+ *pout_gpio, ret);
+ return ret;
+ }
+
+ ret = gpio_request_one(*tout_gpio,
+ GPIOF_IN,
+ "tout_gpio");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "GPIO request for gpio %d failed %d\n",
+ *tout_gpio, ret);
+ gpio_free(*pout_gpio);
+ return ret;
+ }
+ return 0;
+}
+
+static void init_stm8t143_regulators(struct platform_device *pdev)
+{
+ int i, ret = 0;
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct stm8t143_chip *chip = iio_priv(indio_dev);
+
+ struct regulator_bulk_data stm8t143_consumers[] = {
+ {
+ .supply = "vdd",
+ },
+ };
+
+ if (!ARRAY_SIZE(stm8t143_consumers))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(stm8t143_consumers); i++) {
+ chip->consumers[i].supply = stm8t143_consumers[i].supply;
+ chip->consumers[i].consumer = stm8t143_consumers[i].consumer;
+ chip->consumers[i].ret = stm8t143_consumers[i].ret;
+ }
+
+ ret = devm_regulator_bulk_get(&pdev->dev,
+ ARRAY_SIZE(stm8t143_consumers),
+ chip->consumers);
+ if (ret) {
+ dev_info(&pdev->dev,
+ "devname:%s func:%s line:%d regulators not found\n",
+ pdev->name, __func__, __LINE__);
+ for (i = 0; i < ARRAY_SIZE(stm8t143_consumers); i++) {
+ chip->consumers[i].supply = NULL;
+ chip->consumers[i].consumer = NULL;
+ chip->consumers[i].ret = 0;
+ }
+ }
+}
+
+static int stm8t143_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct stm8t143_chip *chip;
+ struct iio_dev *indio_dev;
+
+ indio_dev = iio_device_alloc(sizeof(*chip));
+ if (indio_dev == NULL) {
+ dev_err(&pdev->dev,
+ "devname:%s func:%s line:%d err:iio_device_alloc fails\n",
+ pdev->name, __func__, __LINE__);
+ return -ENOMEM;
+ }
+ chip = iio_priv(indio_dev);
+ platform_set_drvdata(pdev, indio_dev);
+
+ ret = stm8t143_gpio(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "devname:%s func:%s line:%d err:gpio_init fails\n",
+ pdev->name, __func__, __LINE__);
+ goto err_gpio_irq_init;
+ }
+
+ init_stm8t143_regulators(pdev);
+
+ indio_dev->info = &stm8t143_iio_info;
+ indio_dev->channels = stm8t143_channels;
+ indio_dev->num_channels = 1;
+ indio_dev->name = pdev->name;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "devname:%s func:%s line:%d err:iio_device_register fails\n",
+ pdev->name, __func__, __LINE__);
+ goto err_iio_register;
+ }
+
+ chip->state[PROX] = CHIP_POWER_OFF;
+
+ dev_info(&pdev->dev, "devname:%s func:%s line:%d probe success\n",
+ pdev->name, __func__, __LINE__);
+
+ return 0;
+
+err_iio_register:
+ gpio_free(chip->tout_gpio);
+ gpio_free(chip->pout_gpio);
+err_gpio_irq_init:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int stm8t143_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct stm8t143_chip *chip = iio_priv(indio_dev);
+
+ if (chip->consumers[0].supply && chip->state[PROX])
+ regulator_bulk_disable(1, chip->consumers);
+
+ gpio_free(chip->tout_gpio);
+ gpio_free(chip->pout_gpio);
+
+ iio_device_unregister(indio_dev);
+ iio_device_free(indio_dev);
+ return 0;
+}
+
+static void stm8t143_shutdown(struct platform_device *pdev)
+{
+ stm8t143_remove(pdev);
+}
+
+static const struct of_device_id stm8t143_of_match[] = {
+ { .compatible = "stm,stm8t143", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, stm8t143_of_match);
+
+static struct platform_driver stm8t143_driver = {
+ .driver = {
+ .name = "stm8t143",
+ .owner = THIS_MODULE,
+ .of_match_table = stm8t143_of_match,
+ .pm = STM8T143_PM_OPS,
+ },
+ .probe = stm8t143_probe,
+ .remove = stm8t143_remove,
+ .shutdown = stm8t143_shutdown,
+};
+
+module_platform_driver(stm8t143_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("stm8t143 Driver");
+MODULE_AUTHOR("Sri Krishna chowdary <schowdary@nvidia.com>");
diff --git a/drivers/staging/iio/light/tcs3772.c b/drivers/staging/iio/light/tcs3772.c
new file mode 100644
index 000000000000..f71aba45a67b
--- /dev/null
+++ b/drivers/staging/iio/light/tcs3772.c
@@ -0,0 +1,695 @@
+/*
+ * An IIO driver for the TCS3772 light sensor.
+ *
+ * IIO light driver for monitoring ambient light intensity (in lux) and
+ * proximity/IR.
+ *
+ * Copyright (c) 2013 - 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/notifier.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define TCS3772_REG_CONFIGURE 0x80
+#define TCS3772_REG_PPULSE 0x8E
+#define TCS3772_REG_STATUS 0x93
+#define TCS3772_REG_ALS_DATA_L 0x94
+#define TCS3772_REG_ALS_DATA_H 0x95
+#define TCS3772_REG_PROX_DATA_L 0x9C
+#define TCS3772_REG_PROX_DATA_H 0x9D
+#define TCS3772_REG_MAX 0x9D
+
+#define CONFIGURE_SHTDWN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define CONFIGURE_SHTDWN_EN 0x0
+
+#define CONFIGURE_ALS_MASK (BIT(0) | BIT(1))
+#define CONFIGURE_ALS_MASK_PROX_ON BIT(1)
+#define CONFIGURE_ALS_EN 0x3
+
+#define CONFIGURE_PROX_MASK (BIT(0) | BIT(2))
+#define CONFIGURE_PROX_MASK_ALS_ON BIT(2)
+#define CONFIGURE_PROX_EN 0x5
+
+#define STATUS_ALS_VALID 0x1
+#define STATUS_PROX_VALID 0x2
+
+#define PPULSE_MASK 0xff
+#define PPULSE_NUM 0x1
+
+#define TCS3772_POLL_DELAY 100 /* mSec */
+#define I2C_MAX_TIMEOUT msecs_to_jiffies(20) /* 20 mSec */
+
+#define TCS3772_N_CHANNELS 2
+
+#define get_chan_num(type) (type == IIO_LIGHT ? ALS : \
+ type == IIO_PROXIMITY ? PROX : -EINVAL)
+
+#define get_data_reg(type) (type == IIO_LIGHT ? \
+ TCS3772_REG_ALS_DATA_L : \
+ type == IIO_PROXIMITY ? \
+ TCS3772_REG_PROX_DATA_L : -EINVAL)
+#define get_valid_mask(type) (type == IIO_LIGHT ? \
+ STATUS_ALS_VALID : \
+ type == IIO_PROXIMITY ? \
+ STATUS_PROX_VALID : -EINVAL)
+
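+/*
+ * The helpers above map an iio channel type to this driver's channel
+ * index, data register pair and status "valid" bit; any other channel
+ * type evaluates to -EINVAL.
+ */
+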
+enum channels {
+ ALS,
+ PROX,
+};
+
+enum channel_state {
+ CHIP_POWER_OFF,
+ CHIP_POWER_ON_CHAN_OFF,
+ CHIP_POWER_ON_CHAN_ON,
+};
+
+struct tcs3772_chip {
+ struct i2c_client *client;
+ const struct i2c_device_id *id;
+ struct mutex lock;
+ struct regulator_bulk_data *consumers;
+ struct notifier_block regulator_nb;
+ wait_queue_head_t i2c_wait_queue;
+ int i2c_xfer_ready;
+ struct regmap *regmap;
+
+ u8 state[TCS3772_N_CHANNELS];
+ int shutdown_complete;
+};
+
+/* regulators used by the device */
+static struct regulator_bulk_data tcs3772_consumers[] = {
+ {
+ .supply = "vdd",
+ },
+};
+
+/* device's regmap configuration for i2c communication */
+/* non cacheable registers*/
+static bool tcs3772_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return (reg >= TCS3772_REG_ALS_DATA_L) &&
+ (reg <= TCS3772_REG_PROX_DATA_H);
+}
+
+static const struct reg_default tcs3772_reg_defaults[] = {
+ {
+ .reg = TCS3772_REG_CONFIGURE,
+ .def = 0x00,
+ },
+ {
+ .reg = TCS3772_REG_PPULSE,
+ .def = 0x00,
+ },
+};
+
+static const struct regmap_config tcs3772_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = &tcs3772_volatile_reg,
+ .max_register = TCS3772_REG_MAX,
+ .reg_defaults = tcs3772_reg_defaults,
+ .num_reg_defaults = 2,
+ .cache_type = REGCACHE_RBTREE,
+};
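+
+/*
+ * Everything except the data registers is cached (rbtree), so the
+ * configuration programmed while the chip was powered can be replayed
+ * with regcache_sync() when resume restores power.
+ */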
+
+/* device's read/write functionality */
+static int _tcs3772_register_read(struct tcs3772_chip *chip, int reg_l,
+ int nreg, int *val)
+{
+ int ret;
+ int temp, i = 0;
+
+ if (!chip->regmap)
+ return -ENODEV;
+
+ ret = wait_event_timeout(chip->i2c_wait_queue,
+ chip->i2c_xfer_ready, I2C_MAX_TIMEOUT);
+ if (!ret) {
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg: device not ready for i2c xfer\n",
+ chip->id->name, __func__, __LINE__);
+ return -ETIMEDOUT;
+ }
+
+ mutex_lock(&chip->lock);
+ *val = 0;
+ while (i < nreg) {
+ ret = regmap_read(chip->regmap, reg_l + i, &temp);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d i:%d " \
+ "error_msg:regmap_read fails\n",
+ chip->id->name, __func__, __LINE__, i);
+ break;
+ }
+ *val |= temp << (i * 8);
+ i++;
+ }
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+static int _tcs3772_register_write(struct tcs3772_chip *chip, int reg, int mask,
+ int val)
+{
+ int ret;
+
+ if (!chip->regmap)
+ return -ENODEV;
+
+ ret = wait_event_timeout(chip->i2c_wait_queue,
+ chip->i2c_xfer_ready, I2C_MAX_TIMEOUT);
+ if (!ret) {
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg: device not ready for i2c xfer\n",
+ chip->id->name, __func__, __LINE__);
+ return -ETIMEDOUT;
+ }
+
+ mutex_lock(&chip->lock);
+ ret = regmap_update_bits(chip->regmap, reg, mask, val);
+ if (ret)
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:regmap_write fails\n",
+ chip->id->name, __func__, __LINE__);
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+/* sync the device's registers with cache after power up during resume */
+static int _tcs3772_register_sync(struct tcs3772_chip *chip)
+{
+ int ret;
+
+ if (!chip->regmap)
+ return -ENODEV;
+
+ ret = wait_event_timeout(chip->i2c_wait_queue,
+ chip->i2c_xfer_ready, I2C_MAX_TIMEOUT);
+ if (!ret) {
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg: device not ready for i2c xfer\n",
+ chip->id->name, __func__, __LINE__);
+ return -ETIMEDOUT;
+ }
+
+ mutex_lock(&chip->lock);
+ regcache_mark_dirty(chip->regmap);
+ ret = regcache_sync(chip->regmap);
+ if (ret)
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:regmap_write fails\n",
+ chip->id->name, __func__, __LINE__);
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+/* device's registration with iio to facilitate user operations */
+static ssize_t tcs3772_chan_regulator_enable(struct iio_dev *indio_dev,
+ uintptr_t private, struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ u8 enable;
+ int ret = 0;
+ struct tcs3772_chip *chip = iio_priv(indio_dev);
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (get_chan_num(chan->type) == -EINVAL)
+ return -EINVAL;
+
+ if (enable == (chip->state[get_chan_num(chan->type)] != CHIP_POWER_OFF))
+ return 1;
+
+ if (!chip->consumers)
+ goto success;
+
+ if (enable)
+ ret = regulator_bulk_enable(1, chip->consumers);
+ else
+ ret = regulator_bulk_disable(1, chip->consumers);
+
+ if (ret) {
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:_tcs3772_register_read fails\n",
+ chip->id->name, __func__, __LINE__);
+ goto fail;
+ }
+
+success:
+ chip->state[get_chan_num(chan->type)] = enable;
+fail:
+ return ret ? ret : 1;
+}
+
+static ssize_t tcs3772_chan_enable(struct iio_dev *indio_dev,
+ uintptr_t private, struct iio_chan_spec const *chan,
+ const char *buf, size_t len)
+{
+ u8 enable;
+ int ret;
+ struct tcs3772_chip *chip = iio_priv(indio_dev);
+ int state = chip->state[get_chan_num(chan->type)];
+
+ if (kstrtou8(buf, 10, &enable))
+ return -EINVAL;
+
+ if ((enable != 0) && (enable != 1))
+ return -EINVAL;
+
+ if (get_chan_num(chan->type) == -EINVAL)
+ return -EINVAL;
+
+ if (state == CHIP_POWER_OFF) {
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:please enable regulator first\n",
+ chip->id->name, __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (!((enable && (state == CHIP_POWER_ON_CHAN_OFF)) ||
+ (!enable && (state == CHIP_POWER_ON_CHAN_ON))))
+ return -EINVAL;
+
+
+ if (chan->type == IIO_LIGHT) {
+ if (enable) {
+ ret = _tcs3772_register_write(chip,
+ TCS3772_REG_CONFIGURE,
+ CONFIGURE_ALS_MASK,
+ CONFIGURE_ALS_EN);
+ if (ret)
+ return ret;
+ } else {
+ if (chip->state[PROX] == CHIP_POWER_ON_CHAN_ON) {
+ ret = _tcs3772_register_write(chip,
+ TCS3772_REG_CONFIGURE,
+ CONFIGURE_ALS_MASK_PROX_ON,
+ !CONFIGURE_ALS_EN);
+ if (ret)
+ return ret;
+ } else {
+ ret = _tcs3772_register_write(chip,
+ TCS3772_REG_CONFIGURE,
+ CONFIGURE_ALS_MASK,
+ !CONFIGURE_ALS_EN);
+ if (ret)
+ return ret;
+ }
+ }
+ } else {
+ /* chan->type == IIO_PROXIMITY */
+ if (enable) {
+ ret = _tcs3772_register_write(chip,
+ TCS3772_REG_CONFIGURE,
+ CONFIGURE_PROX_MASK,
+ CONFIGURE_PROX_EN);
+ if (ret)
+ return ret;
+ ret = _tcs3772_register_write(chip, TCS3772_REG_PPULSE,
+ PPULSE_MASK,
+ PPULSE_NUM);
+ if (ret)
+ return ret;
+ } else {
+ if (chip->state[ALS] == CHIP_POWER_ON_CHAN_ON) {
+ ret = _tcs3772_register_write(chip,
+ TCS3772_REG_CONFIGURE,
+ CONFIGURE_PROX_MASK_ALS_ON,
+ !CONFIGURE_PROX_EN);
+ if (ret)
+ return ret;
+ } else {
+ ret = _tcs3772_register_write(chip,
+ TCS3772_REG_CONFIGURE,
+ CONFIGURE_PROX_MASK,
+ !CONFIGURE_PROX_EN);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+success:
+ /* success on enable = 1 => state = CHIP_POWER_ON_CHAN_ON (2)
+ * success on enable = 0 => state = CHIP_POWER_ON_CHAN_OFF (1)
+ * from enum channel_state. Hence a small optimization */
+ chip->state[get_chan_num(chan->type)] = enable + 1;
+ return ret ? ret : 1;
+}
+
+/* chan_regulator_enable is used to enable regulators used by
+ * particular channel.
+ * chan_enable actually configures various registers to activate
+ * a particular channel.
+ */
+static const struct iio_chan_spec_ext_info tcs3772_ext_info[] = {
+ {
+ .name = "regulator_enable",
+ .shared = true,
+ .write = tcs3772_chan_regulator_enable,
+ },
+ {
+ .name = "enable",
+ .shared = true,
+ .write = tcs3772_chan_enable,
+ },
+ {
+ },
+};
+
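+/*
+ * Expected user-space flow (attribute file names are generated by the
+ * iio core and are system dependent, e.g.
+ * in_illuminance_regulator_enable):
+ *   1. write 1 to the channel's regulator_enable attribute
+ *   2. write 1 to the channel's enable attribute
+ *   3. read in_illuminance_raw / in_proximity_raw
+ */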
+static const struct iio_chan_spec tcs3772_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = tcs3772_ext_info,
+ },
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = tcs3772_ext_info,
+ },
+};
+
+static int tcs3772_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct tcs3772_chip *chip = iio_priv(indio_dev);
+ int ret;
+ int value;
+
+ /* get_chan_num != -EINVAL <=> get_data_reg != -EINVAL */
+ if (get_chan_num(chan->type) == -EINVAL)
+ return -EINVAL;
+
+ if (chip->state[get_chan_num(chan->type)] != CHIP_POWER_ON_CHAN_ON)
+ return -EINVAL;
+
+ do {
+ ret = _tcs3772_register_read(chip, TCS3772_REG_STATUS, 1,
+ &value);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d " \
+ "error_msg:_tcs3772_register_read fails\n",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+ }
+ msleep(TCS3772_POLL_DELAY);
+ } while (!(value & get_valid_mask(chan->type)));
+
+ ret = _tcs3772_register_read(chip, get_data_reg(chan->type), 2, &value);
+ if (ret)
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:_tcs3772_register_read fails\n",
+ chip->id->name, __func__, __LINE__);
+
+ if (!ret) {
+ *val = value;
+ ret = IIO_VAL_INT;
+ }
+ return ret;
+}
+
+/* read_raw is used to report a channel's data to user
+ * in non SI units
+ */
+static const struct iio_info tcs3772_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &tcs3772_read_raw,
+};
+
+/* chip's power management helpers */
+static int tcs3772_activate_standby_mode(struct tcs3772_chip *chip)
+{
+ return _tcs3772_register_write(chip, TCS3772_REG_CONFIGURE,
+ CONFIGURE_SHTDWN_MASK,
+ CONFIGURE_SHTDWN_EN);
+}
+
+/* this detects the regulator enable/disable event and puts
+ * the device to low power state if this device does not use the regulator */
+static int tcs3772_power_manager(struct notifier_block *regulator_nb,
+ unsigned long event, void *v)
+{
+ struct tcs3772_chip *chip;
+
+ chip = container_of(regulator_nb, struct tcs3772_chip, regulator_nb);
+
+ if (event & (REGULATOR_EVENT_POST_ENABLE |
+ REGULATOR_EVENT_OUT_POSTCHANGE)) {
+ chip->i2c_xfer_ready = 1;
+ tcs3772_activate_standby_mode(chip);
+ } else if (event & (REGULATOR_EVENT_DISABLE |
+ REGULATOR_EVENT_FORCE_DISABLE)) {
+ chip->i2c_xfer_ready = 0;
+ }
+ return NOTIFY_OK;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tcs3772_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct tcs3772_chip *chip = iio_priv(indio_dev);
+ int ret = 0, i;
+
+ if (!chip->consumers)
+ return 0;
+
+ /* assumes all other devices stop using this regulator */
+ for (i = 0; i < TCS3772_N_CHANNELS; i++)
+ if (chip->state[i] != CHIP_POWER_OFF)
+ ret |= regulator_bulk_disable(1, chip->consumers);
+
+ if (ret) {
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:regulator_bulk_disable fails\n",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+ }
+
+ if (regulator_is_enabled(chip->consumers[0].consumer))
+ ret = tcs3772_activate_standby_mode(chip);
+ if (ret)
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:tcs3772_activate_standby fails\n",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+}
+
+static int tcs3772_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct tcs3772_chip *chip = iio_priv(indio_dev);
+ int ret = 0, i;
+
+ if (!chip->consumers)
+ return 0;
+
+ for (i = 0; i < TCS3772_N_CHANNELS; i++)
+ if (chip->state[i] != CHIP_POWER_OFF)
+ ret |= regulator_bulk_enable(1, chip->consumers);
+
+ if (ret) {
+ dev_err(&chip->client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:regulator_bulk_enable fails\n",
+ chip->id->name, __func__, __LINE__);
+ return ret;
+ }
+
+ if (chip->state[ALS] == CHIP_POWER_ON_CHAN_ON ||
+ chip->state[PROX] == CHIP_POWER_ON_CHAN_ON) {
+ ret = _tcs3772_register_sync(chip);
+ if (ret)
+ dev_err(&chip->client->dev,
+ "idname:%s func:%s line:%d " \
+ "error_msg:restore_state fails\n",
+ chip->id->name, __func__, __LINE__);
+ }
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(tcs3772_pm_ops, tcs3772_suspend, tcs3772_resume);
+#define TCS3772_PM_OPS (&tcs3772_pm_ops)
+#else
+#define TCS3772_PM_OPS NULL
+#endif
+
+/* device's i2c registration */
+static int tcs3772_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct tcs3772_chip *chip;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+
+ indio_dev = iio_device_alloc(sizeof(*chip));
+ if (indio_dev == NULL) {
+ dev_err(&client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:iio_device_alloc fails\n",
+ id->name, __func__, __LINE__);
+ return -ENOMEM;
+ }
+ chip = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ chip->client = client;
+ chip->id = id;
+ mutex_init(&chip->lock);
+
+ regmap = devm_regmap_init_i2c(client, &tcs3772_regmap_config);
+ if (IS_ERR_OR_NULL(regmap)) {
+ dev_err(&client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:devm_regmap_init_i2c fails\n",
+ id->name, __func__, __LINE__);
+ return -ENOMEM;
+ }
+ chip->regmap = regmap;
+
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(tcs3772_consumers),
+ tcs3772_consumers);
+ if (ret)
+ dev_err(&client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:regulator_get fails\n",
+ id->name, __func__, __LINE__);
+ else
+ chip->consumers = tcs3772_consumers;
+
+ if (chip->consumers) {
+ chip->regulator_nb.notifier_call = tcs3772_power_manager;
+ ret = regulator_register_notifier(chip->consumers[0].consumer,
+ &chip->regulator_nb);
+ if (ret)
+ dev_err(&client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:regulator_register_notifier fails\n",
+ id->name, __func__, __LINE__);
+ }
+
+ indio_dev->info = &tcs3772_iio_info;
+ indio_dev->channels = tcs3772_channels;
+ indio_dev->num_channels = 2;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "idname:%s func:%s line:%d " \
+ "error_msg:iio_device_register fails\n",
+ id->name, __func__, __LINE__);
+ goto err_iio_free;
+ }
+
+ init_waitqueue_head(&chip->i2c_wait_queue);
+ chip->state[ALS] = CHIP_POWER_OFF;
+ chip->state[PROX] = CHIP_POWER_OFF;
+
+ if (chip->consumers &&
+ regulator_is_enabled(chip->consumers[0].consumer)) {
+ chip->i2c_xfer_ready = 1;
+ tcs3772_activate_standby_mode(chip);
+ }
+
+ dev_info(&client->dev, "idname:%s func:%s line:%d " \
+ "probe success\n",
+ id->name, __func__, __LINE__);
+
+ return 0;
+
+err_iio_free:
+ mutex_destroy(&chip->lock);
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static void tcs3772_shutdown(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct tcs3772_chip *chip = iio_priv(indio_dev);
+ mutex_lock(&chip->lock);
+ chip->shutdown_complete = 1;
+ mutex_unlock(&chip->lock);
+}
+
+static int tcs3772_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct tcs3772_chip *chip = iio_priv(indio_dev);
+
+ mutex_destroy(&chip->lock);
+ iio_device_unregister(indio_dev);
+ iio_device_free(indio_dev);
+ return 0;
+}
+
+static const struct i2c_device_id tcs3772_id[] = {
+ {"tcs3772", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tcs3772_id);
+
+static const struct of_device_id tcs3772_of_match[] = {
+ { .compatible = "taos,tcs3772", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tcs3772_of_match);
+
+static struct i2c_driver tcs3772_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "tcs3772",
+ .owner = THIS_MODULE,
+ .of_match_table = tcs3772_of_match,
+ .pm = TCS3772_PM_OPS,
+ },
+ .id_table = tcs3772_id,
+ .probe = tcs3772_probe,
+ .remove = tcs3772_remove,
+ .shutdown = tcs3772_shutdown,
+};
+
+static int __init tcs3772_init(void)
+{
+ return i2c_add_driver(&tcs3772_driver);
+}
+
+static void __exit tcs3772_exit(void)
+{
+ i2c_del_driver(&tcs3772_driver);
+}
+
+module_init(tcs3772_init);
+module_exit(tcs3772_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("tcs3772 Driver");
+MODULE_AUTHOR("Sri Krishna chowdary <schowdary@nvidia.com>");
diff --git a/drivers/staging/iio/magnetometer/Makefile b/drivers/staging/iio/magnetometer/Makefile
index f9bfb2e11d7d..659e0553d2e9 100644
--- a/drivers/staging/iio/magnetometer/Makefile
+++ b/drivers/staging/iio/magnetometer/Makefile
@@ -2,4 +2,6 @@
# Makefile for industrial I/O Magnetometer sensors
#
+GCOV_PROFILE := y
+
obj-$(CONFIG_SENSORS_HMC5843) += hmc5843.o
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
index e53274b64ae1..5aa46a2d5011 100644
--- a/drivers/staging/iio/meter/Kconfig
+++ b/drivers/staging/iio/meter/Kconfig
@@ -60,4 +60,28 @@ config ADE7854_SPI
To compile this driver as a module, choose M here: the
module will be called ade7854-spi.
+
+config INA219
+ tristate "TI INA219 bidirectional current/power monitor"
+ depends on I2C
+ help
+ The TI INA219 is a high- or low-side measurement, bidirectional
+ current/power monitor with an I2C interface.
+ Say Y here if you have an INA219 hooked to an I2C bus.
+
+config INA230
+ tristate "TI INA230 bidirectional current/power monitor"
+ depends on I2C
+ help
+ The TI INA230 is a high- or low-side measurement, bidirectional
+ current/power monitor with an I2C interface.
+ Say Y here if you have an INA230 hooked to an I2C bus.
+
+config INA3221
+ tristate "TI INA3221 3-Channel Shunt and Bus Voltage Monitor"
+ depends on I2C
+ help
+ The TI INA3221 is a triple-channel, high-side measurement, shunt and
+ bus voltage monitor with an I2C interface.
+ Say Y here if you have an INA3221 hooked to an I2C bus.
+
endmenu
diff --git a/drivers/staging/iio/meter/Makefile b/drivers/staging/iio/meter/Makefile
index de3863d6b078..55d7f604d3e8 100644
--- a/drivers/staging/iio/meter/Makefile
+++ b/drivers/staging/iio/meter/Makefile
@@ -13,3 +13,6 @@ obj-$(CONFIG_ADE7759) += ade7759.o
obj-$(CONFIG_ADE7854) += ade7854.o
obj-$(CONFIG_ADE7854_I2C) += ade7854-i2c.o
obj-$(CONFIG_ADE7854_SPI) += ade7854-spi.o
+obj-$(CONFIG_INA219) += ina219.o
+obj-$(CONFIG_INA230) += ina230.o
+obj-$(CONFIG_INA3221) += ina3221.o
diff --git a/drivers/staging/iio/meter/ina219.c b/drivers/staging/iio/meter/ina219.c
new file mode 100644
index 000000000000..b75a9bf94dcc
--- /dev/null
+++ b/drivers/staging/iio/meter/ina219.c
@@ -0,0 +1,754 @@
+/*
+ * ina219.c - driver for TI INA219
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Based on hwmon driver:
+ * drivers/hwmon/ina219.c
+ * and contributed by:
+ * venu byravarasu <vbyravarasu@nvidia.com>
+ * Anshul Jain <anshulj@nvidia.com>
+ * Deepak Nibade <dnibade@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+
+#define INA219_CONFIG 0
+#define INA219_SHUNT 1
+#define INA219_VOLTAGE 2
+#define INA219_POWER 3
+#define INA219_CURRENT 4
+#define INA219_CAL 5
+
+#define INA219_RESET (1 << 15)
+
+struct ina219_platform_data {
+ const char *rail_name;
+ unsigned int calibration_data;
+ unsigned int power_lsb;
+ u32 trig_conf_data;
+ u32 cont_conf_data;
+ u32 divisor;
+ unsigned int shunt_resistor;
+ unsigned int precision_multiplier;
+};
+
+struct ina219_chip {
+ struct device *dev;
+ struct i2c_client *client;
+ struct ina219_platform_data *pdata;
+ struct mutex mutex;
+ bool state;
+ struct notifier_block nb;
+};
+
+enum {
+ CHANNEL_NAME,
+ CHANNEL_STATE,
+};
+
+enum {
+ STOPPED,
+ RUNNING,
+};
+
+#define busv_register_to_mv(x) (((x) >> 3) * 4)
+#define shuntv_register_to_uv(x) ((x) * 10)
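+/*
+ * Illustrative check of the scaling macros above (values are made up):
+ * the bus voltage register keeps its reading in bits 15..3 with a 4 mV
+ * LSB, so a raw value of 0x1F40 gives (0x1F40 >> 3) * 4 = 4000 mV; the
+ * shunt register has a 10 uV LSB, so a raw value of 2000 gives 20000 uV.
+ */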
+
+static inline struct ina219_chip *to_ina219_chip(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ return iio_priv(indio_dev);
+}
+
+static int ina219_power_down(struct ina219_chip *chip)
+{
+ int ret;
+
+ ret = i2c_smbus_write_word_data(chip->client, INA219_CONFIG, 0);
+ if (ret < 0)
+ dev_err(chip->dev, "INA power down failed: %d\n", ret);
+ return ret;
+}
+
+static int ina219_power_up(struct ina219_chip *chip, u16 config_data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_word_data(chip->client, INA219_CONFIG,
+ __constant_cpu_to_be16(config_data));
+ if (ret < 0)
+ goto exit;
+
+ ret = i2c_smbus_write_word_data(chip->client, INA219_CAL,
+ __constant_cpu_to_be16(chip->pdata->calibration_data));
+ if (ret < 0)
+ goto exit;
+
+ return 0;
+
+exit:
+ dev_err(chip->dev, "INA power up failed: %d\n", ret);
+ return ret;
+}
+
+static int ina219_get_bus_voltage(struct ina219_chip *chip, int *volt_mv)
+{
+ int voltage_mv;
+ int cur_state;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ cur_state = chip->state;
+
+ if (chip->state == STOPPED) {
+ ret = ina219_power_up(chip, chip->pdata->trig_conf_data);
+ if (ret < 0)
+ goto exit;
+ }
+
+ /* getting voltage readings in millivolts */
+ voltage_mv = (s16)be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA219_VOLTAGE));
+
+ if (voltage_mv < 0)
+ goto exit;
+
+ *volt_mv = busv_register_to_mv(voltage_mv);
+
+ if (cur_state == STOPPED) {
+ ret = ina219_power_down(chip);
+ if (ret < 0)
+ goto exit;
+ }
+
+ mutex_unlock(&chip->mutex);
+ return 0;
+
+exit:
+ mutex_unlock(&chip->mutex);
+ dev_err(chip->dev, "%s: failed\n", __func__);
+ return -EAGAIN;
+}
+
+static int ina219_get_shunt_voltage(struct ina219_chip *chip, int *volt_uv)
+{
+ int voltage_uv;
+ int cur_state;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ cur_state = chip->state;
+ if (chip->state == STOPPED) {
+ ret = ina219_power_up(chip, chip->pdata->trig_conf_data);
+ if (ret < 0)
+ goto exit;
+ }
+
+ voltage_uv = be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA219_SHUNT));
+
+ if (voltage_uv < 0)
+ goto exit;
+
+ *volt_uv = shuntv_register_to_uv(voltage_uv);
+
+ if (cur_state == STOPPED) {
+ ret = ina219_power_down(chip);
+ if (ret < 0)
+ goto exit;
+ }
+
+ mutex_unlock(&chip->mutex);
+ return 0;
+exit:
+ mutex_unlock(&chip->mutex);
+ dev_err(chip->dev, "%s: failed\n", __func__);
+ return -EAGAIN;
+}
+
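+/*
+ * Shunt power is derived from two register reads instead of the POWER
+ * register: I(uA) = Vshunt(uV) * (1000 / Rshunt(mOhm)), then
+ * P(mW) = I(mA) * Vbus(mV) / 1000. Worked example (illustrative values):
+ * 5000 uV across a 10 mOhm shunt gives 5000 * 100 = 500000 uA = 500 mA;
+ * at Vbus = 4000 mV that is 500 * 4000 / 1000 = 2000 mW.
+ */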
+static int ina219_get_shunt_power(struct ina219_chip *chip, int *shunt_power_mw)
+{
+ int power_mw;
+ int voltage_shunt_uv;
+ int voltage_bus_mv;
+ int inverse_shunt_resistor;
+ int cur_state;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ cur_state = chip->state;
+ if (chip->state == STOPPED) {
+ ret = ina219_power_up(chip, chip->pdata->trig_conf_data);
+ if (ret < 0)
+ goto exit;
+ }
+
+ voltage_shunt_uv = be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA219_SHUNT));
+ if (voltage_shunt_uv < 0)
+ goto exit;
+
+ voltage_shunt_uv = shuntv_register_to_uv(voltage_shunt_uv);
+
+ voltage_bus_mv = be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA219_VOLTAGE));
+ if (voltage_bus_mv < 0)
+ goto exit;
+
+ voltage_bus_mv = busv_register_to_mv(voltage_bus_mv);
+
+ /* avoid overflow */
+ inverse_shunt_resistor = 1000 / chip->pdata->shunt_resistor;
+ power_mw = voltage_shunt_uv * inverse_shunt_resistor; /* current in uA */
+ power_mw = power_mw / 1000; /* current in mA */
+ power_mw = power_mw * voltage_bus_mv; /* power in uW */
+ power_mw = power_mw / 1000; /* power in mW */
+
+ if (cur_state == STOPPED) {
+ ret = ina219_power_down(chip);
+ if (ret < 0)
+ goto exit;
+ }
+
+ mutex_unlock(&chip->mutex);
+ *shunt_power_mw = power_mw;
+ return 0;
+exit:
+ mutex_unlock(&chip->mutex);
+ dev_err(chip->dev, "%s: failed\n", __func__);
+ return -EAGAIN;
+}
+
+static int ina219_get_bus_power(struct ina219_chip *chip, int *bus_power_mw)
+{
+ int power_mw;
+ int voltage_mv;
+ int overflow, conversion;
+ int cur_state;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ cur_state = chip->state;
+ if (chip->state == STOPPED) {
+ ret = ina219_power_up(chip, chip->pdata->trig_conf_data);
+ if (ret < 0) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+ } else {
+ mutex_unlock(&chip->mutex);
+ return ina219_get_shunt_power(chip, bus_power_mw);
+ }
+
+ /* check if the readings are valid */
+ do {
+ /*
+  * Read the power register to clear the conversion bit; the
+  * value is discarded, so check the raw return for errors
+  * instead of byte-swapping it first.
+  */
+ ret = i2c_smbus_read_word_data(chip->client, INA219_POWER);
+ if (ret < 0) {
+ dev_err(chip->dev, "POWER read failed: %d\n", ret);
+ goto exit;
+ }
+
+ voltage_mv = be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA219_VOLTAGE));
+ overflow = voltage_mv & 1;
+ if (overflow) {
+ dev_err(chip->dev, "overflow error\n");
+ ret = -EIO;
+ goto exit;
+ }
+ conversion = (voltage_mv >> 1) & 1;
+ } while (!conversion);
+
+ /* getting power readings in milliwatts */
+ power_mw = be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA219_POWER));
+ power_mw *= chip->pdata->power_lsb;
+ if (chip->pdata->precision_multiplier)
+ power_mw /= chip->pdata->precision_multiplier;
+ if (power_mw < 0)
+ goto exit;
+
+ /* set ina219 to power down mode */
+ ret = ina219_power_down(chip);
+ if (ret < 0)
+ goto exit;
+
+ mutex_unlock(&chip->mutex);
+ *bus_power_mw = power_mw;
+ return 0;
+
+exit:
+ mutex_unlock(&chip->mutex);
+ dev_err(chip->dev, "%s: failed\n", __func__);
+ return ret;
+}
+
+static int ina219_get_shunt_current(struct ina219_chip *chip, int *shunt_cur_ma)
+{
+ int current_ma;
+ int voltage_uv;
+ int inverse_shunt_resistor;
+ int cur_state;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ cur_state = chip->state;
+ if (chip->state == STOPPED) {
+ ret = ina219_power_up(chip, chip->pdata->trig_conf_data);
+ if (ret < 0)
+ goto exit;
+ }
+
+ voltage_uv = (s16)be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA219_SHUNT));
+ if (voltage_uv < 0)
+ goto exit;
+
+ inverse_shunt_resistor = 1000 / chip->pdata->shunt_resistor;
+ voltage_uv = shuntv_register_to_uv(voltage_uv);
+ current_ma = voltage_uv * inverse_shunt_resistor;
+ current_ma = current_ma / 1000;
+
+ if (cur_state == STOPPED) {
+ ret = ina219_power_down(chip);
+ if (ret < 0)
+ goto exit;
+ }
+
+ mutex_unlock(&chip->mutex);
+ *shunt_cur_ma = current_ma;
+ return 0;
+exit:
+ dev_err(chip->dev, "%s: failed\n", __func__);
+ mutex_unlock(&chip->mutex);
+ return -EAGAIN;
+}
+
+static int ina219_get_bus_current(struct ina219_chip *chip, int *raw_cur_ma)
+{
+ int current_ma;
+ int voltage_mv;
+ int overflow, conversion;
+ int cur_state;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+
+ cur_state = chip->state;
+ if (chip->state == STOPPED) {
+ ret = ina219_power_up(chip, chip->pdata->trig_conf_data);
+ if (ret < 0) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+ } else {
+ mutex_unlock(&chip->mutex);
+ return ina219_get_shunt_current(chip, raw_cur_ma);
+ }
+
+ /* check if the readings are valid */
+ do {
+ /*
+  * Read the power register to clear the conversion bit; the
+  * value is discarded, so check the raw return for errors
+  * instead of byte-swapping it first.
+  */
+ ret = i2c_smbus_read_word_data(chip->client, INA219_POWER);
+ if (ret < 0) {
+ dev_err(chip->dev, "POWER read failed: %d\n", ret);
+ goto exit;
+ }
+
+ voltage_mv = be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA219_VOLTAGE));
+ overflow = voltage_mv & 1;
+ if (overflow) {
+ dev_err(chip->dev, "overflow error\n");
+ ret = -EIO;
+ goto exit;
+ }
+ conversion = (voltage_mv >> 1) & 1;
+ } while (!conversion);
+
+ /* getting current readings in milliamps */
+ current_ma = be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA219_CURRENT));
+ if (current_ma < 0)
+ goto exit;
+
+ current_ma = (current_ma * chip->pdata->power_lsb) /
+ chip->pdata->divisor;
+ if (chip->pdata->precision_multiplier)
+ current_ma /= chip->pdata->precision_multiplier;
+
+ ret = ina219_power_down(chip);
+ if (ret < 0)
+ goto exit;
+
+ mutex_unlock(&chip->mutex);
+ *raw_cur_ma = current_ma;
+ return 0;
+
+exit:
+ mutex_unlock(&chip->mutex);
+ dev_err(chip->dev, "%s: failed\n", __func__);
+ return ret;
+}
+
+static int ina219_set_state(struct ina219_chip *chip, long new_state)
+{
+ int ret = 0;
+
+ mutex_lock(&chip->mutex);
+
+ if ((new_state > 0) && (chip->state == STOPPED))
+ ret = ina219_power_up(chip, chip->pdata->cont_conf_data);
+ else if ((new_state == 0) && (chip->state == RUNNING))
+ ret = ina219_power_down(chip);
+
+ if (ret < 0) {
+ dev_err(chip->dev, "Switching INA on/off failed: %d", ret);
+ mutex_unlock(&chip->mutex);
+ return -EAGAIN;
+ }
+
+ if (new_state)
+ chip->state = RUNNING;
+ else
+ chip->state = STOPPED;
+
+ mutex_unlock(&chip->mutex);
+ return 1;
+}
+
+static int ina219_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct ina219_chip *chip = iio_priv(indio_dev);
+ int type = chan->type;
+ int address = chan->address;
+ int ret = 0;
+
+ if (mask != IIO_CHAN_INFO_PROCESSED) {
+ dev_err(chip->dev, "Invalid mask 0x%08lx\n", mask);
+ return -EINVAL;
+ }
+
+ switch (address) {
+ case 0:
+ switch (type) {
+ case IIO_VOLTAGE:
+ ret = ina219_get_bus_voltage(chip, val);
+ break;
+
+ case IIO_CURRENT:
+ ret = ina219_get_bus_current(chip, val);
+ break;
+
+ case IIO_POWER:
+ ret = ina219_get_bus_power(chip, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+
+ case 1:
+ switch (type) {
+ case IIO_VOLTAGE:
+ ret = ina219_get_shunt_voltage(chip, val);
+ break;
+
+ case IIO_CURRENT:
+ ret = ina219_get_shunt_current(chip, val);
+ break;
+
+ case IIO_POWER:
+ ret = ina219_get_shunt_power(chip, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ ret = IIO_VAL_INT;
+ return ret;
+}
+
+static ssize_t ina219_show_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ina219_chip *chip = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int count;
+
+ switch (this_attr->address) {
+ case CHANNEL_NAME:
+ return sprintf(buf, "%s\n", chip->pdata->rail_name);
+
+ case CHANNEL_STATE:
+ mutex_lock(&chip->mutex);
+ count = sprintf(buf, "%d\n", chip->state);
+ mutex_unlock(&chip->mutex);
+ return count;
+
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static ssize_t ina219_set_channel(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ina219_chip *chip = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int mode = this_attr->address;
+ long val;
+
+ switch (mode) {
+ case CHANNEL_STATE:
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+ if (ina219_set_state(chip, val) < 0)
+ return -EAGAIN;
+ /* report the whole write as consumed on success */
+ return len;
+
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static IIO_DEVICE_ATTR(rail_name, S_IRUGO,
+ ina219_show_channel, NULL, CHANNEL_NAME);
+
+static IIO_DEVICE_ATTR(state, S_IRUGO | S_IWUSR,
+ ina219_show_channel, ina219_set_channel, CHANNEL_STATE);
+
+static struct attribute *ina219_attributes[] = {
+ &iio_dev_attr_rail_name.dev_attr.attr,
+ &iio_dev_attr_state.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ina219_groups = {
+ .attrs = ina219_attributes,
+};
+
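+/*
+ * With IIO_CHAN_INFO_PROCESSED and the "shunt" extend_name, these
+ * channels typically show up in sysfs as in_voltage_input,
+ * in_voltage_shunt_input, in_current_input, in_current_shunt_input,
+ * in_power_input and in_power_shunt_input (exact names depend on the
+ * IIO core version in use).
+ */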
+static const struct iio_chan_spec ina219_channels_spec[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .address = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_VOLTAGE,
+ .address = 1,
+ .extend_name = "shunt",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_CURRENT,
+ .address = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_CURRENT,
+ .address = 1,
+ .extend_name = "shunt",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_POWER,
+ .address = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_POWER,
+ .address = 1,
+ .extend_name = "shunt",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static const struct iio_info ina219_info = {
+ .attrs = &ina219_groups,
+ .driver_module = THIS_MODULE,
+ .read_raw = &ina219_read_raw,
+};
+
+static struct ina219_platform_data *ina219_get_platform_data_dt(
+ struct i2c_client *client)
+{
+ struct ina219_platform_data *pdata;
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ u32 pval;
+ int ret;
+
+ if (!np) {
+ dev_err(dev, "Only DT supported\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "pdata allocation failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->rail_name = of_get_property(np, "ti,rail-name", NULL);
+ if (!pdata->rail_name)
+ dev_err(dev, "Rail name is not provided on node %s\n",
+ np->full_name);
+
+ ret = of_property_read_u32(np, "ti,continuous-config", &pval);
+ if (!ret)
+ pdata->cont_conf_data = (u16)pval;
+
+ ret = of_property_read_u32(np, "ti,trigger-config", &pval);
+ if (!ret)
+ pdata->trig_conf_data = (u16)pval;
+
+ ret = of_property_read_u32(np, "ti,calibration-data", &pval);
+ if (!ret)
+ pdata->calibration_data = pval;
+
+ ret = of_property_read_u32(np, "ti,power-lsb", &pval);
+ if (!ret)
+ pdata->power_lsb = pval;
+
+ ret = of_property_read_u32(np, "ti,divisor", &pval);
+ if (!ret)
+ pdata->divisor = pval;
+
+ ret = of_property_read_u32(np, "ti,shunt-resistor-mohm", &pval);
+ if (!ret)
+ pdata->shunt_resistor = pval;
+
+ ret = of_property_read_u32(np, "ti,precision-multiplier", &pval);
+ if (!ret)
+ pdata->precision_multiplier = pval;
+
+ return pdata;
+}
+
+static int ina219_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ina219_chip *chip;
+ struct iio_dev *indio_dev;
+ struct ina219_platform_data *pdata;
+ int ret;
+
+ pdata = ina219_get_platform_data_dt(client);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ dev_err(&client->dev, "platform data processing failed %d\n",
+ ret);
+ return ret;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation fails\n");
+ return -ENOMEM;
+ }
+
+ chip = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ chip->client = client;
+ chip->dev = &client->dev;
+ chip->pdata = pdata;
+ chip->state = STOPPED;
+ mutex_init(&chip->mutex);
+
+ indio_dev->info = &ina219_info;
+ indio_dev->channels = ina219_channels_spec;
+ indio_dev->num_channels = ARRAY_SIZE(ina219_channels_spec);
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = devm_iio_device_register(chip->dev, indio_dev);
+ if (ret < 0) {
+ dev_err(chip->dev, "iio registration fails with error %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_write_word_data(client, INA219_CONFIG,
+ __constant_cpu_to_be16(INA219_RESET));
+ if (ret < 0) {
+ dev_err(&client->dev, "ina219 reset failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = ina219_power_down(chip);
+ if (ret < 0) {
+ dev_err(&client->dev, "INA power down failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ina219_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ina219_chip *chip = iio_priv(indio_dev);
+
+ ina219_power_down(chip);
+ chip->state = STOPPED;
+ return 0;
+}
+
+static const struct i2c_device_id ina219_id[] = {
+ {"ina219x", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ina219_id);
+
+static struct i2c_driver ina219_driver = {
+ .driver = {
+ .name = "ina219x",
+ },
+ .probe = ina219_probe,
+ .remove = ina219_remove,
+ .id_table = ina219_id,
+};
+
+module_i2c_driver(ina219_driver);
+
+MODULE_DESCRIPTION("TI INA219 bidirectional current/power Monitor");
+MODULE_AUTHOR("venu byravarasu <vbyravarasu@nvidia.com>");
+MODULE_AUTHOR("Anshul Jain <anshulj@nvidia.com>");
+MODULE_AUTHOR("Deepak Nibade <dnibade@nvidia.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ina230.c b/drivers/staging/iio/meter/ina230.c
new file mode 100644
index 000000000000..d37ad274f626
--- /dev/null
+++ b/drivers/staging/iio/meter/ina230.c
@@ -0,0 +1,973 @@
+/*
+ * ina230.c - driver for TI INA230
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Based on hwmon driver:
+ * drivers/hwmon/ina230.c
+ * and contributed by:
+ * Peter Boonstoppel <pboonstoppel@nvidia.com>
+ * Deepak Nibade <dnibade@nvidia.com>
+ * Timo Alho <talho@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+
+
+/* ina230 (/ina226) register offsets */
+#define INA230_CONFIG 0
+#define INA230_SHUNT 1
+#define INA230_VOLTAGE 2
+#define INA230_POWER 3
+#define INA230_CURRENT 4
+#define INA230_CAL 5
+#define INA230_MASK 6
+#define INA230_ALERT 7
+
+/*
+ * Mask register for ina230 (/ina226):
+ * D15|D14|D13|D12|D11 D10 D09 D08 D07 D06 D05 D04 D03 D02 D01 D00
+ * SOL|SUL|BOL|BUL|POL|CVR|-   -   -   -   - |AFF|CVF|OVF|APO|LEN
+ */
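+/*
+ * Abbreviations in the diagram above (per the INA226/INA230 datasheet):
+ * SOL/SUL = shunt voltage over/under limit, BOL/BUL = bus voltage
+ * over/under limit, POL = power over-limit, CVR = conversion-ready
+ * alert enable, AFF = alert function flag, CVF = conversion-ready flag,
+ * OVF = math overflow, APO = alert polarity, LEN = alert latch enable.
+ */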
+#define INA230_MASK_SOL (1 << 15)
+#define INA230_MASK_SUL (1 << 14)
+#define INA230_MASK_CVF (1 << 3)
+#define INA230_MAX_CONVERSION_TRIALS 50
+
+/*
+ * Config register for ina230 (/ina226); some of these values may be
+ * needed to calculate platform_data values:
+ * D15|D14 D13 D12|D11 D10 D09|D08 D07 D06|D05 D04 D03|D02 D01 D00
+ * rst|-   -   -  |AVG        |Vbus_CT    |Vsh_CT     |MODE
+ */
+#define INA230_RESET (1 << 15)
+#define INA230_VBUS_CT (0 << 6) /* Vbus 140us conversion time */
+#define INA230_VSH_CT (0 << 3) /* Vshunt 140us conversion time */
+
+#define INA230_CONT_MODE 7 /* Continuous Bus and shunt measure */
+#define INA230_TRIG_MODE 3 /* Triggered Bus and shunt measure */
+#define INA230_POWER_DOWN 0
+
+enum {
+ CHANNEL_NAME = 0,
+ CURRENT_THRESHOLD,
+ ALERT_FLAG,
+ VBUS_VOLTAGE_CURRENT,
+};
+
+struct ina230_platform_data {
+ const char *rail_name;
+ int current_threshold;
+ int resistor;
+ int min_cores_online;
+ unsigned int calibration_data;
+ unsigned int power_lsb;
+ u32 trig_conf_data;
+ u32 cont_conf_data;
+ u32 divisor;
+ unsigned int shunt_resistor;
+ unsigned int precision_multiplier;
+ bool shunt_polarity_inverted; /* 0: not invert, 1: inverted */
+ bool alert_latch_enable;
+};
+
+struct ina230_chip {
+ struct device *dev;
+ struct i2c_client *client;
+ struct ina230_platform_data *pdata;
+ struct mutex mutex;
+ bool running;
+ struct notifier_block nb;
+};
+
+
+/* bus voltage resolution: 1.25mv */
+#define busv_register_to_mv(x) (((x) * 5) >> 2)
+
+/* shunt voltage resolution: 2.5uv */
+#define shuntv_register_to_uv(x) (((x) * 5) >> 1)
+#define uv_to_alert_register(x) (((x) << 1) / 5)
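+/*
+ * Illustrative check of the scaling macros above: a bus register value
+ * of 3200 gives (3200 * 5) >> 2 = 4000 mV, and a shunt register value
+ * of 2000 gives (2000 * 5) >> 1 = 5000 uV; uv_to_alert_register() is
+ * simply the inverse of the shunt scaling.
+ */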
+
+static inline struct ina230_chip *to_ina230_chip(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ return iio_priv(indio_dev);
+}
+
+static int ina230_ensure_enabled_start(struct ina230_chip *chip)
+{
+ int ret;
+
+ if (chip->running)
+ return 0;
+
+ ret = i2c_smbus_write_word_data(chip->client, INA230_CONFIG,
+ __constant_cpu_to_be16(chip->pdata->trig_conf_data));
+ if (ret < 0)
+ dev_err(chip->dev, "CONFIG write failed: %d\n", ret);
+
+ return ret;
+}
+
+static void ina230_ensure_enabled_end(struct ina230_chip *chip)
+{
+ int ret;
+
+ if (chip->running)
+ return;
+
+ ret = i2c_smbus_write_word_data(chip->client, INA230_CONFIG,
+ __constant_cpu_to_be16(INA230_POWER_DOWN));
+ if (ret < 0)
+ dev_err(chip->dev, "CONFIG write failed: %d\n", ret);
+}
+
+static int __locked_ina230_power_down(struct ina230_chip *chip)
+{
+ int ret;
+
+ if (!chip->running)
+ return 0;
+
+ ret = i2c_smbus_write_word_data(chip->client, INA230_MASK, 0);
+ if (ret < 0)
+ dev_err(chip->dev, "Mask write failed: %d\n", ret);
+
+ ret = i2c_smbus_write_word_data(chip->client, INA230_CONFIG,
+ __constant_cpu_to_be16(INA230_POWER_DOWN));
+ if (ret < 0)
+ dev_err(chip->dev, "CONFIG write failed: %d\n", ret);
+
+ chip->running = false;
+ return ret;
+}
+
+static int ina230_power_down(struct ina230_chip *chip)
+{
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = __locked_ina230_power_down(chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int __locked_ina230_start_current_mon(struct ina230_chip *chip)
+{
+ int ret;
+ s32 shunt_uv;
+ s16 shunt_limit;
+ s16 alert_mask;
+ int mask_len;
+
+ if (!chip->pdata->current_threshold) {
+ dev_err(chip->dev, "no current threshold specified\n");
+ return -EINVAL;
+ }
+
+ ret = i2c_smbus_write_word_data(chip->client, INA230_CONFIG,
+ __constant_cpu_to_be16(chip->pdata->cont_conf_data));
+ if (ret < 0) {
+ dev_err(chip->dev, "CONFIG write failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->pdata->resistor) {
+ shunt_uv = chip->pdata->resistor;
+ shunt_uv *= chip->pdata->current_threshold;
+ } else {
+ s32 v;
+ /* no resistor value defined, compute shunt_uv the hard way */
+ v = chip->pdata->precision_multiplier * 5120 * 25;
+ v /= chip->pdata->calibration_data;
+ v *= chip->pdata->current_threshold;
+ v /= chip->pdata->power_lsb;
+ shunt_uv = (s16)(v & 0xffff);
+ }
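+ /*
+  * Both branches express the threshold as microvolts across the
+  * shunt, since threshold(mA) * Rshunt(mOhm) = uV. The else branch
+  * recovers Rshunt from the calibration word: per the INA230
+  * datasheet CAL = 0.00512 / (Current_LSB * Rshunt), so the 5120
+  * and 25 factors cancel out, assuming power_lsb here is
+  * 25 * Current_LSB scaled by precision_multiplier (an assumption
+  * based on the constants, not stated in this file).
+  */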
+ if (chip->pdata->shunt_polarity_inverted)
+ shunt_uv *= -1;
+
+ shunt_limit = (s16) uv_to_alert_register(shunt_uv);
+
+ ret = i2c_smbus_write_word_data(chip->client, INA230_ALERT,
+ cpu_to_be16(shunt_limit));
+ if (ret < 0) {
+ dev_err(chip->dev, "ALERT write failed: %d\n", ret);
+ return ret;
+ }
+
+ mask_len = chip->pdata->alert_latch_enable ? 0x1 : 0x0;
+ /* the bits do not overlap, so OR states the intent of the "+" */
+ alert_mask = (shunt_limit >= 0 ? INA230_MASK_SOL : INA230_MASK_SUL) |
+ mask_len;
+ ret = i2c_smbus_write_word_data(chip->client, INA230_MASK,
+ cpu_to_be16(alert_mask));
+ if (ret < 0) {
+ dev_err(chip->dev, "MASK write failed: %d\n", ret);
+ return ret;
+ }
+ chip->running = true;
+ return 0;
+}
+
+static void __locked_ina230_evaluate_state(struct ina230_chip *chip)
+{
+ int cpus = num_online_cpus();
+
+ if (chip->running) {
+ if (cpus < chip->pdata->min_cores_online ||
+ !chip->pdata->current_threshold)
+ __locked_ina230_power_down(chip);
+ } else {
+ if (cpus >= chip->pdata->min_cores_online &&
+ chip->pdata->current_threshold)
+ __locked_ina230_start_current_mon(chip);
+ }
+}
+
+static void ina230_evaluate_state(struct ina230_chip *chip)
+{
+ mutex_lock(&chip->mutex);
+ __locked_ina230_evaluate_state(chip);
+ mutex_unlock(&chip->mutex);
+}
+
+static int ina230_get_bus_voltage(struct ina230_chip *chip, int *volt_mv)
+{
+ int ret;
+ int voltage_mv;
+
+ mutex_lock(&chip->mutex);
+ ret = ina230_ensure_enabled_start(chip);
+ if (ret < 0) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ /* getting voltage readings in millivolts */
+ voltage_mv = (s16)be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA230_VOLTAGE));
+
+ ina230_ensure_enabled_end(chip);
+ mutex_unlock(&chip->mutex);
+
+ if (voltage_mv < 0) {
+ dev_err(chip->dev, "%s: failed: %d\n", __func__, voltage_mv);
+ return -EINVAL;
+ }
+ *volt_mv = busv_register_to_mv(voltage_mv);
+ return 0;
+}
+
+static int ina230_get_shunt_voltage(struct ina230_chip *chip, int *volt_uv)
+{
+ int voltage_uv;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = ina230_ensure_enabled_start(chip);
+ if (ret < 0) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ voltage_uv = (s16)be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA230_SHUNT));
+
+ ina230_ensure_enabled_end(chip);
+ mutex_unlock(&chip->mutex);
+
+ *volt_uv = shuntv_register_to_uv(voltage_uv);
+ return 0;
+}
+
+static int __locked_wait_for_conversion(struct ina230_chip *chip)
+{
+ int ret, conversion, trials = 0;
+
+ /* wait till conversion ready bit is set */
+ do {
+ ret = i2c_smbus_read_word_data(chip->client, INA230_MASK);
+ if (ret < 0) {
+ dev_err(chip->dev, "MASK read failed: %d\n", ret);
+ return ret;
+ }
+ /* swap only after the errno check, then test the CVF bit */
+ conversion = be16_to_cpu(ret) & INA230_MASK_CVF;
+ } while ((!conversion) && (++trials < INA230_MAX_CONVERSION_TRIALS));
+
+ if (trials == INA230_MAX_CONVERSION_TRIALS) {
+ dev_err(chip->dev, "maximum retries exceeded\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int ina230_get_bus_current(struct ina230_chip *chip, int *curr_ma)
+{
+ int current_ma;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = ina230_ensure_enabled_start(chip);
+ if (ret < 0)
+ goto out;
+
+ /* fill calib data */
+ ret = i2c_smbus_write_word_data(chip->client, INA230_CAL,
+ __constant_cpu_to_be16(chip->pdata->calibration_data));
+ if (ret < 0) {
+ dev_err(chip->dev, "CAL read failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = __locked_wait_for_conversion(chip);
+ if (ret)
+ goto out;
+
+ /* getting current readings in milliamps */
+ ret = i2c_smbus_read_word_data(chip->client, INA230_CURRENT);
+ if (ret < 0)
+ goto out;
+
+ current_ma = (s16) be16_to_cpu(ret);
+
+ ina230_ensure_enabled_end(chip);
+ mutex_unlock(&chip->mutex);
+
+ if (chip->pdata->shunt_polarity_inverted)
+ current_ma *= -1;
+
+ current_ma *= (s16) chip->pdata->power_lsb;
+ if (chip->pdata->divisor)
+ current_ma /= (s16) chip->pdata->divisor;
+ if (chip->pdata->precision_multiplier)
+ current_ma /= (s16) chip->pdata->precision_multiplier;
+
+ *curr_ma = current_ma;
+ return 0;
+
+out:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int ina230_get_shunt_current(struct ina230_chip *chip, int *curr_ma)
+{
+ int voltage_uv;
+ int inverse_shunt_resistor, current_ma;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = ina230_ensure_enabled_start(chip);
+ if (ret < 0) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ voltage_uv = (s16)be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA230_SHUNT));
+
+ ina230_ensure_enabled_end(chip);
+ mutex_unlock(&chip->mutex);
+
+ voltage_uv = shuntv_register_to_uv(voltage_uv);
+ voltage_uv = abs(voltage_uv);
+
+ inverse_shunt_resistor = 1000 / chip->pdata->resistor;
+ current_ma = voltage_uv * inverse_shunt_resistor / 1000;
+
+ *curr_ma = current_ma;
+ return 0;
+}
+
+static int ina230_get_bus_power(struct ina230_chip *chip, int *pow_mw)
+{
+ int power_mw;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = ina230_ensure_enabled_start(chip);
+ if (ret < 0)
+ goto out;
+
+ /* fill calib data */
+ ret = i2c_smbus_write_word_data(chip->client, INA230_CAL,
+ __constant_cpu_to_be16(chip->pdata->calibration_data));
+ if (ret < 0) {
+ dev_err(chip->dev, "CAL read failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = __locked_wait_for_conversion(chip);
+ if (ret)
+ goto out;
+
+ /* getting power readings in milliwatts; check errno before the swap */
+ ret = i2c_smbus_read_word_data(chip->client, INA230_POWER);
+ if (ret < 0)
+ goto out;
+ power_mw = be16_to_cpu(ret);
+
+ ina230_ensure_enabled_end(chip);
+ mutex_unlock(&chip->mutex);
+
+ power_mw = power_mw * chip->pdata->power_lsb;
+ if (chip->pdata->precision_multiplier)
+ power_mw /= chip->pdata->precision_multiplier;
+
+ *pow_mw = power_mw;
+ return 0;
+
+out:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int ina230_get_shunt_power(struct ina230_chip *chip, int *power_mw)
+{
+ int voltage_uv, voltage_mv;
+ int inverse_shunt_resistor, current_ma;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = ina230_ensure_enabled_start(chip);
+ if (ret < 0) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ voltage_mv = (s16)be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA230_VOLTAGE));
+
+ voltage_uv = (s16)be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA230_SHUNT));
+
+ ina230_ensure_enabled_end(chip);
+ mutex_unlock(&chip->mutex);
+
+ voltage_mv = busv_register_to_mv(voltage_mv);
+ voltage_uv = shuntv_register_to_uv(voltage_uv);
+ voltage_uv = abs(voltage_uv);
+
+ inverse_shunt_resistor = 1000 / chip->pdata->resistor;
+ current_ma = voltage_uv * inverse_shunt_resistor / 1000;
+ *power_mw = (voltage_mv * current_ma) / 1000;
+ return 0;
+}
+
+static int ina230_get_vbus_voltage_current(struct ina230_chip *chip,
+ int *current_ma, int *voltage_mv)
+{
+ int ret = 0, val;
+ int ma;
+
+ mutex_lock(&chip->mutex);
+ /* ensure that triggered mode will be used */
+ chip->running = false;
+ ret = ina230_ensure_enabled_start(chip);
+ if (ret < 0)
+ goto out;
+
+ ret = __locked_wait_for_conversion(chip);
+ if (ret)
+ goto out;
+
+ val = i2c_smbus_read_word_data(chip->client, INA230_VOLTAGE);
+ if (val < 0) {
+ ret = val;
+ goto out;
+ }
+ *voltage_mv = busv_register_to_mv(be16_to_cpu(val));
+
+ if (chip->pdata->resistor) {
+ val = i2c_smbus_read_word_data(chip->client, INA230_SHUNT);
+ if (val < 0) {
+ ret = val;
+ goto out;
+ }
+ ma = shuntv_register_to_uv((s16)be16_to_cpu(val));
+ ma = DIV_ROUND_CLOSEST(ma, chip->pdata->resistor);
+ if (chip->pdata->shunt_polarity_inverted)
+ ma *= -1;
+ *current_ma = ma;
+ } else {
+ *current_ma = 0;
+ }
+out:
+ /* restart continuous current monitoring, if enabled */
+ if (chip->pdata->current_threshold)
+ __locked_ina230_evaluate_state(chip);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+
+static int ina230_set_current_threshold(struct ina230_chip *chip,
+ int current_ma)
+{
+ int ret = 0;
+
+ mutex_lock(&chip->mutex);
+
+ chip->pdata->current_threshold = current_ma;
+ if (current_ma) {
+ if (chip->running)
+ /* force restart */
+ ret = __locked_ina230_start_current_mon(chip);
+ else
+ __locked_ina230_evaluate_state(chip);
+ } else {
+ ret = __locked_ina230_power_down(chip);
+ }
+
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int ina230_show_alert_flag(struct ina230_chip *chip, char *buf)
+{
+ int alert_flag;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = ina230_ensure_enabled_start(chip);
+ if (ret < 0) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ alert_flag = be16_to_cpu(i2c_smbus_read_word_data(chip->client,
+ INA230_MASK));
+
+ ina230_ensure_enabled_end(chip);
+ mutex_unlock(&chip->mutex);
+
+ alert_flag = (alert_flag >> 4) & 0x1;
+ return sprintf(buf, "%d\n", alert_flag);
+}
+
+static int ina230_hotplug_notify(struct notifier_block *nb,
+ unsigned long event, void *hcpu)
+{
+ struct ina230_chip *chip = container_of(nb, struct ina230_chip, nb);
+
+ if (event == CPU_ONLINE || event == CPU_DEAD)
+ ina230_evaluate_state(chip);
+ return 0;
+}
+
+static int ina230_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct ina230_chip *chip = iio_priv(indio_dev);
+ struct device *dev = chip->dev;
+ int type = chan->type;
+ int address = chan->address;
+ int ret = 0;
+
+ if (mask != IIO_CHAN_INFO_PROCESSED) {
+ dev_err(dev, "Invalid mask 0x%08lx\n", mask);
+ return -EINVAL;
+ }
+
+ switch (address) {
+ case 0:
+ switch (type) {
+ case IIO_VOLTAGE:
+ ret = ina230_get_bus_voltage(chip, val);
+ break;
+
+ case IIO_CURRENT:
+ ret = ina230_get_bus_current(chip, val);
+ break;
+
+ case IIO_POWER:
+ ret = ina230_get_bus_power(chip, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+
+ case 1:
+ switch (type) {
+ case IIO_VOLTAGE:
+ ret = ina230_get_shunt_voltage(chip, val);
+ break;
+
+ case IIO_CURRENT:
+ ret = ina230_get_shunt_current(chip, val);
+ break;
+
+ case IIO_POWER:
+ ret = ina230_get_shunt_power(chip, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ ret = IIO_VAL_INT;
+ return ret;
+}
+
+static ssize_t ina230_show_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ina230_chip *chip = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int current_ma = 0;
+ int voltage_mv = 0;
+ int ret;
+
+ switch (this_attr->address) {
+ case CHANNEL_NAME:
+ return sprintf(buf, "%s\n", chip->pdata->rail_name);
+
+ case CURRENT_THRESHOLD:
+ return sprintf(buf, "%d mA\n", chip->pdata->current_threshold);
+
+ case ALERT_FLAG:
+ return ina230_show_alert_flag(chip, buf);
+
+ case VBUS_VOLTAGE_CURRENT:
+ ret = ina230_get_vbus_voltage_current(chip, &current_ma,
+ &voltage_mv);
+ if (!ret)
+ return sprintf(buf, "%d %d\n", voltage_mv, current_ma);
+ return ret;
+
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static ssize_t ina230_set_channel(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ina230_chip *chip = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int mode = this_attr->address;
+ long val;
+ int current_ma;
+
+ switch (mode) {
+ case CURRENT_THRESHOLD:
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ current_ma = (int) val;
+ return ina230_set_current_threshold(chip, current_ma);
+
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static IIO_DEVICE_ATTR(rail_name, S_IRUGO,
+ ina230_show_channel, NULL, CHANNEL_NAME);
+
+static IIO_DEVICE_ATTR(current_threshold, S_IRUGO | S_IWUSR,
+ ina230_show_channel, ina230_set_channel, CURRENT_THRESHOLD);
+
+static IIO_DEVICE_ATTR(alert_flag, S_IRUGO,
+ ina230_show_channel, NULL, ALERT_FLAG);
+
+static IIO_DEVICE_ATTR(ui_input, S_IRUSR|S_IRGRP,
+ ina230_show_channel, NULL,
+ VBUS_VOLTAGE_CURRENT);
+
+
+static struct attribute *ina230_attributes[] = {
+ &iio_dev_attr_rail_name.dev_attr.attr,
+ &iio_dev_attr_current_threshold.dev_attr.attr,
+ &iio_dev_attr_alert_flag.dev_attr.attr,
+ &iio_dev_attr_ui_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ina230_groups = {
+ .attrs = ina230_attributes,
+};
+
+static const struct iio_chan_spec ina230_channels_spec[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .address = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_VOLTAGE,
+ .address = 1,
+ .extend_name = "shunt",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_CURRENT,
+ .address = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_CURRENT,
+ .address = 1,
+ .extend_name = "shunt",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_POWER,
+ .address = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_POWER,
+ .address = 1,
+ .extend_name = "shunt",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static const struct iio_info ina230_info = {
+ .attrs = &ina230_groups,
+ .driver_module = THIS_MODULE,
+ .read_raw = &ina230_read_raw,
+};
+
+static struct ina230_platform_data *ina230_get_platform_data_dt(
+ struct i2c_client *client)
+{
+ struct ina230_platform_data *pdata;
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ u32 pval;
+ int ret;
+
+ if (!np) {
+ dev_err(dev, "Only DT supported\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "pdata allocation failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pdata->rail_name = of_get_property(np, "ti,rail-name", NULL);
+ if (!pdata->rail_name)
+ dev_err(dev, "Rail name is not provided on node %s\n",
+ np->full_name);
+
+ ret = of_property_read_u32(np, "ti,continuous-config", &pval);
+ if (!ret)
+ pdata->cont_conf_data = (u16)pval;
+
+ ret = of_property_read_u32(np, "ti,trigger-config", &pval);
+ if (!ret)
+ pdata->trig_conf_data = (u16)pval;
+
+ ret = of_property_read_u32(np, "ti,current-threshold", &pval);
+ if (!ret)
+ pdata->current_threshold = (int)pval;
+
+ ret = of_property_read_u32(np, "ti,resistor", &pval);
+ if (!ret)
+ pdata->resistor = pval;
+
+ ret = of_property_read_u32(np, "ti,minimum-core-online", &pval);
+ if (!ret)
+ pdata->min_cores_online = pval;
+
+ ret = of_property_read_u32(np, "ti,calibration-data", &pval);
+ if (!ret)
+ pdata->calibration_data = pval;
+
+ ret = of_property_read_u32(np, "ti,power-lsb", &pval);
+ if (!ret)
+ pdata->power_lsb = pval;
+
+ ret = of_property_read_u32(np, "ti,divisor", &pval);
+ if (!ret)
+ pdata->divisor = pval;
+
+ ret = of_property_read_u32(np, "ti,shunt-resistor-mohm", &pval);
+ if (!ret)
+ pdata->shunt_resistor = pval;
+
+ ret = of_property_read_u32(np, "ti,precision-multiplier", &pval);
+ if (!ret)
+ pdata->precision_multiplier = pval;
+
+ pdata->shunt_polarity_inverted = of_property_read_bool(np,
+ "ti,shunt-polartiy-inverted");
+
+ pdata->alert_latch_enable = of_property_read_bool(np,
+ "ti,enable-alert-latch");
+ return pdata;
+}
+
+static int ina230_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ina230_chip *chip;
+ struct iio_dev *indio_dev;
+ struct ina230_platform_data *pdata;
+ int ret;
+
+ pdata = ina230_get_platform_data_dt(client);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ dev_err(&client->dev, "platform data processing failed %d\n",
+ ret);
+ return ret;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation fails\n");
+ return -ENOMEM;
+ }
+
+ chip = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+ chip->client = client;
+ chip->dev = &client->dev;
+ chip->pdata = pdata;
+ chip->running = false;
+ chip->nb.notifier_call = ina230_hotplug_notify;
+ mutex_init(&chip->mutex);
+
+ indio_dev->info = &ina230_info;
+ indio_dev->channels = ina230_channels_spec;
+ indio_dev->num_channels = ARRAY_SIZE(ina230_channels_spec);
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = devm_iio_device_register(chip->dev, indio_dev);
+ if (ret < 0) {
+ dev_err(chip->dev, "iio registration fails with error %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_write_word_data(client, INA230_CONFIG,
+ __constant_cpu_to_be16(INA230_RESET));
+ if (ret < 0) {
+ dev_err(&client->dev, "ina230 reset failed: %d\n", ret);
+ return ret;
+ }
+
+ register_hotcpu_notifier(&chip->nb);
+
+ ret = i2c_smbus_write_word_data(client, INA230_MASK, 0);
+ if (ret < 0) {
+ dev_err(&client->dev, "MASK write failed: %d\n", ret);
+ goto exit;
+ }
+
+ /* set ina230 to power down mode */
+ ret = i2c_smbus_write_word_data(client, INA230_CONFIG,
+ __constant_cpu_to_be16(INA230_POWER_DOWN));
+ if (ret < 0) {
+ dev_err(&client->dev, "INA power down failed: %d\n", ret);
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ unregister_hotcpu_notifier(&chip->nb);
+ return ret;
+}
+
+static int ina230_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ina230_chip *chip = iio_priv(indio_dev);
+
+ unregister_hotcpu_notifier(&chip->nb);
+ ina230_power_down(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ina230_suspend(struct device *dev)
+{
+ struct ina230_chip *chip = to_ina230_chip(dev);
+
+ return ina230_power_down(chip);
+}
+
+static int ina230_resume(struct device *dev)
+{
+ struct ina230_chip *chip = to_ina230_chip(dev);
+
+ ina230_evaluate_state(chip);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops ina230_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ina230_suspend, ina230_resume)
+};
+
+static const struct i2c_device_id ina230_id[] = {
+ {"ina226x", 0 },
+ {"ina230x", 0 },
+ {"hpa01112x", 0 },
+ {"hpa02149x", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ina230_id);
+
+static struct i2c_driver ina230_driver = {
+ .driver = {
+ .name = "ina230x",
+ .pm = &ina230_pm_ops,
+ },
+ .probe = ina230_probe,
+ .remove = ina230_remove,
+ .id_table = ina230_id,
+};
+
+module_i2c_driver(ina230_driver);
+
+MODULE_DESCRIPTION("TI INA230 bidirectional current/power Monitor");
+MODULE_AUTHOR("Peter Boonstoppel <pboonstoppel@nvidia.com>");
+MODULE_AUTHOR("Deepak Nibade <dnibade@nvidia.com>");
+MODULE_AUTHOR("Timo Alho <talho@nvidia.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ina3221.c b/drivers/staging/iio/meter/ina3221.c
new file mode 100644
index 000000000000..89421962cfb7
--- /dev/null
+++ b/drivers/staging/iio/meter/ina3221.c
@@ -0,0 +1,1067 @@
+/*
+ * ina3221.c - driver for TI INA3221
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Based on hwmon driver:
+ * drivers/hwmon/ina3221.c
+ * and contributed by:
+ * Deepak Nibade <dnibade@nvidia.com>
+ * Timo Alho <talho@nvidia.com>
+ * Anshul Jain <anshulj@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+
+#define INA3221_CONFIG 0x00
+#define INA3221_SHUNT_VOL_CHAN1 0x01
+#define INA3221_BUS_VOL_CHAN1 0x02
+#define INA3221_SHUNT_VOL_CHAN2 0x03
+#define INA3221_BUS_VOL_CHAN2 0x04
+#define INA3221_SHUNT_VOL_CHAN3 0x05
+#define INA3221_BUS_VOL_CHAN3 0x06
+#define INA3221_CRIT_CHAN1 0x07
+#define INA3221_WARN_CHAN1 0x08
+#define INA3221_CRIT_CHAN2 0x09
+#define INA3221_WARN_CHAN2 0x0A
+#define INA3221_CRIT_CHAN3 0x0B
+#define INA3221_WARN_CHAN3 0x0C
+#define INA3221_MASK_ENABLE 0x0F
+
+#define INA3221_SHUNT_VOL(i) (INA3221_SHUNT_VOL_CHAN1 + (i) * 2)
+#define INA3221_BUS_VOL(i) (INA3221_BUS_VOL_CHAN1 + (i) * 2)
+#define INA3221_CRIT(i) (INA3221_CRIT_CHAN1 + (i) * 2)
+#define INA3221_WARN(i) (INA3221_WARN_CHAN1 + (i) * 2)
+
+#define INA3221_RESET 0x8000
+#define INA3221_POWER_DOWN 0
+#define INA3221_ENABLE_CHAN (7 << 12) /* enable all 3 channels */
+#define INA3221_AVG (3 << 9) /* 64 averages */
+#define INA3221_VBUS_CT (4 << 6) /* Vbus 1.1 ms conversion time */
+#define INA3221_VSHUNT_CT (4 << 3) /* Vshunt 1.1 ms conversion time */
+#define INA3221_CONT_MODE 7 /* continuous bus and shunt V measure */
+#define INA3221_TRIG_MODE 3 /* triggered bus and shunt V measure */
+
+#define INA3221_CONT_CONFIG_DATA (INA3221_ENABLE_CHAN | INA3221_AVG | \
+ INA3221_VBUS_CT | INA3221_VSHUNT_CT | \
+ INA3221_CONT_MODE) /* 0x7727 */
+
+#define INA3221_TRIG_CONFIG_DATA (INA3221_ENABLE_CHAN | \
+ INA3221_TRIG_MODE) /* 0x7003 */
+#define INA3221_NUMBER_OF_RAILS 3
+
+#define INA3221_CVRF 0x01
+
+#define CPU_THRESHOLD 2
+#define CPU_FREQ_THRESHOLD 102000
+
+#define INA3221_MAX_CONVERSION_TRIALS 10
+
+#define PACK_MODE_CHAN(mode, chan) ((mode) | ((chan) << 8))
+#define UNPACK_MODE(address) ((address) & 0xFF)
+#define UNPACK_CHAN(address) (((address) >> 8) & 0xFF)
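+/*
+ * Example (illustrative): PACK_MODE_CHAN(VBUS_VOLTAGE_CURRENT, 2)
+ * yields 3 | (2 << 8) = 0x203, from which UNPACK_MODE() recovers the
+ * sysfs attribute id 3 and UNPACK_CHAN() the rail index 2, both packed
+ * into a single iio_chan_spec address word.
+ */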
+
+#define U32_MINUS_1 ((u32) -1)
+
+enum {
+ CHANNEL_NAME = 0,
+ CRIT_CURRENT_LIMIT,
+ RUNNING_MODE,
+ VBUS_VOLTAGE_CURRENT,
+};
+
+enum mode {
+ TRIGGERED = 0,
+ FORCED_TRIGGERED = 1,
+ CONTINUOUS = 2,
+ FORCED_CONTINUOUS = 3,
+};
+
+#define IS_TRIGGERED(x) (!((x) & 2))
+#define IS_CONTINUOUS(x) ((x) & 2)
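+/*
+ * The mode encoding uses bit 1 as the "continuous" flag: TRIGGERED (0)
+ * and FORCED_TRIGGERED (1) have it clear, CONTINUOUS (2) and
+ * FORCED_CONTINUOUS (3) have it set, which is all IS_TRIGGERED() and
+ * IS_CONTINUOUS() test. Bit 0 marks a mode forced from sysfs that the
+ * cpufreq/hotplug notifiers leave alone.
+ */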
+
+struct ina3221_chan_pdata {
+ const char *rail_name;
+ u32 warn_conf_limits;
+ u32 crit_conf_limits;
+ u32 shunt_resistor;
+};
+
+struct ina3221_platform_data {
+ u16 cont_conf_data;
+ u16 trig_conf_data;
+ struct ina3221_chan_pdata cpdata[INA3221_NUMBER_OF_RAILS];
+};
+
+struct ina3221_chip {
+ struct device *dev;
+ struct i2c_client *client;
+ struct ina3221_platform_data *pdata;
+ struct mutex mutex;
+ int shutdown_complete;
+ int is_suspended;
+ int mode;
+ int alert_enabled;
+ struct notifier_block nb_hot;
+ struct notifier_block nb_cpufreq;
+};
+
+static int __locked_ina3221_switch_mode(struct ina3221_chip *chip,
+ int cpus, int cpufreq);
+
+static inline struct ina3221_chip *to_ina3221_chip(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ return iio_priv(indio_dev);
+}
+
+static inline int shuntv_register_to_uv(u16 reg)
+{
+ int ret = (s16)reg;
+
+ return (ret >> 3) * 40;
+}
+
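+/*
+ * The shunt register keeps its 40 uV LSB in bits 15..3, so the encoding
+ * below is (uv / 40) << 3, which reduces to uv / 5 (e.g. 20000 uV ->
+ * register value 4000).
+ */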
+static inline u16 uv_to_shuntv_register(s32 uv)
+{
+ return (u16)(uv/5);
+}
+
+static inline int busv_register_to_mv(u16 reg)
+{
+ int ret = (s16)reg;
+
+ return (ret >> 3) * 8;
+}
+
+/* convert shunt voltage register value to current (in mA) */
+static int shuntv_register_to_ma(u16 reg, int resistance)
+{
+ int uv, ma;
+
+ uv = (s16)reg;
+ uv = ((uv >> 3) * 40); /* LSB (4th bit) is 40uV */
+ /*
+ * calculate uv/resistance with rounding knowing that C99 truncates
+ * towards zero
+ */
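+ /*
+  * e.g. uv = 50, resistance = 3: plain division gives 16 mA, while
+  * ((50 * 2 / 3) + 1) / 2 = 17 mA; for uv = -50 the -1 bias gives
+  * -17 mA, keeping rounding symmetric around zero.
+  */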
+ if (uv > 0)
+ ma = ((uv * 2 / resistance) + 1) / 2;
+ else
+ ma = ((uv * 2 / resistance) - 1) / 2;
+ return ma;
+}
+
+static int __locked_power_down_ina3221(struct ina3221_chip *chip)
+{
+ int ret;
+
+ ret = i2c_smbus_write_word_data(chip->client, INA3221_CONFIG,
+ INA3221_POWER_DOWN);
+ if (ret < 0)
+ dev_err(chip->dev, "Power down failed: %d", ret);
+ return ret;
+}
+
+static int __locked_power_up_ina3221(struct ina3221_chip *chip, int config)
+{
+ int ret;
+
+ ret = i2c_smbus_write_word_data(chip->client, INA3221_CONFIG,
+ cpu_to_be16(config));
+ if (ret < 0)
+ dev_err(chip->dev, "Power up failed: %d\n", ret);
+ return ret;
+}
+
+static int __locked_start_conversion(struct ina3221_chip *chip)
+{
+ int ret, cvrf, trials = 0;
+
+ if (IS_TRIGGERED(chip->mode)) {
+ ret = __locked_power_up_ina3221(chip,
+ chip->pdata->trig_conf_data);
+
+ if (ret < 0)
+ return ret;
+
+ /* wait till conversion ready bit is set */
+ do {
+ ret = i2c_smbus_read_word_data(chip->client,
+ INA3221_MASK_ENABLE);
+ if (ret < 0) {
+ dev_err(chip->dev, "MASK read failed: %d\n",
+ ret);
+ return ret;
+ }
+ cvrf = be16_to_cpu(ret) & INA3221_CVRF;
+ } while ((!cvrf) && (++trials < INA3221_MAX_CONVERSION_TRIALS));
+ if (trials == INA3221_MAX_CONVERSION_TRIALS) {
+ dev_err(chip->dev, "maximum retries exceeded\n");
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+
+static int __locked_end_conversion(struct ina3221_chip *chip)
+{
+ int ret = 0;
+
+ if (IS_TRIGGERED(chip->mode))
+ ret = __locked_power_down_ina3221(chip);
+
+ return ret;
+}
+
+static int __locked_do_conversion(struct ina3221_chip *chip, u16 *vsh,
+ u16 *vbus, int ch)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+
+ ret = __locked_start_conversion(chip);
+ if (ret < 0)
+ return ret;
+
+ if (vsh) {
+ ret = i2c_smbus_read_word_data(client, INA3221_SHUNT_VOL(ch));
+ if (ret < 0)
+ return ret;
+ *vsh = be16_to_cpu(ret);
+ }
+
+ if (vbus) {
+ ret = i2c_smbus_read_word_data(client, INA3221_BUS_VOL(ch));
+ if (ret < 0)
+ return ret;
+ *vbus = be16_to_cpu(ret);
+ }
+
+ return __locked_end_conversion(chip);
+}
+
+static int ina3221_get_mode(struct ina3221_chip *chip, char *buf)
+{
+ int v;
+
+ mutex_lock(&chip->mutex);
+ v = (IS_TRIGGERED(chip->mode)) ? 0 : 1;
+ mutex_unlock(&chip->mutex);
+ return sprintf(buf, "%d\n", v);
+}
+
+static int ina3221_set_mode(struct ina3221_chip *chip,
+ const char *buf, size_t count)
+{
+ int cpufreq;
+ int cpus;
+ long val;
+ int ret = 0;
+
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ if (val > 0) {
+ ret = __locked_power_up_ina3221(chip,
+ chip->pdata->cont_conf_data);
+ if (!ret)
+ chip->mode = FORCED_CONTINUOUS;
+ } else if (val == 0) {
+ chip->mode = FORCED_TRIGGERED;
+ ret = __locked_power_down_ina3221(chip);
+ } else {
+ if (chip->alert_enabled) {
+ if (IS_TRIGGERED(chip->mode))
+ chip->mode = TRIGGERED;
+ else
+ chip->mode = CONTINUOUS;
+ /* evaluate the state */
+ cpufreq = cpufreq_quick_get(0);
+ cpus = num_online_cpus();
+ ret = __locked_ina3221_switch_mode(chip, cpus, cpufreq);
+ } else {
+ chip->mode = TRIGGERED;
+ ret = __locked_power_down_ina3221(chip);
+ }
+ }
+ mutex_unlock(&chip->mutex);
+ return ret ? ret : count;
+}
+
+static int ina3221_get_channel_voltage(struct ina3221_chip *chip,
+ int channel, int *voltage_mv)
+{
+ u16 vbus;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+
+ ret = __locked_do_conversion(chip, NULL, &vbus, channel);
+ if (ret < 0) {
+ dev_err(chip->dev, "Voltage read on channel %d failed: %d\n",
+ channel, ret);
+ goto exit;
+ }
+ *voltage_mv = busv_register_to_mv(vbus);
+exit:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int ina3221_get_channel_current(struct ina3221_chip *chip,
+ int channel, int trigger, int *current_ma)
+{
+ u16 vsh;
+ int ret = 0;
+
+ mutex_lock(&chip->mutex);
+
+ /* return 0 if INA is off */
+ if (trigger && (IS_TRIGGERED(chip->mode))) {
+ *current_ma = 0;
+ goto exit;
+ }
+
+ ret = __locked_do_conversion(chip, &vsh, NULL, channel);
+ if (ret < 0) {
+ dev_err(chip->dev, "Current read on channel %d failed: %d\n",
+ channel, ret);
+ goto exit;
+ }
+ *current_ma = shuntv_register_to_ma(vsh,
+ chip->pdata->cpdata[channel].shunt_resistor);
+exit:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int ina3221_get_channel_power(struct ina3221_chip *chip,
+ int channel, int trigger, int *power_mw)
+{
+ u16 vsh, vbus;
+ int current_ma, voltage_mv;
+ int ret = 0;
+
+ mutex_lock(&chip->mutex);
+
+ if (trigger && (IS_TRIGGERED(chip->mode))) {
+ *power_mw = 0;
+ goto exit;
+ }
+
+ ret = __locked_do_conversion(chip, &vsh, &vbus, channel);
+ if (ret < 0) {
+ dev_err(chip->dev, "Read on channel %d failed: %d\n",
+ channel, ret);
+ goto exit;
+ }
+
+ current_ma = shuntv_register_to_ma(vsh,
+ chip->pdata->cpdata[channel].shunt_resistor);
+ voltage_mv = busv_register_to_mv(vbus);
+ *power_mw = (voltage_mv * current_ma) / 1000;
+exit:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int ina3221_get_channel_vbus_voltage_current(struct ina3221_chip *chip,
+ int channel, int *current_ma, int *voltage_mv)
+{
+ u16 vsh, vbus;
+ int ret = 0;
+
+ mutex_lock(&chip->mutex);
+
+ ret = __locked_do_conversion(chip, &vsh, &vbus, channel);
+ if (ret < 0) {
+ dev_err(chip->dev, "Read on channel %d failed: %d\n",
+ channel, ret);
+ goto exit;
+ }
+
+ *current_ma = shuntv_register_to_ma(vsh,
+ chip->pdata->cpdata[channel].shunt_resistor);
+ *voltage_mv = busv_register_to_mv(vbus);
+exit:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int __locked_set_crit_alert_register(struct ina3221_chip *chip,
+ u32 channel)
+{
+ struct ina3221_chan_pdata *cpdata = &chip->pdata->cpdata[channel];
+ int shunt_volt_limit;
+
+ chip->alert_enabled = 1;
+ shunt_volt_limit = cpdata->crit_conf_limits * cpdata->shunt_resistor;
+ shunt_volt_limit = uv_to_shuntv_register(shunt_volt_limit);
+
+ return i2c_smbus_write_word_data(chip->client, INA3221_CRIT(channel),
+ cpu_to_be16(shunt_volt_limit));
+}
+
+static int __locked_set_warn_alert_register(struct ina3221_chip *chip,
+ u32 channel)
+{
+ struct ina3221_chan_pdata *cpdata = &chip->pdata->cpdata[channel];
+ int shunt_volt_limit;
+
+ chip->alert_enabled = 1;
+ shunt_volt_limit = cpdata->warn_conf_limits * cpdata->shunt_resistor;
+ shunt_volt_limit = uv_to_shuntv_register(shunt_volt_limit);
+ return i2c_smbus_write_word_data(chip->client, INA3221_WARN(channel),
+ cpu_to_be16(shunt_volt_limit));
+}
+
+static int __locked_set_crit_warn_limits(struct ina3221_chip *chip)
+{
+ struct ina3221_chan_pdata *cpdata;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < INA3221_NUMBER_OF_RAILS; i++) {
+ cpdata = &chip->pdata->cpdata[i];
+
+ if (cpdata->crit_conf_limits != U32_MINUS_1) {
+ ret = __locked_set_crit_alert_register(chip, i);
+ if (ret < 0)
+ break;
+ }
+
+ if (cpdata->warn_conf_limits != U32_MINUS_1) {
+ ret = __locked_set_warn_alert_register(chip, i);
+ if (ret < 0)
+ break;
+ }
+ }
+ return ret;
+}
+
+static int ina3221_set_channel_critical(struct ina3221_chip *chip,
+ int channel, int curr_limit)
+{
+ struct ina3221_chan_pdata *cpdata = &chip->pdata->cpdata[channel];
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ cpdata->crit_conf_limits = curr_limit;
+ ret = __locked_set_crit_alert_register(chip, channel);
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int ina3221_get_channel_critical(struct ina3221_chip *chip,
+ int channel, int *curr_limit)
+{
+ struct ina3221_chan_pdata *cpdata = &chip->pdata->cpdata[channel];
+ u32 crit_reg_addr = INA3221_CRIT(channel);
+ int ret;
+
+ mutex_lock(&chip->mutex);
+
+ /* read the critical shunt-voltage limit register */
+ ret = i2c_smbus_read_word_data(chip->client, crit_reg_addr);
+ if (ret < 0) {
+ dev_err(chip->dev, "Channel %d crit register read failed: %d\n",
+ channel, ret);
+ goto exit;
+ }
+
+ *curr_limit = shuntv_register_to_ma(be16_to_cpu(ret),
+ cpdata->shunt_resistor);
+ ret = 0;
+exit:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int __locked_ina3221_switch_mode(struct ina3221_chip *chip,
+ int cpus, int cpufreq)
+{
+ int ret = 0;
+
+ if (!chip->alert_enabled)
+ return 0;
+
+ switch (chip->mode) {
+ case TRIGGERED:
+ if ((cpus >= CPU_THRESHOLD) ||
+ (cpufreq >= CPU_FREQ_THRESHOLD)) {
+ /*
+ * Turn the INA on when the CPU frequency or the number
+ * of online CPUs crosses its threshold.
+ */
+ dev_vdbg(chip->dev, "Turn-on cpus:%d, cpufreq:%d\n",
+ cpus, cpufreq);
+
+ ret = __locked_power_up_ina3221(chip,
+ chip->pdata->cont_conf_data);
+ if (ret < 0) {
+ dev_err(chip->dev, "INA power up failed: %d\n",
+ ret);
+ return ret;
+ }
+ chip->mode = CONTINUOUS;
+ }
+ break;
+ case CONTINUOUS:
+ if ((cpus < CPU_THRESHOLD) && (cpufreq < CPU_FREQ_THRESHOLD)) {
+ /*
+ * Turn the INA off when both the number of online CPUs
+ * and the CPU frequency are below their thresholds.
+ */
+ dev_vdbg(chip->dev, "Turn-off, cpus:%d, cpufreq:%d\n",
+ cpus, cpufreq);
+
+ ret = __locked_power_down_ina3221(chip);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "INA power down failed:%d\n", ret);
+ return ret;
+ }
+ chip->mode = TRIGGERED;
+ }
+ break;
+ case FORCED_CONTINUOUS:
+ case FORCED_TRIGGERED:
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ina3221_cpufreq_notify(struct notifier_block *nb,
+ unsigned long event, void *hcpu)
+{
+ struct ina3221_chip *chip = container_of(nb,
+ struct ina3221_chip, nb_cpufreq);
+ int cpufreq;
+ int cpus;
+ int ret = 0;
+
+ if (event != CPUFREQ_POSTCHANGE)
+ return 0;
+
+ mutex_lock(&chip->mutex);
+ if (chip->is_suspended)
+ goto exit;
+
+ cpufreq = ((struct cpufreq_freqs *)hcpu)->new;
+ cpus = num_online_cpus();
+ dev_vdbg(chip->dev, "CPUfreq notified freq:%d cpus:%d\n",
+ cpufreq, cpus);
+ ret = __locked_ina3221_switch_mode(chip, cpus, cpufreq);
+ if (ret < 0)
+ dev_err(chip->dev, "INA change mode failed %d\n", ret);
+exit:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int ina3221_hotplug_notify(struct notifier_block *nb,
+ unsigned long event, void *hcpu)
+{
+ struct ina3221_chip *chip = container_of(nb,
+ struct ina3221_chip, nb_hot);
+ int cpus;
+ int cpufreq = 0;
+ int ret = 0;
+
+ if (event == CPU_ONLINE || event == CPU_DEAD) {
+ mutex_lock(&chip->mutex);
+ cpufreq = cpufreq_quick_get(0);
+ cpus = num_online_cpus();
+ dev_vdbg(chip->dev, "hotplug notified cpufreq:%d cpus:%d\n",
+ cpufreq, cpus);
+ ret = __locked_ina3221_switch_mode(chip, cpus, cpufreq);
+ mutex_unlock(&chip->mutex);
+
+ if (ret < 0)
+ dev_err(chip->dev, "INA switch mode failed: %d\n", ret);
+ }
+ return ret;
+}
+
+static int ina3221_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct ina3221_chip *chip = iio_priv(indio_dev);
+ struct device *dev = chip->dev;
+ int type = chan->type;
+ int channel = chan->channel;
+ int address = chan->address;
+ int ret = 0;
+
+ if (channel >= 3) {
+ dev_err(dev, "Invalid channel Id %d\n", channel);
+ return -EINVAL;
+ }
+ if (mask != IIO_CHAN_INFO_PROCESSED) {
+ dev_err(dev, "Invalid mask 0x%08lx\n", mask);
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case IIO_VOLTAGE:
+ ret = ina3221_get_channel_voltage(chip, channel, val);
+ break;
+
+ case IIO_CURRENT:
+ ret = ina3221_get_channel_current(chip, channel, address, val);
+ break;
+
+ case IIO_POWER:
+ ret = ina3221_get_channel_power(chip, channel, address, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ ret = IIO_VAL_INT;
+ return ret;
+}
+
+static ssize_t ina3221_show_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ina3221_chip *chip = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int mode = UNPACK_MODE(this_attr->address);
+ int channel = UNPACK_CHAN(this_attr->address);
+ int ret;
+ int current_ma;
+ int voltage_mv;
+
+ if (channel >= 3) {
+ dev_err(dev, "Invalid channel Id %d\n", channel);
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case CHANNEL_NAME:
+ return sprintf(buf, "%s\n",
+ chip->pdata->cpdata[channel].rail_name);
+
+ case CRIT_CURRENT_LIMIT:
+ ret = ina3221_get_channel_critical(chip, channel, &current_ma);
+ if (!ret)
+ return sprintf(buf, "%d ma\n", current_ma);
+ return ret;
+
+ case RUNNING_MODE:
+ return ina3221_get_mode(chip, buf);
+
+ case VBUS_VOLTAGE_CURRENT:
+ ret = ina3221_get_channel_vbus_voltage_current(chip,
+ channel, &current_ma, &voltage_mv);
+ if (!ret)
+ return sprintf(buf, "%d %d\n", voltage_mv, current_ma);
+ return ret;
+
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static ssize_t ina3221_set_channel(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ina3221_chip *chip = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ int mode = UNPACK_MODE(this_attr->address);
+ int channel = UNPACK_CHAN(this_attr->address);
+ long val;
+ int current_ma;
+ int ret;
+
+ if (channel >= 3) {
+ dev_err(dev, "Invalid channel Id %d\n", channel);
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case CRIT_CURRENT_LIMIT:
+ if (kstrtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ current_ma = (int) val;
+ ret = ina3221_set_channel_critical(chip, channel, current_ma);
+ return ret < 0 ? ret : len;
+
+ case RUNNING_MODE:
+ return ina3221_set_mode(chip, buf, len);
+ }
+ return -EINVAL;
+}
+
+static IIO_DEVICE_ATTR(rail_name_0, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(CHANNEL_NAME, 0));
+static IIO_DEVICE_ATTR(rail_name_1, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(CHANNEL_NAME, 1));
+static IIO_DEVICE_ATTR(rail_name_2, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(CHANNEL_NAME, 2));
+
+static IIO_DEVICE_ATTR(crit_current_limit_0, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(CRIT_CURRENT_LIMIT, 0));
+static IIO_DEVICE_ATTR(crit_current_limit_1, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(CRIT_CURRENT_LIMIT, 1));
+static IIO_DEVICE_ATTR(crit_current_limit_2, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(CRIT_CURRENT_LIMIT, 2));
+
+static IIO_DEVICE_ATTR(ui_input_0, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(VBUS_VOLTAGE_CURRENT, 0));
+static IIO_DEVICE_ATTR(ui_input_1, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(VBUS_VOLTAGE_CURRENT, 1));
+static IIO_DEVICE_ATTR(ui_input_2, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(VBUS_VOLTAGE_CURRENT, 2));
+
+static IIO_DEVICE_ATTR(running_mode, S_IRUGO | S_IWUSR,
+ ina3221_show_channel, ina3221_set_channel,
+ PACK_MODE_CHAN(RUNNING_MODE, 0));
+
+static struct attribute *ina3221_attributes[] = {
+ &iio_dev_attr_rail_name_0.dev_attr.attr,
+ &iio_dev_attr_rail_name_1.dev_attr.attr,
+ &iio_dev_attr_rail_name_2.dev_attr.attr,
+ &iio_dev_attr_crit_current_limit_0.dev_attr.attr,
+ &iio_dev_attr_crit_current_limit_1.dev_attr.attr,
+ &iio_dev_attr_crit_current_limit_2.dev_attr.attr,
+ &iio_dev_attr_ui_input_0.dev_attr.attr,
+ &iio_dev_attr_ui_input_1.dev_attr.attr,
+ &iio_dev_attr_ui_input_2.dev_attr.attr,
+ &iio_dev_attr_running_mode.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ina3221_groups = {
+ .attrs = ina3221_attributes,
+};
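+
+/*
+ * Example sysfs usage (illustrative; the iio device index depends on the
+ * system):
+ * cat /sys/bus/iio/devices/iio:device0/rail_name_0
+ * echo 1200 > /sys/bus/iio/devices/iio:device0/crit_current_limit_0
+ * cat /sys/bus/iio/devices/iio:device0/ui_input_0
+ */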
+
+#define channel_type(_type, _add, _channel, _name) \
+ { \
+ .type = _type, \
+ .indexed = 1, \
+ .address = _add, \
+ .channel = _channel, \
+ .extend_name = _name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ }
+
+#define channel_spec(chan) \
+ channel_type(IIO_VOLTAGE, 0, chan, NULL), \
+ channel_type(IIO_CURRENT, 0, chan, NULL), \
+ channel_type(IIO_CURRENT, 1, chan, "trigger"), \
+ channel_type(IIO_POWER, 0, chan, NULL), \
+ channel_type(IIO_POWER, 1, chan, "trigger")
+
+static const struct iio_chan_spec ina3221_channels_spec[] = {
+ channel_spec(0),
+ channel_spec(1),
+ channel_spec(2),
+};
+
+static const struct iio_info ina3221_info = {
+ .attrs = &ina3221_groups,
+ .driver_module = THIS_MODULE,
+ .read_raw = &ina3221_read_raw,
+};
+
+static struct ina3221_platform_data *ina3221_get_platform_data_dt(
+ struct i2c_client *client)
+{
+ struct ina3221_platform_data *pdata;
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ u32 reg;
+ int ret;
+ u32 pval;
+ int valid_channel = 0;
+
+ if (!np) {
+ dev_err(&client->dev, "Only DT supported\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&client->dev, "pdata allocation failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = of_property_read_u32(np, "ti,continuous-config", &pval);
+ if (!ret)
+ pdata->cont_conf_data = (u16)pval;
+
+ ret = of_property_read_u32(np, "ti,trigger-config", &pval);
+ if (!ret)
+ pdata->trig_conf_data = (u16)pval;
+
+ for_each_child_of_node(np, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret || reg >= 3) {
+ dev_err(dev, "reg property invalid on node %s\n",
+ child->name);
+ continue;
+ }
+
+ pdata->cpdata[reg].rail_name = of_get_property(child,
+ "ti,rail-name", NULL);
+ if (!pdata->cpdata[reg].rail_name) {
+ dev_err(dev, "Rail name is not provided on node %s\n",
+ child->full_name);
+ continue;
+ }
+
+ ret = of_property_read_u32(child, "ti,current-warning-limit-ma",
+ &pval);
+ if (!ret)
+ pdata->cpdata[reg].warn_conf_limits = pval;
+ else
+ pdata->cpdata[reg].warn_conf_limits = U32_MINUS_1;
+
+ ret = of_property_read_u32(child,
+ "ti,current-critical-limit-ma", &pval);
+ if (!ret)
+ pdata->cpdata[reg].crit_conf_limits = pval;
+ else
+ pdata->cpdata[reg].crit_conf_limits = U32_MINUS_1;
+
+ ret = of_property_read_u32(child, "ti,shunt-resistor-mohm",
+ &pval);
+ if (!ret)
+ pdata->cpdata[reg].shunt_resistor = pval;
+
+ valid_channel++;
+ }
+
+ if (!valid_channel)
+ return ERR_PTR(-EINVAL);
+
+ return pdata;
+}
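+
+/*
+ * Example device tree fragment consumed by the parser above (node name,
+ * compatible string and values are illustrative):
+ *
+ * ina3221x@40 {
+ * compatible = "ti,ina3221x";
+ * reg = <0x40>;
+ * ti,continuous-config = <0x7c07>;
+ * ti,trigger-config = <0x7c03>;
+ * channel@0 {
+ * reg = <0>;
+ * ti,rail-name = "VDD_CPU";
+ * ti,current-critical-limit-ma = <5000>;
+ * ti,shunt-resistor-mohm = <10>;
+ * };
+ * };
+ */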
+
+static int ina3221_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ina3221_chip *chip;
+ struct iio_dev *indio_dev;
+ struct ina3221_platform_data *pdata;
+ int ret;
+
+ pdata = ina3221_get_platform_data_dt(client);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ dev_err(&client->dev, "platform data processing failed %d\n",
+ ret);
+ return ret;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation fails\n");
+ return -ENOMEM;
+ }
+
+ chip = iio_priv(indio_dev);
+ chip->dev = &client->dev;
+ chip->client = client;
+ i2c_set_clientdata(client, indio_dev);
+ chip->pdata = pdata;
+ mutex_init(&chip->mutex);
+
+ chip->mode = TRIGGERED;
+ chip->shutdown_complete = 0;
+ chip->is_suspended = 0;
+
+ indio_dev->info = &ina3221_info;
+ indio_dev->channels = ina3221_channels_spec;
+ indio_dev->num_channels = ARRAY_SIZE(ina3221_channels_spec);
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = devm_iio_device_register(chip->dev, indio_dev);
+ if (ret < 0) {
+ dev_err(chip->dev, "iio registration fails with error %d\n",
+ ret);
+ return ret;
+ }
+
+ /* reset ina3221 */
+ ret = i2c_smbus_write_word_data(client, INA3221_CONFIG,
+ cpu_to_be16(INA3221_RESET));
+ if (ret < 0) {
+ dev_err(&client->dev, "ina3221 reset failure status: 0x%x\n",
+ ret);
+ return ret;
+ }
+
+ chip->nb_hot.notifier_call = ina3221_hotplug_notify;
+ chip->nb_cpufreq.notifier_call = ina3221_cpufreq_notify;
+ register_hotcpu_notifier(&(chip->nb_hot));
+ cpufreq_register_notifier(&(chip->nb_cpufreq),
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ /* the notifiers are already registered, so take the lock here */
+ mutex_lock(&chip->mutex);
+ ret = __locked_set_crit_warn_limits(chip);
+ if (ret < 0) {
+ dev_info(&client->dev, "Not able to set warn and crit limits!\n");
+ /* Not an error condition; let the probe continue */
+ }
+
+ /* set ina3221 to power down mode */
+ ret = __locked_power_down_ina3221(chip);
+ mutex_unlock(&chip->mutex);
+ if (ret < 0) {
+ dev_err(&client->dev, "INA power down failed: %d\n", ret);
+ goto exit_pd;
+ }
+ return 0;
+
+exit_pd:
+ unregister_hotcpu_notifier(&(chip->nb_hot));
+ cpufreq_unregister_notifier(&(chip->nb_cpufreq),
+ CPUFREQ_TRANSITION_NOTIFIER);
+ return ret;
+}
+
+static int ina3221_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ina3221_chip *chip = iio_priv(indio_dev);
+
+ mutex_lock(&chip->mutex);
+ __locked_power_down_ina3221(chip);
+ mutex_unlock(&chip->mutex);
+ unregister_hotcpu_notifier(&(chip->nb_hot));
+ cpufreq_unregister_notifier(&(chip->nb_cpufreq),
+ CPUFREQ_TRANSITION_NOTIFIER);
+ return 0;
+}
+
+static void ina3221_shutdown(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ina3221_chip *chip = iio_priv(indio_dev);
+
+ mutex_lock(&chip->mutex);
+ __locked_power_down_ina3221(chip);
+ chip->shutdown_complete = 1;
+ mutex_unlock(&chip->mutex);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ina3221_suspend(struct device *dev)
+{
+ struct ina3221_chip *chip = to_ina3221_chip(dev);
+ int ret = 0;
+
+ mutex_lock(&chip->mutex);
+ ret = __locked_power_down_ina3221(chip);
+ if (ret < 0) {
+ dev_err(dev, "INA can't be turned off: 0x%x\n", ret);
+ goto error;
+ }
+ if (chip->mode == CONTINUOUS)
+ chip->mode = TRIGGERED;
+ chip->is_suspended = 1;
+error:
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+
+static int ina3221_resume(struct device *dev)
+{
+ struct ina3221_chip *chip = to_ina3221_chip(dev);
+ int cpufreq, cpus;
+ int ret = 0;
+
+ mutex_lock(&chip->mutex);
+ if (chip->mode == FORCED_CONTINUOUS) {
+ ret = __locked_power_up_ina3221(chip,
+ chip->pdata->cont_conf_data);
+ } else {
+ cpufreq = cpufreq_quick_get(0);
+ cpus = num_online_cpus();
+ ret = __locked_ina3221_switch_mode(chip, cpus, cpufreq);
+ }
+ if (ret < 0)
+ dev_err(dev, "INA can't be turned off/on: 0x%x\n", ret);
+ chip->is_suspended = 0;
+ mutex_unlock(&chip->mutex);
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops ina3221_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ina3221_suspend,
+ ina3221_resume)
+};
+
+static const struct i2c_device_id ina3221_id[] = {
+ {.name = "ina3221x",},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ina3221_id);
+
+static struct i2c_driver ina3221_driver = {
+ .driver = {
+ .name = "ina3221x",
+ .owner = THIS_MODULE,
+ .pm = &ina3221_pm_ops,
+ },
+ .probe = ina3221_probe,
+ .remove = ina3221_remove,
+ .shutdown = ina3221_shutdown,
+ .id_table = ina3221_id,
+};
+
+module_i2c_driver(ina3221_driver);
+
+MODULE_DESCRIPTION("TI INA3221 3-Channel Shunt and Bus Voltage Monitor");
+MODULE_AUTHOR("Deepak Nibade <dnibade@nvidia.com>");
+MODULE_AUTHOR("Timo Alho <talho@nvidia.com>");
+MODULE_AUTHOR("Anshul Jain <anshulj@nvidia.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/pressure/Kconfig b/drivers/staging/iio/pressure/Kconfig
new file mode 100644
index 000000000000..b20571ea63bb
--- /dev/null
+++ b/drivers/staging/iio/pressure/Kconfig
@@ -0,0 +1,13 @@
+#
+# Pressure sensors
+#
+menu "Pressure sensors"
+config SENSORS_BMP180
+ tristate "BMP 180 pressure sensor"
+ depends on I2C
+ help
+ Say Y here to get support for the Bosch BMP180 pressure sensor.
+ The driver reports barometric pressure readings via sysfs and lets
+ you select the measurement precision (oversampling setting) through
+ sysfs as well.
+endmenu
diff --git a/drivers/staging/iio/pressure/Makefile b/drivers/staging/iio/pressure/Makefile
new file mode 100644
index 000000000000..4e01653469d6
--- /dev/null
+++ b/drivers/staging/iio/pressure/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Pressure sensors
+#
+GCOV_PROFILE := y
+
+obj-$(CONFIG_SENSORS_BMP180) += bmp180.o
diff --git a/drivers/staging/iio/pressure/bmp180.c b/drivers/staging/iio/pressure/bmp180.c
new file mode 100644
index 000000000000..b1ffbf55ddbc
--- /dev/null
+++ b/drivers/staging/iio/pressure/bmp180.c
@@ -0,0 +1,421 @@
+/*
+ * A driver for pressure sensor BMP180.
+ *
+ * BMP180 pressure sensor driver to detect pressure
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/regmap.h>
+
+/* 16-bit calibration register addresses */
+#define BMP180_REG_AC1 0xAA
+#define BMP180_REG_AC2 0xAC
+#define BMP180_REG_AC3 0xAE
+#define BMP180_REG_AC4 0xB0
+#define BMP180_REG_AC5 0xB2
+#define BMP180_REG_AC6 0xB4
+#define BMP180_REG_B1 0xB6
+#define BMP180_REG_B2 0xB8
+#define BMP180_REG_MB 0xBA
+#define BMP180_REG_MC 0xBC
+#define BMP180_REG_MD 0xBE
+
+/* 8-bit register addresses */
+#define BMP180_REG_ID 0xD0
+#define BMP180_REG_CTRL 0xF4
+#define BMP180_REG_OUT_MSB 0xF6
+#define BMP180_REG_OUT_LSB 0xF7
+#define BMP180_REG_OUT_XLSB 0xF8
+
+/* control register values */
+#define BMP180_CTRL_PINIT 0x34
+#define BMP180_CTRL_TINIT 0x2E
+
+/* print buffer size; max pressure read by chip is 1100 hPa */
+#define BMP180_MAX_DIGIT 10
+
+/* conversion delays in ms for oss = 0..3 */
+#define BMP180_DELAY_ULP 5
+#define BMP180_DELAY_ST 8
+#define BMP180_DELAY_HIGH_RES 14
+#define BMP180_DELAY_UHIGH_RES 26
+
+struct bmp180_chip {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct regmap *regmap;
+ /* calibration data register values for the chip */
+ s16 ac1;
+ s16 ac2;
+ s16 ac3;
+ u16 ac4;
+ u16 ac5;
+ u16 ac6;
+ s16 b1;
+ s16 b2;
+ s16 mb;
+ s16 mc;
+ s16 md;
+ /* calibration data end */
+ u8 oss;
+ long UT; /* uncompensated temperature */
+ long UP; /* uncompensated pressure */
+ long pressure; /* final pressure in hPa/100 Pa/1 mBar */
+ u16 delay;
+};
+
+static int bmp180_write_data(struct i2c_client *client, u8 reg_addr,
+ u8 value)
+{
+ struct bmp180_chip *chip = i2c_get_clientdata(client);
+
+ return regmap_write(chip->regmap, reg_addr, value);
+}
+
+static int bmp180_read_data(struct i2c_client *client, u8 reg_addr, u16 *val)
+{
+ struct bmp180_chip *chip = i2c_get_clientdata(client);
+ unsigned int regval;
+ int ret;
+
+ /*
+ * regmap_read() fills a full unsigned int; read into a local to
+ * avoid overrunning the caller's u16.
+ */
+ ret = regmap_read(chip->regmap, reg_addr, &regval);
+ if (ret < 0)
+ return ret;
+ *val = regval;
+ return 0;
+}
+
+static int bmp180_read_word(struct i2c_client *client, u8 reg_msb_addr,
+ u16 *val)
+{
+ int ret_val;
+ u16 msb, lsb;
+
+ ret_val = bmp180_read_data(client, reg_msb_addr, &msb);
+ if (ret_val < 0) {
+ dev_err(&client->dev, "Error reading msb data from sensor\n");
+ return ret_val;
+ }
+ ret_val = bmp180_read_data(client, reg_msb_addr + 1, &lsb);
+ if (ret_val < 0) {
+ dev_err(&client->dev, "Error reading lsb data from sensor\n");
+ return ret_val;
+ }
+ *val = ((u8)msb << 8) | (u8)lsb;
+ return 0;
+}
+
+static ssize_t bmp180_update_oss(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bmp180_chip *chip = dev_get_drvdata(dev);
+ int ret;
+ u8 oss;
+
+ /* kstrtou8() returns 0 on success and a negative errno on error */
+ ret = kstrtou8(buf, 10, &oss);
+ if (ret < 0)
+ return ret;
+ if (oss > 3)
+ return -EINVAL;
+
+ mutex_lock(&chip->lock);
+ chip->oss = oss;
+ switch (oss) {
+ case 0:
+ chip->delay = BMP180_DELAY_ULP;
+ break;
+ case 1:
+ chip->delay = BMP180_DELAY_ST;
+ break;
+ case 2:
+ chip->delay = BMP180_DELAY_HIGH_RES;
+ break;
+ case 3:
+ chip->delay = BMP180_DELAY_UHIGH_RES;
+ break;
+ }
+ mutex_unlock(&chip->lock);
+ return count;
+}
+
+static int bmp180_chip_init(struct i2c_client *client)
+{
+ struct bmp180_chip *chip = i2c_get_clientdata(client);
+ int ret;
+ u16 val;
+
+ dev_dbg(&client->dev, "%s called\n", __func__);
+ mutex_lock(&chip->lock);
+
+ chip->client = client;
+
+ chip->UT = 0;
+ chip->UP = 0;
+ chip->pressure = 0;
+ chip->oss = 0;
+ chip->delay = BMP180_DELAY_ULP;
+ ret = bmp180_read_word(client, BMP180_REG_AC1, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->ac1 = (s16)val;
+ ret = bmp180_read_word(client, BMP180_REG_AC2, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->ac2 = (s16)val;
+ ret = bmp180_read_word(client, BMP180_REG_AC3, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->ac3 = (s16)val;
+ ret = bmp180_read_word(client, BMP180_REG_AC4, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->ac4 = (u16)val;
+ ret = bmp180_read_word(client, BMP180_REG_AC5, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->ac5 = (u16)val;
+ ret = bmp180_read_word(client, BMP180_REG_AC6, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->ac6 = (u16)val;
+ ret = bmp180_read_word(client, BMP180_REG_B1, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->b1 = (s16)val;
+ ret = bmp180_read_word(client, BMP180_REG_B2, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->b2 = (s16)val;
+ ret = bmp180_read_word(client, BMP180_REG_MB, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->mb = (s16)val;
+ ret = bmp180_read_word(client, BMP180_REG_MC, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->mc = (s16)val;
+ ret = bmp180_read_word(client, BMP180_REG_MD, &val);
+ if (ret < 0)
+ goto error;
+ else
+ chip->md = (s16)val;
+ mutex_unlock(&chip->lock);
+ return 0;
+error:
+ dev_err(&client->dev, "Error in reading register\n");
+ return -1;
+}
+
+static long bmp180_convert_UP(struct bmp180_chip *chip)
+{
+ long X1, X2, X3, B3, B4, B5, B6, B7;
+
+ /* integer compensation algorithm per the BMP180 datasheet */
+ X1 = ((chip->UT - chip->ac6) * chip->ac5) >> 15;
+ X2 = ((long)chip->mc << 11) / (X1 + chip->md);
+ B5 = X1 + X2;
+ B6 = B5 - 4000;
+ X1 = (chip->b2 * ((B6 * B6) >> 12)) >> 11;
+ X2 = (chip->ac2 * B6) >> 11;
+ X3 = X1 + X2;
+ B3 = ((((chip->ac1 << 2) + X3) << chip->oss) + 2) >> 2;
+ X1 = (chip->ac3 * B6) >> 13;
+ X2 = (chip->b1 * ((B6 * B6) >> 12)) >> 16;
+ X3 = ((X1 + X2) + 2) >> 2;
+ B4 = chip->ac4 * ((u32)(X3 + 32768)) >> 15;
+ B7 = ((u32)chip->UP - B3) * (50000 >> chip->oss);
+ if (B7 < 0x80000000)
+ chip->pressure = (B7 * 2) / B4;
+ else
+ chip->pressure = (B7 / B4) * 2;
+ X1 = (chip->pressure >> 8) * (chip->pressure >> 8);
+ X1 = (X1 * 3038) >> 16;
+ X2 = (-7357 * chip->pressure) >> 16;
+ chip->pressure += (X1 + X2 + 3791) >> 4;
+ return chip->pressure;
+}
+
+static ssize_t bmp180_read_pressure(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bmp180_chip *chip;
+ struct i2c_client *client;
+ int ret;
+ long temp_UT, temp_UP;
+ u16 val;
+ u8 xlsb;
+
+ chip = dev_get_drvdata(dev);
+ client = chip->client;
+ dev_dbg(&client->dev, "%s\n called\n", __func__);
+ ret = bmp180_write_data(client, BMP180_REG_CTRL, BMP180_CTRL_TINIT);
+ if (ret < 0)
+ goto exit;
+ mdelay(5);
+ ret = bmp180_read_word(client, BMP180_REG_OUT_MSB, &val);
+ if (ret < 0)
+ goto exit;
+ temp_UT = (long)val;
+ ret = bmp180_write_data(client, BMP180_REG_CTRL,
+ BMP180_CTRL_PINIT+(chip->oss<<6));
+ if (ret < 0)
+ goto exit;
+ mdelay(chip->delay);
+ ret = bmp180_read_word(client, BMP180_REG_OUT_MSB, &val);
+ if (ret < 0)
+ goto exit;
+ temp_UP = (long)val;
+ if (chip->oss > 0) {
+ ret = bmp180_read_data(client, BMP180_REG_OUT_XLSB, &val);
+ if (ret < 0)
+ goto exit;
+ xlsb = (u8)val;
+ temp_UP = (temp_UP << chip->oss) + xlsb;
+ }
+ mutex_lock(&chip->lock);
+ chip->UT = temp_UT;
+ chip->UP = temp_UP;
+ bmp180_convert_UP(chip);
+ dev_dbg(&client->dev, "pressure value read %lu\n\n", chip->pressure);
+ ret = snprintf(buf, BMP180_MAX_DIGIT, "%lu", chip->pressure);
+ mutex_unlock(&chip->lock);
+ if (ret > 0)
+ return ret;
+exit:
+ dev_err(&client->dev, "R/W operation failed\n");
+ return ret < 0 ? ret : -EIO;
+}
+
+static DEVICE_ATTR(pressure, 0444, bmp180_read_pressure, NULL);
+static DEVICE_ATTR(oss, 0220, NULL, bmp180_update_oss);
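+
+/*
+ * Example sysfs usage (the device path is illustrative and depends on the
+ * bus/address the sensor is wired to):
+ * echo 2 > /sys/bus/i2c/devices/0-0077/oss
+ * cat /sys/bus/i2c/devices/0-0077/pressure
+ */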
+
+static struct attribute *bmp180_attrs[] = {
+ &dev_attr_pressure.attr,
+ &dev_attr_oss.attr,
+ NULL
+};
+
+static const struct attribute_group bmp180_attr_group = {
+ .attrs = bmp180_attrs,
+};
+
+static bool is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP180_REG_ID:
+ case BMP180_REG_CTRL:
+ case BMP180_REG_OUT_MSB:
+ case BMP180_REG_OUT_LSB:
+ case BMP180_REG_OUT_XLSB:
+ return true;
+ default:
+ return false;
+ }
+}
+
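+/*
+ * The calibration registers never change, so they can live in the regmap
+ * cache; the control and data-output registers are marked volatile above
+ * so every access goes out on the bus.
+ */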
+static const struct regmap_config bmp180_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_reg = is_volatile_reg,
+ .max_register = BMP180_REG_OUT_XLSB,
+ .num_reg_defaults_raw = BMP180_REG_OUT_XLSB + 1,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int bmp180_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bmp180_chip *chip;
+ int error = 0;
+
+ dev_dbg(&client->dev, "%s\n called\n", __func__);
+ chip = devm_kzalloc(&client->dev, sizeof(struct bmp180_chip),
+ GFP_KERNEL);
+ if (!chip) {
+ dev_err(&client->dev, "Memory Allocation Failed\n");
+ error = -ENOMEM;
+ goto exit;
+ }
+ i2c_set_clientdata(client, chip);
+ mutex_init(&chip->lock);
+ chip->regmap = devm_regmap_init_i2c(client, &bmp180_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ error = PTR_ERR(chip->regmap);
+ dev_err(&client->dev, "regmap initialization failed: %d\n",
+ error);
+ goto exit;
+ }
+ error = bmp180_chip_init(client);
+ if (error < 0) {
+ dev_err(&client->dev, "Probe failed in chip init\n");
+ goto exit;
+ }
+ error = sysfs_create_group(&client->dev.kobj, &bmp180_attr_group);
+ if (error) {
+ dev_err(&client->dev, "Failed to create sysfs group\n");
+ goto exit;
+ }
+ return 0;
+exit:
+ return error;
+}
+
+static int bmp180_remove(struct i2c_client *client)
+{
+ struct bmp180_chip *chip = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "%s\n called\n", __func__);
+ sysfs_remove_group(&client->dev.kobj, &bmp180_attr_group);
+ mutex_destroy(&chip->lock);
+ return 0;
+}
+
+
+static const struct i2c_device_id bmp180_id[] = {
+ {"bmp180", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, bmp180_id);
+
+static struct i2c_driver bmp180_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "bmp180",
+ .owner = THIS_MODULE,
+ },
+ .probe = bmp180_probe,
+ .remove = bmp180_remove,
+ .id_table = bmp180_id
+};
+
+module_i2c_driver(bmp180_driver);
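+
+/*
+ * Illustrative board-file hookup (the bus number is an assumption; the
+ * BMP180 always responds at I2C address 0x77):
+ *
+ * static struct i2c_board_info bmp180_board_info __initdata = {
+ * I2C_BOARD_INFO("bmp180", 0x77),
+ * };
+ * i2c_register_board_info(0, &bmp180_board_info, 1);
+ */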
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sumit Sharma <sumsharma@nvidia.com>");
+MODULE_DESCRIPTION("Pressure Sensor BMP180 driver");
diff --git a/drivers/staging/nvshm/Kconfig b/drivers/staging/nvshm/Kconfig
new file mode 100644
index 000000000000..cf43188a0e30
--- /dev/null
+++ b/drivers/staging/nvshm/Kconfig
@@ -0,0 +1,17 @@
+config NVSHM
+ bool "NV Tegra Baseband Shared Memory Interface"
+ depends on ARCH_TEGRA
+ depends on TEGRA_BASEBAND
+ depends on SUNRPC
+ help
+ Say Y here to enable support for the high-level shared memory
+ interface driver on the NVIDIA Tegra integrated baseband.
+
+config NVSHM_RPC_RSM
+ bool "RPC mechanism for the resource manager"
+ default y
+ depends on NVSHM
+ depends on TEGRA_BBC_PROXY
+ help
+ This provides the support for RSM (resource management) via the
+ shared memory interface's RPC (remote procedure call) channel.
diff --git a/drivers/staging/nvshm/Makefile b/drivers/staging/nvshm/Makefile
new file mode 100644
index 000000000000..267b483f2aa9
--- /dev/null
+++ b/drivers/staging/nvshm/Makefile
@@ -0,0 +1,16 @@
+subdir-ccflags-y := -Werror
+
+nvshm-objs := nvshm_init.o
+nvshm-objs += nvshm_ipc.o
+nvshm-objs += nvshm_queue.o
+nvshm-objs += nvshm_iobuf.o
+nvshm-objs += nvshm_tty.o
+nvshm-objs += nvshm_net.o
+nvshm-objs += nvshm_if.o
+nvshm-objs += nvshm_rpc.o
+nvshm-objs += nvshm_rpc_utils.o
+nvshm-objs += nvshm_rpc_dispatcher.o
+nvshm-objs += nvshm_stats.o
+
+obj-$(CONFIG_NVSHM) += nvshm.o
+obj-$(CONFIG_NVSHM_RPC_RSM) += nvshm_rpc_prog_rsm.o
diff --git a/drivers/staging/nvshm/nvshm_if.c b/drivers/staging/nvshm/nvshm_if.c
new file mode 100644
index 000000000000..8ff030bd2f2f
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_if.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/debugfs.h>
+
+#include "nvshm_types.h"
+#include "nvshm_if.h"
+#include "nvshm_priv.h"
+#include "nvshm_ipc.h"
+#include "nvshm_queue.h"
+#include "nvshm_iobuf.h"
+
+struct nvshm_channel *nvshm_open_channel(int chan,
+ struct nvshm_if_operations *ops,
+ void *interface_data)
+{
+ struct nvshm_handle *handle = nvshm_get_handle();
+
+ pr_debug("%s(%d)\n", __func__, chan);
+ spin_lock(&handle->lock);
+ if (handle->chan[chan].ops) {
+ pr_err("%s: already registered on chan %d\n", __func__, chan);
+ /* do not leak the spinlock on the error path */
+ spin_unlock(&handle->lock);
+ return NULL;
+ }
+
+ handle->chan[chan].ops = ops;
+ handle->chan[chan].data = interface_data;
+ spin_unlock(&handle->lock);
+ return &handle->chan[chan];
+}
+
+void nvshm_close_channel(struct nvshm_channel *handle)
+{
+ struct nvshm_handle *priv = nvshm_get_handle();
+
+ /*
+ * We cannot flush the work queue here: nvshm_close_channel() is
+ * called from cleanup_interfaces(), which itself executes in the
+ * context of the work queue.
+ *
+ * Flushing is also unnecessary, since the main work queue handler
+ * always checks the state of the IPC.
+ */
+
+ spin_lock(&priv->lock);
+ priv->chan[handle->index].ops = NULL;
+ priv->chan[handle->index].data = NULL;
+ spin_unlock(&priv->lock);
+}
+
+int nvshm_write(struct nvshm_channel *handle, struct nvshm_iobuf *iob)
+{
+ struct nvshm_handle *priv = nvshm_get_handle();
+ struct nvshm_iobuf *list, *leaf;
+ int count = 0, ret = 0;
+
+ spin_lock_bh(&priv->lock);
+ if (!priv->chan[handle->index].ops) {
+ pr_err("%s: channel not mapped\n", __func__);
+ spin_unlock_bh(&priv->lock);
+ return -EINVAL;
+ }
+
+ list = iob;
+ while (list) {
+ count++;
+ leaf = list->sg_next;
+ while (leaf) {
+ count++;
+ leaf = NVSHM_B2A(priv, leaf);
+ leaf = leaf->sg_next;
+ }
+ list = list->next;
+ if (list)
+ list = NVSHM_B2A(priv, list);
+ }
+ priv->chan[handle->index].rate_counter -= count;
+ if (priv->chan[handle->index].rate_counter < 0) {
+ priv->chan[handle->index].xoff = 1;
+ pr_warn("%s: rate limit hit on chan %d\n", __func__,
+ handle->index);
+ ret = 1;
+ }
+
+ iob->chan = handle->index;
+ iob->qnext = NULL;
+ nvshm_queue_put(priv, iob);
+ nvshm_generate_ipc(priv);
+ spin_unlock_bh(&priv->lock);
+ return ret;
+}
+
+/* Deferred to nvshm_wq because it can be called from atomic context */
+void nvshm_start_tx(struct nvshm_channel *handle)
+{
+ struct nvshm_handle *priv = nvshm_get_handle();
+ queue_work(priv->nvshm_wq, &handle->start_tx_work);
+}
diff --git a/drivers/staging/nvshm/nvshm_if.h b/drivers/staging/nvshm/nvshm_if.h
new file mode 100644
index 000000000000..bd55d0c2e22a
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_if.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2012-2013 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NVSHM_IF_H
+#define _NVSHM_IF_H
+
+/* Error type */
+enum nvshm_error_id {
+ NVSHM_NO_ERROR = 0,
+ NVSHM_RESTART,
+ NVSHM_IOBUF_ERROR,
+ NVSHM_UNKNOWN_ERROR
+};
+
+/*
+ * Interface operations - upper layer interface
+ *
+ * Important note on the nvshm_iobuf structure:
+ * ALL pointers inside the structure are in BB memory space.
+ * Reads/writes through these pointers must go via the conversion
+ * macros (NVSHM_A2B/NVSHM_B2A); NULL tests/assignments need no macro.
+ * nvshm_iobuf pointers passed as parameters or returned are in the
+ * cached IPC area in kernel space.
+ * See nvshm_iobuf.h for the macro reference.
+ */
+struct nvshm_if_operations {
+ /**
+ * rx_event
+ *
+ * This is called by the NVSHM core when an event
+ * and/or a packet of data is received.
+ * The receiver should consume all iobufs in the given list.
+ * Note that a packet can be fragmented via ->sg_next
+ * and multiple packets can be linked via ->next.
+ *
+ * @param struct nvshm_channel channel handle
+ * @param struct nvshm_iobuf holding received data
+ */
+ void (*rx_event)(struct nvshm_channel *handle, struct nvshm_iobuf *iob);
+
+ /**
+ * error_event
+ *
+ * This is called by the NVSHM core when an error event is
+ * received
+ *
+ * @param struct nvshm_channel channel handle
+ * @param error type of error see enum nvshm_error_id
+ */
+ void (*error_event)(struct nvshm_channel *handle,
+ enum nvshm_error_id error);
+
+ /**
+ * start_tx
+ *
+ * This is called by the NVSHM core to restart Tx
+ * after flow control off
+ *
+ * @param struct nvshm_channel channel handle
+ */
+ void (*start_tx)(struct nvshm_channel *handle);
+};
+
+/**
+ * nvshm_open_channel
+ *
+ * This is used to register a new interface on a specified channel
+ *
+ * @param int channel to open
+ * @param ops interface operations
+ * @param void * interface data pointer (private)
+ * @return struct nvshm_channel channel handle
+ */
+struct nvshm_channel *nvshm_open_channel(int chan,
+ struct nvshm_if_operations *ops,
+ void *interface_data);
+
+/**
+ * nvshm_close_channel
+ *
+ * This is used to unregister an interface on specified channel
+ *
+ * @param struct nvshm_channel channel handle
+ *
+ */
+void nvshm_close_channel(struct nvshm_channel *handle);
+
+/**
+ * write an iobuf chain to an NVSHM channel
+ *
+ * Note that a packet can be fragmented via ->sg_next
+ * and multiple packets can be linked via ->next.
+ * The passed iobuf must have a ref count of exactly 1 or the write will fail.
+ *
+ * @param struct nvshm_channel handle
+ * @param struct nvshm_iobuf holding packet to write
+ *
+ * @return 0 if write is ok, 1 if flow control is XOFF, negative for error
+ */
+int nvshm_write(struct nvshm_channel *handle, struct nvshm_iobuf *iob);
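+
+/*
+ * Minimal TX sketch (error handling elided; "handle" is the struct
+ * nvshm_handle used for address conversion and "chan" comes from
+ * nvshm_open_channel()):
+ *
+ * struct nvshm_iobuf *iob = nvshm_iobuf_alloc(chan, len);
+ * if (iob) {
+ * memcpy(NVSHM_IOBUF_PAYLOAD(handle, iob), data, len);
+ * iob->length = len;
+ * ret = nvshm_write(chan, iob);
+ * }
+ */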
+
+/**
+ * Start TX on nvshm channel
+ *
+ * Used to signal the upper driver to start tx again
+ * after an XOFF situation.
+ * Can be called from irq context
+ *
+ * @param struct nvshm_channel
+ *
+ */
+void nvshm_start_tx(struct nvshm_channel *handle);
+
+#endif /* _NVSHM_IF_H */
diff --git a/drivers/staging/nvshm/nvshm_init.c b/drivers/staging/nvshm/nvshm_init.c
new file mode 100644
index 000000000000..00528543016d
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_init.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2012-2013 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+
+#include <linux/platform_data/nvshm.h>
+#include <asm/mach/map.h>
+
+#include "nvshm_types.h"
+#include "nvshm_if.h"
+#include "nvshm_priv.h"
+#include "nvshm_ipc.h"
+#include "nvshm_iobuf.h"
+
+static struct nvshm_handle *_nvshm_instance;
+
+static int nvshm_probe(struct platform_device *pdev)
+{
+ struct nvshm_handle *handle = NULL;
+ struct tegra_bb_platform_data *bb_pdata;
+ struct nvshm_platform_data *pdata =
+ pdev->dev.platform_data;
+
+ if (!pdata) {
+ pr_err("%s platform_data not available\n", __func__);
+ return -EINVAL;
+ }
+
+ handle = kzalloc(sizeof(struct nvshm_handle), GFP_KERNEL);
+ if (handle == NULL) {
+ pr_err("%s failed to allocate memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ _nvshm_instance = handle;
+
+ spin_lock_init(&handle->lock);
+ spin_lock_init(&handle->qlock);
+
+ wake_lock_init(&handle->ul_lock, WAKE_LOCK_SUSPEND, "SHM-UL");
+ wake_lock_init(&handle->dl_lock, WAKE_LOCK_SUSPEND, "SHM-DL");
+
+ handle->ipc_base_virt = pdata->ipc_base_virt;
+ handle->ipc_size = pdata->ipc_size;
+
+ handle->mb_base_virt = pdata->mb_base_virt;
+ handle->mb_size = pdata->mb_size;
+
+ handle->dev = &pdev->dev;
+ handle->instance = pdev->id;
+
+ handle->tegra_bb = pdata->tegra_bb;
+ bb_pdata = handle->tegra_bb->dev.platform_data;
+
+ handle->bb_irq = pdata->bb_irq;
+ platform_set_drvdata(pdev, handle);
+ nvshm_register_ipc(handle);
+ return 0;
+}
+
+static int __exit nvshm_remove(struct platform_device *pdev)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static int nvshm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static int nvshm_resume(struct platform_device *pdev)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static struct platform_driver nvshm_driver = {
+ .driver = {
+ .name = "nvshm",
+ .owner = THIS_MODULE,
+ },
+ .probe = nvshm_probe,
+ .remove = __exit_p(nvshm_remove),
+#ifdef CONFIG_PM
+ .suspend = nvshm_suspend,
+ .resume = nvshm_resume,
+#endif
+};
+
+inline struct nvshm_handle *nvshm_get_handle(void)
+{
+ return _nvshm_instance;
+}
+
+static int __init nvshm_startup(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&nvshm_driver);
+ pr_debug("%s ret %d\n", __func__, ret);
+ return ret;
+}
+
+static void __exit nvshm_exit(void)
+{
+ struct nvshm_handle *handle = nvshm_get_handle();
+
+ pr_debug("%s\n", __func__);
+ nvshm_tty_cleanup();
+ nvshm_unregister_ipc(handle);
+ wake_lock_destroy(&handle->dl_lock);
+ wake_lock_destroy(&handle->ul_lock);
+ kfree(handle);
+ platform_driver_unregister(&nvshm_driver);
+}
+
+module_init(nvshm_startup);
+module_exit(nvshm_exit);
+
+MODULE_DESCRIPTION("NV Shared Memory Interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/nvshm/nvshm_iobuf.c b/drivers/staging/nvshm/nvshm_iobuf.c
new file mode 100644
index 000000000000..e6b9c7efbf9c
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_iobuf.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvshm_types.h"
+#include "nvshm_if.h"
+#include "nvshm_priv.h"
+#include "nvshm_ipc.h"
+#include "nvshm_iobuf.h"
+#include "nvshm_queue.h"
+/*
+ * Really simple allocator: data is divided into chunks of equal size,
+ * since iobufs are mainly used for tty/net traffic, which is well below 8k.
+ */
+
+#define NVSHM_DEFAULT_OFFSET 32
+#define NVSHM_MAX_FREE_PENDING (16)
+
+struct nvshm_allocator {
+ spinlock_t lock;
+ /* AP free iobufs */
+ struct nvshm_iobuf *free_pool_head;
+ struct nvshm_iobuf *free_pool_tail;
+ /* Freed BBC iobuf to be returned */
+ struct nvshm_iobuf *bbc_pool_head;
+ struct nvshm_iobuf *bbc_pool_tail;
+ int nbuf;
+ int free_count;
+};
+
+static struct nvshm_allocator alloc;
+
+static const char *give_pointer_location(struct nvshm_handle *handle, void *ptr)
+{
+ if (!ptr)
+ return "null";
+
+ ptr = NVSHM_B2A(handle, ptr);
+
+ if (ADDR_OUTSIDE(ptr, handle->desc_base_virt, handle->desc_size)
+ && ADDR_OUTSIDE(ptr, handle->data_base_virt, handle->data_size)) {
+ if (ADDR_OUTSIDE(ptr, handle->ipc_base_virt, handle->ipc_size))
+ return "Err";
+ else
+ return "BBC";
+ }
+
+ return "AP";
+}
+
+/* Accumulate BBC freed iobuf to return them later at end of rx processing */
+/* This saves a lot of CPU/memory cycles on both sides */
+static void bbc_free(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
+{
+ unsigned long f;
+
+ spin_lock_irqsave(&alloc.lock, f);
+ alloc.free_count++;
+ if (alloc.bbc_pool_head) {
+ alloc.bbc_pool_tail->next = NVSHM_A2B(handle, iob);
+ alloc.bbc_pool_tail = iob;
+ } else {
+ alloc.bbc_pool_head = alloc.bbc_pool_tail = iob;
+ }
+ spin_unlock_irqrestore(&alloc.lock, f);
+ if (alloc.free_count > NVSHM_MAX_FREE_PENDING)
+ nvshm_iobuf_bbc_free(handle);
+}
+
+/* Effectively free all iobufs accumulated */
+void nvshm_iobuf_bbc_free(struct nvshm_handle *handle)
+{
+ struct nvshm_iobuf *iob = NULL;
+ unsigned long f;
+
+ spin_lock_irqsave(&alloc.lock, f);
+ if (alloc.bbc_pool_head) {
+ alloc.free_count = 0;
+ iob = alloc.bbc_pool_head;
+ alloc.bbc_pool_head = alloc.bbc_pool_tail = NULL;
+ }
+ spin_unlock_irqrestore(&alloc.lock, f);
+ if (iob) {
+ nvshm_queue_put(handle, iob);
+ nvshm_generate_ipc(handle);
+ }
+}
+
+struct nvshm_iobuf *nvshm_iobuf_alloc(struct nvshm_channel *chan, int size)
+{
+ struct nvshm_handle *handle = nvshm_get_handle();
+ struct nvshm_iobuf *desc = NULL;
+ unsigned long f;
+
+ spin_lock_irqsave(&alloc.lock, f);
+ if (alloc.free_pool_head) {
+ int check = nvshm_iobuf_check(alloc.free_pool_head);
+
+ if (check) {
+ spin_unlock_irqrestore(&alloc.lock, f);
+ pr_err("%s: iobuf check ret %d\n", __func__, check);
+ return NULL;
+ }
+ if (size > (alloc.free_pool_head->total_length -
+ NVSHM_DEFAULT_OFFSET)) {
+ spin_unlock_irqrestore(&alloc.lock, f);
+ pr_err("%s: requested size (%d > %d) too big\n",
+ __func__,
+ size,
+ alloc.free_pool_head->total_length -
+ NVSHM_DEFAULT_OFFSET);
+ if (chan->ops) {
+ chan->ops->error_event(chan,
+ NVSHM_IOBUF_ERROR);
+ }
+ return desc;
+ }
+ desc = alloc.free_pool_head;
+ alloc.free_pool_head = desc->next;
+ if (alloc.free_pool_head) {
+ alloc.free_pool_head = NVSHM_B2A(handle,
+ alloc.free_pool_head);
+ } else {
+ pr_debug("%s end of alloc queue - clearing tail\n",
+ __func__);
+ alloc.free_pool_tail = NULL;
+ }
+ desc->length = 0;
+ desc->flags = 0;
+ desc->data_offset = NVSHM_DEFAULT_OFFSET;
+ desc->sg_next = NULL;
+ desc->next = NULL;
+ desc->ref = 1;
+
+ } else {
+ spin_unlock_irqrestore(&alloc.lock, f);
+ pr_err("%s: no more alloc space\n", __func__);
+ /* No error since it's only Xoff situation */
+ return desc;
+ }
+
+ spin_unlock_irqrestore(&alloc.lock, f);
+
+ return desc;
+}
+
+/* Returned iobufs are already freed - just process them */
+void nvshm_iobuf_process_freed(struct nvshm_iobuf *desc)
+{
+ struct nvshm_handle *priv = nvshm_get_handle();
+ unsigned long f;
+
+ while (desc) {
+ int callback = 0, chan;
+ struct nvshm_iobuf *next = desc->next;
+
+ if (desc->ref != 0) {
+ pr_err("%s: BBC returned an non freed iobuf (0x%x)\n",
+ __func__,
+ (unsigned int)desc);
+ return;
+ }
+
+ chan = desc->chan;
+ spin_lock_irqsave(&alloc.lock, f);
+ /* update rate counter */
+ if ((chan >= 0) &&
+ (chan < NVSHM_MAX_CHANNELS)) {
+ if ((priv->chan[chan].rate_counter++ ==
+ NVSHM_RATE_LIMIT_TRESHOLD)
+ && (priv->chan[chan].xoff)) {
+ priv->chan[chan].xoff = 0;
+ callback = 1;
+ }
+ }
+ desc->sg_next = NULL;
+ desc->next = NULL;
+ desc->length = 0;
+ desc->flags = 0;
+ desc->data_offset = 0;
+ desc->chan = 0;
+ if (alloc.free_pool_tail) {
+ alloc.free_pool_tail->next = NVSHM_A2B(priv,
+ desc);
+ alloc.free_pool_tail = desc;
+ } else {
+ alloc.free_pool_head = desc;
+ alloc.free_pool_tail = desc;
+ }
+ spin_unlock_irqrestore(&alloc.lock, f);
+ if (callback)
+ nvshm_start_tx(&priv->chan[chan]);
+ if (next) {
+ desc = NVSHM_B2A(priv, next);
+ } else {
+ desc = next;
+ }
+ }
+}
+
+/* Single iobuf free - do not follow iobuf links */
+void nvshm_iobuf_free(struct nvshm_iobuf *desc)
+{
+ struct nvshm_handle *priv = nvshm_get_handle();
+ int callback = 0, chan;
+ unsigned long f;
+
+ if (desc->ref == 0) {
+ pr_err("%s: freeing an already freed iobuf (0x%x)\n",
+ __func__,
+ (unsigned int)desc);
+ return;
+ }
+ spin_lock_irqsave(&alloc.lock, f);
+ pr_debug("%s: free 0x%p ref %d pool %x\n", __func__,
+ desc, desc->ref, desc->pool_id);
+ desc->ref--;
+ chan = desc->chan;
+ if (desc->ref == 0) {
+ if (desc->pool_id >= NVSHM_AP_POOL_ID) {
+ /* update rate counter */
+ if ((chan >= 0) &&
+ (chan < NVSHM_MAX_CHANNELS)) {
+ if ((priv->chan[chan].rate_counter++ ==
+ NVSHM_RATE_LIMIT_TRESHOLD)
+ && (priv->chan[chan].xoff)) {
+ priv->chan[chan].xoff = 0;
+ callback = 1;
+ }
+ }
+ desc->sg_next = NULL;
+ desc->next = NULL;
+ desc->length = 0;
+ desc->flags = 0;
+ desc->data_offset = 0;
+ desc->chan = 0;
+ if (alloc.free_pool_tail) {
+ alloc.free_pool_tail->next = NVSHM_A2B(priv,
+ desc);
+ alloc.free_pool_tail = desc;
+ } else {
+ alloc.free_pool_head = desc;
+ alloc.free_pool_tail = desc;
+ }
+ } else {
+ /* iobuf belongs to other side */
+ pr_debug("%s: re-queue freed buffer\n", __func__);
+ desc->sg_next = NULL;
+ desc->next = NULL;
+ desc->length = 0;
+ desc->data_offset = 0;
+ spin_unlock_irqrestore(&alloc.lock, f);
+ bbc_free(priv, desc);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&alloc.lock, f);
+ if (callback)
+ nvshm_start_tx(&priv->chan[chan]);
+}
+
+void nvshm_iobuf_free_cluster(struct nvshm_iobuf *list)
+{
+ struct nvshm_handle *priv = nvshm_get_handle();
+ struct nvshm_iobuf *_phy_list, *_to_free, *leaf;
+ int n = 0;
+
+ _phy_list = list;
+ while (_phy_list) {
+ _to_free = list;
+ if (list->sg_next) {
+ _phy_list = list->sg_next;
+ if (_phy_list) {
+ leaf = NVSHM_B2A(priv, _phy_list);
+ leaf->next = list->next;
+ }
+ } else {
+ _phy_list = list->next;
+ }
+ list = NVSHM_B2A(priv, _phy_list);
+ n++;
+ nvshm_iobuf_free(_to_free);
+ }
+}
+
+int nvshm_iobuf_ref(struct nvshm_iobuf *iob)
+{
+ int ref;
+ unsigned long f;
+
+ spin_lock_irqsave(&alloc.lock, f);
+ ref = iob->ref++;
+ spin_unlock_irqrestore(&alloc.lock, f);
+ return ref;
+}
+
+int nvshm_iobuf_unref(struct nvshm_iobuf *iob)
+{
+ int ref;
+ unsigned long f;
+
+ spin_lock_irqsave(&alloc.lock, f);
+ ref = iob->ref--;
+ spin_unlock_irqrestore(&alloc.lock, f);
+ return ref;
+}
+
+int nvshm_iobuf_ref_cluster(struct nvshm_iobuf *iob)
+{
+ int ref, ret = 0;
+ struct nvshm_iobuf *_phy_list, *_phy_leaf;
+ struct nvshm_handle *handle = nvshm_get_handle();
+
+ _phy_list = iob;
+ while (_phy_list) {
+ _phy_leaf = _phy_list;
+ while (_phy_leaf) {
+ ref = nvshm_iobuf_ref(_phy_leaf);
+ ret = (ref > ret) ? ref : ret;
+ if (_phy_leaf->sg_next) {
+ _phy_leaf = NVSHM_B2A(handle,
+ _phy_leaf->sg_next);
+ } else {
+ _phy_leaf = NULL;
+ }
+ }
+ if (_phy_list->next)
+ _phy_list = NVSHM_B2A(handle, _phy_list->next);
+ else
+ _phy_list = NULL;
+ }
+ return ret;
+}
+
+int nvshm_iobuf_unref_cluster(struct nvshm_iobuf *iob)
+{
+ int ref, ret = 0;
+ struct nvshm_iobuf *_phy_list, *_phy_leaf;
+ struct nvshm_handle *handle = nvshm_get_handle();
+
+ _phy_list = iob;
+ while (_phy_list) {
+ _phy_leaf = _phy_list;
+ while (_phy_leaf) {
+ ref = nvshm_iobuf_unref(_phy_leaf);
+ ret = (ref > ret) ? ref : ret;
+ if (_phy_leaf->sg_next) {
+ _phy_leaf = NVSHM_B2A(handle,
+ _phy_leaf->sg_next);
+ } else {
+ _phy_leaf = NULL;
+ }
+ }
+ if (_phy_list->next)
+ _phy_list = NVSHM_B2A(handle, _phy_list->next);
+ else
+ _phy_list = NULL;
+ }
+
+ return ret;
+}
+
+int nvshm_iobuf_flags(struct nvshm_iobuf *iob,
+ unsigned int set,
+ unsigned int clear)
+{
+ iob->flags &= ~(clear & 0xFFFF);
+ iob->flags |= set & 0xFFFF;
+ return 0;
+}
+
+void nvshm_iobuf_dump(struct nvshm_iobuf *iob)
+{
+ struct nvshm_handle *priv = nvshm_get_handle();
+
+ pr_err("iobuf (0x%p) dump:\n", NVSHM_A2B(priv, iob));
+ pr_err("\t data = 0x%p (%s)\n", iob->npdu_data,
+ give_pointer_location(priv, iob->npdu_data));
+ pr_err("\t length = %d\n", iob->length);
+ pr_err("\t offset = %d\n", iob->data_offset);
+ pr_err("\t total_len = %d\n", iob->total_length);
+ pr_err("\t ref = %d\n", iob->ref);
+ pr_err("\t pool_id = %d (%s)\n", iob->pool_id,
+ (iob->pool_id < NVSHM_AP_POOL_ID) ? "BBC" : "AP");
+ pr_err("\t next = 0x%p (%s)\n", iob->next,
+ give_pointer_location(priv, iob->next));
+ pr_err("\t sg_next = 0x%p (%s)\n", iob->sg_next,
+ give_pointer_location(priv, iob->sg_next));
+ pr_err("\t flags = 0x%x\n", iob->flags);
+ pr_err("\t _size = %d\n", iob->_size);
+ pr_err("\t _handle = 0x%p\n", iob->_handle);
+ pr_err("\t _reserved = 0x%x\n", iob->_reserved);
+ pr_err("\t qnext = 0x%p (%s)\n", iob->qnext,
+ give_pointer_location(priv, iob->qnext));
+ pr_err("\t chan = 0x%x\n", iob->chan);
+ pr_err("\t qflags = 0x%x\n", iob->qflags);
+}
+
+int nvshm_iobuf_check(struct nvshm_iobuf *iob)
+{
+ struct nvshm_handle *priv = nvshm_get_handle();
+ struct nvshm_iobuf *bbiob;
+ int ret = 0;
+
+ /* Check iobuf is in IPC space */
+ if (ADDR_OUTSIDE(iob, priv->ipc_base_virt, priv->ipc_size)) {
+ pr_err("%s: iob @ check failed 0x%lx\n",
+ __func__,
+ (long)iob);
+ return -1;
+ }
+
+ bbiob = NVSHM_A2B(priv, iob);
+
+ if (ADDR_OUTSIDE(iob->npdu_data, NVSHM_IPC_BB_BASE, priv->ipc_size)) {
+ pr_err("%s 0x%lx: npduData @ check failed 0x%lx\n",
+ __func__,
+ (long)bbiob,
+ (long)iob->npdu_data);
+ ret = -2;
+ goto dump;
+ }
+ if (ADDR_OUTSIDE(iob->npdu_data + iob->data_offset,
+ NVSHM_IPC_BB_BASE, priv->ipc_size)) {
+ pr_err("%s 0x%lx: npduData + offset @ check failed 0x%lx/0x%lx\n",
+ __func__, (long)bbiob,
+ (long)iob->npdu_data, (long)iob->data_offset);
+ ret = -3;
+ goto dump;
+ }
+ if (iob->next) {
+ if (ADDR_OUTSIDE(iob->next,
+ NVSHM_IPC_BB_BASE, priv->ipc_size)) {
+ pr_err("%s 0x%lx: next @ check failed 0x%lx\n",
+ __func__,
+ (long)bbiob,
+ (long)iob->next);
+ ret = -4;
+ goto dump;
+ }
+ }
+ if (iob->sg_next) {
+ if (ADDR_OUTSIDE(iob->sg_next,
+ NVSHM_IPC_BB_BASE, priv->ipc_size)) {
+ pr_err("%s 0x%lx:sg_next @ check failed 0x%lx\n",
+ __func__, (long)bbiob, (long)iob->sg_next);
+ ret = -5;
+ goto dump;
+ }
+ }
+ if (iob->qnext) {
+ if (ADDR_OUTSIDE(iob->qnext,
+ NVSHM_IPC_BB_BASE, priv->ipc_size)) {
+ pr_err("%s 0x%lx:qnext @ check failed 0x%lx\n",
+ __func__, (long)bbiob, (long)iob->qnext);
+ ret = -6;
+ goto dump;
+ }
+ }
+
+ return ret;
+dump:
+ nvshm_iobuf_dump(iob);
+ return ret;
+}
+
+int nvshm_iobuf_init(struct nvshm_handle *handle)
+{
+ struct nvshm_iobuf *iob;
+ int ndesc, desc, datasize;
+ unsigned char *dataptr;
+
+ pr_debug("%s instance %d\n", __func__, handle->instance);
+
+ spin_lock_init(&alloc.lock);
+ /* Clear BBC free list */
+ alloc.bbc_pool_head = alloc.bbc_pool_tail = NULL;
+ alloc.free_count = 0;
+ ndesc = handle->desc_size / sizeof(struct nvshm_iobuf);
+ alloc.nbuf = ndesc;
+ datasize = handle->data_size / ndesc;
+ spin_lock(&alloc.lock);
+ if (handle->shared_queue_tail != handle->desc_base_virt) {
+ pr_err("%s initial tail != desc_base_virt not supported yet\n",
+ __func__);
+ }
+ iob = (struct nvshm_iobuf *)handle->desc_base_virt;
+
+ dataptr = handle->data_base_virt;
+ /* Invalidate all data region */
+ INV_CPU_DCACHE(dataptr, handle->data_size);
+ /* Clear all desc region */
+ memset(handle->desc_base_virt, 0, handle->desc_size);
+ /* Dummy queue element */
+ iob->npdu_data = NVSHM_A2B(handle, dataptr);
+ dataptr += datasize;
+ iob->data_offset = NVSHM_DEFAULT_OFFSET;
+ iob->total_length = datasize;
+ iob->chan = -1;
+ iob->next = NULL;
+ iob->pool_id = NVSHM_AP_POOL_ID;
+ iob->ref = 1;
+ alloc.free_pool_head = ++iob;
+ for (desc = 1; desc < (ndesc-1); desc++) {
+ iob->npdu_data = NVSHM_A2B(handle, dataptr);
+ dataptr += datasize;
+ iob->data_offset = NVSHM_DEFAULT_OFFSET;
+ iob->total_length = datasize;
+ iob->next = NVSHM_A2B(handle, (void *)iob +
+ sizeof(struct nvshm_iobuf));
+ iob->pool_id = NVSHM_AP_POOL_ID;
+ iob++;
+ }
+ /* Untied last */
+ iob->npdu_data = NVSHM_A2B(handle, dataptr);
+ iob->data_offset = NVSHM_DEFAULT_OFFSET;
+ iob->total_length = datasize;
+ iob->pool_id = NVSHM_AP_POOL_ID;
+ iob->next = NULL;
+
+ alloc.free_pool_tail = iob;
+ /* Flush all descriptor region */
+ FLUSH_CPU_DCACHE(handle->desc_base_virt,
+ (long)handle->desc_size);
+ spin_unlock(&alloc.lock);
+ return 0;
+}
diff --git a/drivers/staging/nvshm/nvshm_iobuf.h b/drivers/staging/nvshm/nvshm_iobuf.h
new file mode 100644
index 000000000000..1b3bbbb6f0f9
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_iobuf.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _NVSHM_IOBUF_H
+#define _NVSHM_IOBUF_H
+
+/* Baseband base address in BB memory space - this is a constant */
+#define NVSHM_IPC_BB_BASE (0x8C000000)
+
+#define ADDR_OUTSIDE(addr, base, size) (((unsigned long)(addr) \
+ < (unsigned long)(base)) || \
+ ((unsigned long)(addr) \
+ > ((unsigned long)(base) + \
+ (unsigned long)(size))))
+
+/**
+ * NVSHM_B2A convert from Baseband address space
+ * to AP virtual kernel space (cached)
+ *
+ * All iobuf conversion are done from/to cached kernel space
+ *
+ * @param h : struct nvshm_handle pointer
+ * @param x : address to convert
+ * @return : void * pointer in cached kernel space
+ */
+#define NVSHM_B2A(h, x) ((void *)(x) + ((int)(h)->ipc_base_virt) \
+ - NVSHM_IPC_BB_BASE)
+
+/**
+ * NVSHM_A2B convert from AP kernel space (cached) to Baseband address space
+ *
+ * All iobuf conversion are done from/to cached kernel space
+ *
+ * @param h : struct nvshm_handle pointer
+ * @param x : address to convert
+ * @return : void * pointer in BB memory space
+ */
+#define NVSHM_A2B(h, x) ((void *)(x) - ((int)(h)->ipc_base_virt) \
+ + NVSHM_IPC_BB_BASE)
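+
+/*
+ * Example: following a BB-space ->next pointer from the AP side
+ * (sketch; "handle" is a struct nvshm_handle *):
+ *
+ * struct nvshm_iobuf *next = iob->next;
+ * if (next)
+ * next = NVSHM_B2A(handle, next);
+ */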
+
+/**
+* Payload start address in AP virtual memory space
+*
+* @param h : struct nvshm_handle pointer
+* @param b : pointer to the iobuf
+* @return : pointer to payload in cached kernel space
+*/
+#define NVSHM_IOBUF_PAYLOAD(h, b) \
+ NVSHM_B2A((h), (b)->npdu_data + (b)->data_offset)
+
+/**
+ * Alloc a nvshm_iobuf descriptor to be used for write operation
+ * Failure of allocation is considered as an Xoff situation and
+ * will be followed by a call to (*start_tx)() operation when flow
+ * control return to Xon. If excessive size is requested, call to
+ * (*error_event)() with NVSHM_IOBUF_ERROR will be raised synchronously
+ *
+ * @param struct nvshm_channel handle
+ * @param size - data size requested in bytes
+ * @return iobuf pointer or
+ * NULL if no iobuf can be allocated (flow control Xoff)
+ */
+struct nvshm_iobuf *nvshm_iobuf_alloc(struct nvshm_channel *handle, int size);
+
+/**
+ * Free a nvshm_iobuf descriptor given in rx_event
+ * pointers are not followed and cleared on free
+ *
+ * @param struct nvshm_iobuf descriptor to free
+ *
+ */
+void nvshm_iobuf_free(struct nvshm_iobuf *iob);
+
+/**
+ * Free a nvshm_iobuf descriptor list given in rx_event
+ * both ->next and ->sg_next are followed
+ *
+ * @param struct nvshm_iobuf list of descriptor to free
+ *
+ */
+void nvshm_iobuf_free_cluster(struct nvshm_iobuf *list);
+
+/**
+ * clear/set nvshm_iobuf internal flags (unused/unspecified for now)
+ *
+ * @param struct nvshm_iobuf descriptor
+ * @param unsigned int set value
+ * @param unsigned int clear value
+ * @return 0 if no error
+ */
+int nvshm_iobuf_update_bits(struct nvshm_iobuf *iob,
+ unsigned int clear, unsigned int set);
+
+/**
+ * Increase reference count of iobuf
+ *
+ * @param struct nvshm_iobuf descriptor
+ * @return previous ref value
+ */
+int nvshm_iobuf_ref(struct nvshm_iobuf *iob);
+
+/**
+ * Decrease reference count of iobuf
+ *
+ * @param struct nvshm_iobuf descriptor
+ * @return previous ref value
+ */
+int nvshm_iobuf_unref(struct nvshm_iobuf *iob);
+
+/**
+ * Increase reference count of iobuf cluster
+ *
+ * @param struct nvshm_iobuf descriptor
+ * @return previous maximum ref value
+ */
+int nvshm_iobuf_ref_cluster(struct nvshm_iobuf *iob);
+
+/**
+ * Decrease reference count of iobuf cluster
+ *
+ * @param struct nvshm_iobuf descriptor
+ * @return previous maximum ref value
+ */
+int nvshm_iobuf_unref_cluster(struct nvshm_iobuf *iob);
+
+/**
+ * Check if iobuf pointers are sane
+ *
+ * @param struct nvshm_iobuf to check
+ * @return 0 if sane
+ */
+int nvshm_iobuf_check(struct nvshm_iobuf *iob);
+
+/**
+ * Finalize BBC iobuf free
+ * Only called internally
+ * @param handle to nvshm
+ * @return None
+ */
+void nvshm_iobuf_bbc_free(struct nvshm_handle *handle);
+
+/**
+ * Process iobuf freed by BBC
+ * Only called internally
+ * @param desc pointer to the nvshm_iobuf freed by BBC
+ * @return None
+ */
+void nvshm_iobuf_process_freed(struct nvshm_iobuf *desc);
+
+/**
+ * Init iobuf subsystem
+ *
+ * @param handle to nvshm channel
+ * @return 0 if ok, negative otherwise
+ */
+int nvshm_iobuf_init(struct nvshm_handle *handle);
+
+#endif /* _NVSHM_IOBUF_H */
diff --git a/drivers/staging/nvshm/nvshm_ipc.c b/drivers/staging/nvshm/nvshm_ipc.c
new file mode 100644
index 000000000000..6559a282a99d
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_ipc.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvshm_types.h"
+#include "nvshm_if.h"
+#include "nvshm_priv.h"
+#include "nvshm_iobuf.h"
+#include "nvshm_ipc.h"
+#include "nvshm_queue.h"
+
+#include <linux/interrupt.h>
+#include <asm/mach/map.h>
+#include <mach/tegra_bb.h>
+#include <asm/cacheflush.h>
+
+#define NVSHM_WAKE_TIMEOUT_NS (20 * NSEC_PER_MSEC)
+#define NVSHM_WAKE_MAX_COUNT (50)
+
+static int ipc_readconfig(struct nvshm_handle *handle)
+{
+ struct nvshm_config *conf;
+ int chan;
+
+ pr_debug("%s\n", __func__);
+
+ conf = (struct nvshm_config *)(handle->mb_base_virt
+ + NVSHM_CONFIG_OFFSET);
+	/* Nothing in the v2.x kernel prevents running v1.3 modems, so let's
+ * ensure some continuity of service.
+ */
+ if ((conf->version == NVSHM_CONFIG_VERSION_1_3) &&
+ (NVSHM_MAJOR(NVSHM_CONFIG_VERSION) == 2)) {
+ pr_warn("%s BBC version 1.3, statistics not available\n",
+ __func__);
+ } else if (NVSHM_MAJOR(conf->version) !=
+ NVSHM_MAJOR(NVSHM_CONFIG_VERSION)) {
+ pr_err("%s SHM version mismatch: BBC: %d.%d / AP: %d.%d\n",
+ __func__,
+ NVSHM_MAJOR(conf->version),
+ NVSHM_MINOR(conf->version),
+ NVSHM_MAJOR(NVSHM_CONFIG_VERSION),
+ NVSHM_MINOR(NVSHM_CONFIG_VERSION));
+ return -1;
+ } else if (NVSHM_MINOR(conf->version) !=
+ NVSHM_MINOR(NVSHM_CONFIG_VERSION)) {
+ pr_warn("%s SHM versions differ: BBC: %d.%d / AP: %d.%d\n",
+ __func__,
+ NVSHM_MAJOR(conf->version),
+ NVSHM_MINOR(conf->version),
+ NVSHM_MAJOR(NVSHM_CONFIG_VERSION),
+ NVSHM_MINOR(NVSHM_CONFIG_VERSION));
+ }
+
+ if (handle->ipc_size != conf->shmem_size) {
+ pr_warn("%s shmem mapped/reported not matching: 0x%x/0x%x\n",
+ __func__, (unsigned int)handle->ipc_size,
+ conf->shmem_size);
+ }
+ handle->desc_base_virt = handle->ipc_base_virt
+ + conf->region_ap_desc_offset;
+ pr_debug("%s desc_base_virt=0x%p\n",
+ __func__, handle->desc_base_virt);
+
+ handle->desc_size = conf->region_ap_desc_size;
+ pr_debug("%s desc_size=%d\n",
+ __func__, (int)handle->desc_size);
+
+ /* Data is cached */
+ handle->data_base_virt = handle->ipc_base_virt
+ + conf->region_ap_data_offset;
+ pr_debug("%s data_base_virt=0x%p\n",
+ __func__, handle->data_base_virt);
+
+ handle->data_size = conf->region_ap_data_size;
+ pr_debug("%s data_size=%d\n", __func__, (int)handle->data_size);
+
+ if (NVSHM_MAJOR(conf->version) < 2) {
+ handle->stats_base_virt = 0;
+ handle->stats_size = 0;
+ } else {
+ handle->stats_base_virt = handle->mb_base_virt
+ + conf->region_dxp1_stats_offset;
+ pr_debug("%s stats_base_virt=0x%p\n",
+ __func__, handle->stats_base_virt);
+
+ handle->stats_size = conf->region_dxp1_stats_size;
+ pr_debug("%s stats_size=%lu\n", __func__, handle->stats_size);
+ }
+
+#ifndef CONFIG_TEGRA_BASEBAND_SIMU
+ handle->shared_queue_head =
+ (struct nvshm_iobuf *)(handle->ipc_base_virt
+ + conf->queue_bb_offset);
+ pr_debug("%s shared_queue_head offset=0x%lx\n",
+ __func__,
+ (long)handle->shared_queue_head - (long)handle->ipc_base_virt);
+#else
+ handle->shared_queue_head =
+ (struct nvshm_iobuf *)(handle->ipc_base_virt
+ + conf->queue_ap_offset);
+ pr_debug("%s shared_queue_head offset=0x%lx\n",
+ __func__,
+ (long)handle->shared_queue_head - (long)handle->ipc_base_virt);
+#endif
+ handle->shared_queue_tail =
+ (struct nvshm_iobuf *)(handle->ipc_base_virt
+ + conf->queue_ap_offset);
+ pr_debug("%s shared_queue_tail offset=0x%lx\n",
+ __func__, (long)handle->shared_queue_tail -
+ (long)handle->ipc_base_virt);
+
+ for (chan = 0; chan < NVSHM_MAX_CHANNELS; chan++) {
+ handle->chan[chan].index = chan;
+ handle->chan[chan].map = conf->chan_map[chan];
+ if (handle->chan[chan].map.type != NVSHM_CHAN_UNMAP) {
+ pr_debug("%s chan[%d]=%s\n",
+ __func__, chan, handle->chan[chan].map.name);
+ }
+ }
+
+	/* Serial number (e.g. BBC PCID) */
+ tegra_bb_set_ipc_serial(handle->tegra_bb, conf->serial);
+
+ /* Invalidate cache for IPC region before use */
+ INV_CPU_DCACHE(handle->ipc_base_virt, handle->ipc_size);
+ handle->conf = conf;
+ handle->configured = 1;
+ return 0;
+}
+
+static int init_interfaces(struct nvshm_handle *handle)
+{
+ int nlog = 0, ntty = 0, nnet = 0, nrpc = 0;
+ int chan;
+
+ for (chan = 0; chan < NVSHM_MAX_CHANNELS; chan++) {
+ handle->chan[chan].xoff = 0;
+ switch (handle->chan[chan].map.type) {
+ case NVSHM_CHAN_UNMAP:
+ break;
+ case NVSHM_CHAN_TTY:
+ case NVSHM_CHAN_LOG:
+ ntty++;
+ handle->chan[chan].rate_counter = NVSHM_RATE_LIMIT_TTY;
+ break;
+ case NVSHM_CHAN_NET:
+ handle->chan[chan].rate_counter = NVSHM_RATE_LIMIT_NET;
+ nnet++;
+ break;
+ case NVSHM_CHAN_RPC:
+ handle->chan[chan].rate_counter = NVSHM_RATE_LIMIT_RPC;
+ nrpc++;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (ntty) {
+ pr_debug("%s init %d tty channels\n", __func__, ntty);
+ nvshm_tty_init(handle);
+ }
+
+ if (nlog)
+ pr_debug("%s init %d log channels\n", __func__, nlog);
+
+ if (nnet) {
+ pr_debug("%s init %d net channels\n", __func__, nnet);
+ nvshm_net_init(handle);
+ }
+
+ if (nrpc) {
+ pr_debug("%s init %d rpc channels\n", __func__, nrpc);
+ nvshm_rpc_init(handle);
+ nvshm_rpc_dispatcher_init();
+ }
+
+ pr_debug("%s init statistics support\n", __func__);
+ nvshm_stats_init(handle);
+
+ return 0;
+}
+
+static int cleanup_interfaces(struct nvshm_handle *handle)
+{
+ int nlog = 0, ntty = 0, nnet = 0, nrpc = 0;
+ int chan;
+
+ /* No need to protect this as configuration will arrive after cleanup
+ * is propagated to userland
+ */
+ handle->configured = 0;
+
+ for (chan = 0; chan < NVSHM_MAX_CHANNELS; chan++) {
+ switch (handle->chan[chan].map.type) {
+ case NVSHM_CHAN_TTY:
+ case NVSHM_CHAN_LOG:
+ ntty++;
+ break;
+ case NVSHM_CHAN_NET:
+ nnet++;
+ break;
+ case NVSHM_CHAN_RPC:
+ nrpc++;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (ntty) {
+ pr_debug("%s cleanup %d tty channels\n", __func__, ntty);
+ nvshm_tty_cleanup();
+ }
+
+ if (nlog)
+ pr_debug("%s cleanup %d log channels\n", __func__, nlog);
+
+ if (nnet) {
+ pr_debug("%s cleanup %d net channels\n", __func__, nnet);
+ nvshm_net_cleanup();
+ }
+
+ if (nrpc) {
+ pr_debug("%s cleanup %d rpc channels\n", __func__, nrpc);
+ nvshm_rpc_dispatcher_cleanup();
+ nvshm_rpc_cleanup();
+ }
+
+ pr_debug("%s cleanup statistics support\n", __func__);
+ nvshm_stats_cleanup();
+
+ /* Remove serial sysfs entry */
+ tegra_bb_set_ipc_serial(handle->tegra_bb, NULL);
+
+ return 0;
+}
+
+static void ipc_work(struct work_struct *work)
+{
+ struct nvshm_handle *handle = container_of(work,
+ struct nvshm_handle,
+ nvshm_work);
+ int new_state;
+ int cmd;
+
+ if (!wake_lock_active(&handle->dl_lock))
+ wake_lock(&handle->dl_lock);
+ new_state = *((int *)handle->mb_base_virt);
+ cmd = new_state & 0xFFFF;
+ if (((~new_state >> 16) ^ (cmd)) & 0xFFFF) {
+ pr_err("%s: IPC check failure msg=0x%x\n",
+ __func__, new_state);
+ if (handle->configured) {
+ nvshm_abort_queue(handle);
+ cleanup_interfaces(handle);
+ }
+ goto ipc_exit;
+ }
+ switch (cmd) {
+ case NVSHM_IPC_READY:
+ /* most encountered message - process queue */
+ if (cmd != handle->old_status) {
+ if (ipc_readconfig(handle))
+ goto ipc_exit;
+
+ nvshm_iobuf_init(handle);
+ nvshm_init_queue(handle);
+ init_interfaces(handle);
+ }
+ /* Process IPC queue but do not notify sysfs */
+ if (handle->configured) {
+ nvshm_process_queue(handle);
+ if (handle->errno) {
+ pr_err("%s: cleanup interfaces\n",
+ __func__);
+ nvshm_abort_queue(handle);
+ cleanup_interfaces(handle);
+ break;
+ }
+ }
+ break;
+ case NVSHM_IPC_BOOT_FW_REQ:
+ case NVSHM_IPC_BOOT_RESTART_FW_REQ:
+ if (handle->configured) {
+ nvshm_abort_queue(handle);
+ cleanup_interfaces(handle);
+ pr_debug("%s: cleanup done\n", __func__);
+ }
+ break;
+ case NVSHM_IPC_BOOT_ERROR_BT2_HDR:
+ case NVSHM_IPC_BOOT_ERROR_BT2_SIGN:
+ case NVSHM_IPC_BOOT_ERROR_HWID:
+ case NVSHM_IPC_BOOT_ERROR_APP_HDR:
+ case NVSHM_IPC_BOOT_ERROR_APP_SIGN:
+ case NVSHM_IPC_BOOT_ERROR_UNLOCK_HEADER:
+ case NVSHM_IPC_BOOT_ERROR_UNLOCK_SIGN:
+ case NVSHM_IPC_BOOT_ERROR_UNLOCK_PCID:
+ pr_err("%s BB startup failure: msg=0x%x\n",
+ __func__, new_state);
+ break;
+ case NVSHM_IPC_BOOT_COLD_BOOT_IND:
+ case NVSHM_IPC_BOOT_FW_CONF:
+ /* Should not have these - something went wrong... */
+ pr_err("%s IPC IT error: msg=0x%x\n",
+ __func__, new_state);
+ break;
+ default:
+ pr_err("%s unknown IPC message found: msg=0x%x\n",
+ __func__, new_state);
+ }
+ handle->old_status = cmd;
+ipc_exit:
+ wake_unlock(&handle->dl_lock);
+ enable_irq(handle->bb_irq);
+}
+
+static void start_tx_worker(struct work_struct *work)
+{
+ struct nvshm_channel *chan = container_of(work,
+ struct nvshm_channel,
+ start_tx_work);
+
+ pr_warn("%s: start tx on chan %d\n", __func__, chan->index);
+ if (chan->ops)
+ chan->ops->start_tx(chan);
+}
+
+static void nvshm_ipc_handler(void *data)
+{
+ struct nvshm_handle *handle = (struct nvshm_handle *)data;
+
+	pr_debug("%s\n", __func__);
+	queue_work(handle->nvshm_wq, &handle->nvshm_work);
+}
+
+static enum hrtimer_restart nvshm_ipc_timer_func(struct hrtimer *timer)
+{
+ struct nvshm_handle *handle =
+ container_of(timer, struct nvshm_handle, wake_timer);
+
+ if (tegra_bb_check_ipc(handle->tegra_bb) == 1) {
+ pr_debug("%s AP2BB is cleared\n", __func__);
+ wake_unlock(&handle->ul_lock);
+ return HRTIMER_NORESTART;
+ }
+ if (handle->timeout++ > NVSHM_WAKE_MAX_COUNT) {
+ pr_warn("%s AP2BB not cleared in 1s - aborting\n", __func__);
+ tegra_bb_abort_ipc(handle->tegra_bb);
+ wake_unlock(&handle->ul_lock);
+ return HRTIMER_NORESTART;
+ }
+ pr_debug("%s AP2BB is still set\n", __func__);
+ hrtimer_forward_now(timer, ktime_set(0, NVSHM_WAKE_TIMEOUT_NS));
+ return HRTIMER_RESTART;
+}
+
+int nvshm_register_ipc(struct nvshm_handle *handle)
+{
+ int chan;
+
+ pr_debug("%s\n", __func__);
+	snprintf(handle->wq_name, sizeof(handle->wq_name), "nvshm_queue%d",
+		 handle->instance);
+ handle->nvshm_wq = create_singlethread_workqueue(handle->wq_name);
+ INIT_WORK(&handle->nvshm_work, ipc_work);
+
+ for (chan = 0; chan < NVSHM_MAX_CHANNELS; chan++)
+ INIT_WORK(&handle->chan[chan].start_tx_work, start_tx_worker);
+
+	/* Timer is armed relative (20 ms poll), so pair it with MONOTONIC */
+	hrtimer_init(&handle->wake_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ handle->wake_timer.function = nvshm_ipc_timer_func;
+
+ tegra_bb_register_ipc(handle->tegra_bb, nvshm_ipc_handler, handle);
+ return 0;
+}
+
+int nvshm_unregister_ipc(struct nvshm_handle *handle)
+{
+ pr_debug("%s flush workqueue\n", __func__);
+ flush_workqueue(handle->nvshm_wq);
+
+ pr_debug("%s destroy workqueue\n", __func__);
+ destroy_workqueue(handle->nvshm_wq);
+
+ pr_debug("%s unregister tegra_bb\n", __func__);
+ tegra_bb_register_ipc(handle->tegra_bb, NULL, NULL);
+
+ hrtimer_cancel(&handle->wake_timer);
+ return 0;
+}
+
+int nvshm_generate_ipc(struct nvshm_handle *handle)
+{
+ /* take wake lock until BB ack our irq */
+ if (!wake_lock_active(&handle->ul_lock))
+ wake_lock(&handle->ul_lock);
+
+ if (!hrtimer_active(&handle->wake_timer)) {
+ handle->timeout = 0;
+ hrtimer_start(&handle->wake_timer,
+ ktime_set(0, NVSHM_WAKE_TIMEOUT_NS),
+ HRTIMER_MODE_REL);
+ }
+ /* generate ipc */
+ tegra_bb_generate_ipc(handle->tegra_bb);
+ return 0;
+}
+
diff --git a/drivers/staging/nvshm/nvshm_ipc.h b/drivers/staging/nvshm/nvshm_ipc.h
new file mode 100644
index 000000000000..3e8981eccad7
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_ipc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2012 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NVSHM_IPC_H
+#define _NVSHM_IPC_H
+
+/**
+ * Register IPC for handle
+ *
+ * @param struct nvshm_handle
+ * @return 0 if ok
+ */
+extern int nvshm_register_ipc(struct nvshm_handle *handle);
+
+/**
+ * Unregister IPC for handle
+ *
+ * @param struct nvshm_handle
+ * @return 0 if ok
+ */
+extern int nvshm_unregister_ipc(struct nvshm_handle *handle);
+
+/**
+ * Generate an IPC interrupt with given mailbox content
+ *
+ * @param struct nvshm_handle
+ * @return 0 if ok
+ */
+extern int nvshm_generate_ipc(struct nvshm_handle *handle);
+#endif /* _NVSHM_IPC_H */
diff --git a/drivers/staging/nvshm/nvshm_net.c b/drivers/staging/nvshm/nvshm_net.c
new file mode 100644
index 000000000000..a754519bd39a
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_net.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/wakelock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/semaphore.h>
+#include "nvshm_types.h"
+#include "nvshm_if.h"
+#include "nvshm_priv.h"
+#include "nvshm_iobuf.h"
+
+#define MAX_XMIT_SIZE 1500
+
+#define NVSHM_NETIF_PREFIX "wwan"
+
+/* This structure holds the per-network-port information, such as the
+ * nvshm_iobuf queues and the back reference to the nvshm_channel */
+struct nvshm_net_line {
+ int use;
+ int nvshm_chan;
+ struct net_device_stats stats;
+
+ /* iobuf queues for nvshm flow control support */
+ struct nvshm_iobuf *q_head;
+ struct nvshm_iobuf *q_tail;
+ struct net_device *net;
+ struct nvshm_channel *pchan; /* contains (struct net_device *)data */
+ int errno;
+ spinlock_t lock;
+};
+
+struct nvshm_net_device {
+ struct nvshm_handle *handle;
+ struct nvshm_net_line *line[NVSHM_MAX_CHANNELS];
+ int nlines;
+};
+
+static struct nvshm_net_device netdev;
+
+
+/* rx_event() is called when a packet of data is received.
+ * The receiver should consume all iobuf in the given list.
+ */
+void nvshm_netif_rx_event(struct nvshm_channel *chan,
+ struct nvshm_iobuf *iobuf)
+{
+ struct net_device *dev = (struct net_device *)chan->data;
+ struct nvshm_net_line *priv = netdev_priv(dev);
+ struct nvshm_iobuf *bb_iob, *bb_next, *ap_iob, *ap_next;
+ unsigned char *src; /* AP address for BB source buffer */
+ unsigned char *dst; /* AP address for skb */
+ unsigned int datagram_len; /* datagram total data length */
+ struct sk_buff *skb;
+
+ pr_debug("%s()\n", __func__);
+ if (!priv) {
+ pr_err("%s() no private info on iface!\n", __func__);
+ return;
+ }
+
+ if (!iobuf) {
+ pr_err("%s() null input buffer address\n", __func__);
+ return;
+ }
+
+ ap_next = iobuf;
+ bb_next = NVSHM_A2B(netdev.handle, iobuf);
+ while (bb_next) {
+ datagram_len = 0;
+ ap_iob = ap_next;
+ bb_iob = bb_next;
+ while (bb_iob) {
+ datagram_len += ap_iob->length;
+ bb_iob = ap_iob->sg_next;
+ ap_iob = NVSHM_B2A(netdev.handle, bb_iob);
+ }
+ if (datagram_len > dev->mtu) {
+ pr_err("%s: MTU %d>%d\n", __func__,
+ dev->mtu, datagram_len);
+ priv->stats.rx_errors++;
+ /* move to next datagram - drop current one */
+ ap_iob = ap_next;
+ bb_next = ap_next->next;
+ ap_next = NVSHM_B2A(netdev.handle, bb_next);
+ /* Break ->next chain before free */
+ ap_iob->next = NULL;
+ nvshm_iobuf_free_cluster(ap_iob);
+ continue;
+ }
+ /* construct the skb */
+		skb = __netdev_alloc_skb(dev, datagram_len, GFP_KERNEL);
+ if (!skb) {
+			/* Out of memory - nothing to do except free
+			 * the current iobufs and return */
+ pr_err("%s: skb alloc failed!\n", __func__);
+ priv->stats.rx_errors++;
+ nvshm_iobuf_free_cluster(ap_next);
+ return;
+ }
+ dst = skb_put(skb, datagram_len);
+
+ ap_iob = ap_next;
+ bb_iob = bb_next;
+ bb_next = ap_next->next;
+ ap_next = NVSHM_B2A(netdev.handle, bb_next);
+ while (bb_iob) {
+ src = NVSHM_B2A(netdev.handle, ap_iob->npdu_data)
+ + ap_iob->data_offset;
+ memcpy(dst, src, ap_iob->length);
+ dst += ap_iob->length;
+ bb_iob = ap_iob->sg_next;
+ nvshm_iobuf_free(ap_iob);
+ ap_iob = NVSHM_B2A(netdev.handle, bb_iob);
+ }
+ /* deliver skb to netif */
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb_reset_transport_header(skb);
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ pr_err("%s() Non IP packet received!\n", __func__);
+ priv->stats.rx_errors++;
+ /* Drop packet */
+ kfree_skb(skb);
+ /* move to next datagram */
+ continue;
+ }
+ skb->pkt_type = PACKET_HOST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += datagram_len;
+ if (netif_rx(skb) == NET_RX_DROP)
+ pr_debug("%s() : dropped packet\n", __func__);
+ }
+}
+
+/* error_event() is called when an error event is received */
+void nvshm_netif_error_event(struct nvshm_channel *chan,
+ enum nvshm_error_id error)
+{
+	struct net_device *dev = (struct net_device *)chan->data;
+	struct nvshm_net_line *priv;
+
+	pr_debug("%s()\n", __func__);
+	if (dev == NULL) {
+		pr_err("%s() chan->data is a null pointer\n", __func__);
+		return;
+	}
+	priv = netdev_priv(dev);
+
+ priv->errno = error;
+
+ spin_lock(&priv->lock);
+ priv->stats.tx_errors++;
+ spin_unlock(&priv->lock);
+ pr_err("%s() : error on nvshm net interface!\n", __func__);
+}
+
+/* start_tx() is called to restart the transmit */
+void nvshm_netif_start_tx(struct nvshm_channel *chan)
+{
+ struct net_device *dev = (struct net_device *)chan->data;
+ struct nvshm_net_line *priv = netdev_priv(dev);
+
+ pr_debug("%s()\n", __func__);
+
+ if (!priv)
+ return;
+
+ /* Wake up queue */
+ netif_wake_queue(dev);
+}
+
+static struct nvshm_if_operations nvshm_netif_ops = {
+ .rx_event = nvshm_netif_rx_event, /* nvshm_queue.c */
+ .error_event = nvshm_netif_error_event, /* nvshm_iobuf.c */
+ .start_tx = nvshm_netif_start_tx,
+};
+
+
+/* called when ifconfig <if> up */
+static int nvshm_netops_open(struct net_device *dev)
+{
+ struct nvshm_net_line *priv = netdev_priv(dev);
+ int ret = 0;
+
+ pr_debug("%s()\n", __func__);
+ if (!priv)
+ return -EINVAL;
+
+ spin_lock(&priv->lock);
+ if (!priv->use) {
+ priv->pchan = nvshm_open_channel(priv->nvshm_chan,
+ &nvshm_netif_ops,
+ dev);
+ if (priv->pchan == NULL)
+ ret = -EINVAL;
+ }
+ if (!ret)
+ priv->use++;
+ spin_unlock(&priv->lock);
+
+ /* Start if queue */
+ netif_start_queue(dev);
+ return ret;
+}
+
+/* called when ifconfig <if> down */
+static int nvshm_netops_close(struct net_device *dev)
+{
+ struct nvshm_net_line *priv = netdev_priv(dev);
+
+ pr_debug("%s()\n", __func__);
+ if (!priv)
+ return -EINVAL;
+
+ spin_lock(&priv->lock);
+ if (priv->use > 0)
+ priv->use--;
+ spin_unlock(&priv->lock);
+
+ if (!priv->use) {
+ /* Cleanup if data are still present in io queue */
+ if (priv->q_head) {
+ pr_debug("%s: still some data in queue!\n", __func__);
+ nvshm_iobuf_free_cluster(
+ (struct nvshm_iobuf *)priv->q_head);
+ priv->q_head = priv->q_tail = NULL;
+ }
+ nvshm_close_channel(priv->pchan);
+ }
+
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int nvshm_netops_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+ struct nvshm_net_line *priv = netdev_priv(dev);
+ struct nvshm_iobuf *iob, *leaf = NULL, *list = NULL;
+ int to_send = 0, remain;
+ int len;
+ char *data;
+
+ pr_debug("Transmit frame\n");
+ pr_debug("%s()\n", __func__);
+ if (!priv)
+ return -EINVAL;
+
+ len = skb->len;
+ data = skb->data;
+
+ /* write a frame to an nvshm channel */
+ pr_debug("len=%d\n", len);
+
+ /* write data from skb (data,len) to net_device dev */
+ remain = len;
+ while (remain) {
+ pr_debug("remain=%d\n", remain);
+ to_send = remain < MAX_XMIT_SIZE ? remain : MAX_XMIT_SIZE;
+ iob = nvshm_iobuf_alloc(priv->pchan, to_send);
+ if (!iob) {
+ pr_warn("%s iobuf alloc failed\n", __func__);
+ netif_stop_queue(dev);
+ if (list)
+ nvshm_iobuf_free_cluster(list);
+			return NETDEV_TX_BUSY;
+ }
+
+ iob->length = to_send;
+ remain -= to_send;
+
+ memcpy(NVSHM_B2A(netdev.handle,
+ iob->npdu_data + iob->data_offset),
+ data,
+ to_send);
+
+ data += to_send;
+
+ if (!list) {
+ leaf = list = iob;
+ } else {
+ leaf->sg_next = NVSHM_A2B(netdev.handle, iob);
+ leaf = iob;
+ }
+ }
+ if (nvshm_write(priv->pchan, list)) {
+ /* no more transmit possible - stop queue */
+		pr_warn("%s rate limit hit on channel %d\n",
+			__func__, priv->nvshm_chan);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* successfully written len data bytes */
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += len;
+
+ pr_debug("packets=%ld, tx_bytes=%ld\n", priv->stats.tx_packets,
+ priv->stats.tx_bytes);
+
+	/* free the skb now - its data has been copied into iobufs */
+ kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int nvshm_netops_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct nvshm_net_line *priv = netdev_priv(dev);
+
+ pr_debug("%s()\n", __func__);
+ if (!priv)
+ return -EINVAL;
+
+	/* Apply the new MTU here: returning 0 from ndo_change_mtu without
+	 * setting dev->mtu would silently ignore the request.
+	 * 68 is the IPv4 minimum MTU.
+	 */
+	if (new_mtu < 68 || new_mtu > MAX_XMIT_SIZE)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static struct net_device_stats *nvshm_netops_get_stats(struct net_device *dev)
+{
+ struct nvshm_net_line *priv = netdev_priv(dev);
+
+ pr_debug("%s()\n", __func__);
+ if (priv)
+ return &priv->stats;
+ else
+ return NULL;
+}
+
+static void nvshm_netops_tx_timeout(struct net_device *dev)
+{
+ struct nvshm_net_line *priv = netdev_priv(dev);
+
+ pr_debug("%s()\n", __func__);
+ if (!priv)
+ return;
+
+ spin_lock(&priv->lock);
+ priv->stats.tx_errors++;
+ spin_unlock(&priv->lock);
+ netif_wake_queue(dev);
+}
+
+static const struct net_device_ops nvshm_netdev_ops = {
+ .ndo_open = nvshm_netops_open,
+ .ndo_stop = nvshm_netops_close,
+ .ndo_start_xmit = nvshm_netops_xmit_frame,
+ .ndo_get_stats = nvshm_netops_get_stats,
+ .ndo_change_mtu = nvshm_netops_change_mtu,
+ .ndo_tx_timeout = nvshm_netops_tx_timeout,
+};
+
+static void nvshm_nwif_init_dev(struct net_device *dev)
+{
+ dev->netdev_ops = &nvshm_netdev_ops;
+ dev->mtu = 1500;
+ dev->type = ARPHRD_NONE;
+ /* No hardware address */
+ dev->hard_header_len = 0;
+	/* Should be tuned as soon as the framer allows multiple frames */
+ dev->tx_queue_len = 10;
+ /* No hardware address */
+ dev->addr_len = 0;
+ dev->watchdog_timeo = HZ;
+ dev->flags |= IFF_POINTOPOINT | IFF_NOARP;
+}
+
+int nvshm_net_init(struct nvshm_handle *handle)
+{
+ struct net_device *dev;
+ int chan;
+ int ret = 0;
+ struct nvshm_net_line *priv;
+
+ pr_debug("%s()\n", __func__);
+ memset(&netdev, 0, sizeof(netdev));
+ netdev.handle = handle;
+
+ /* check nvshm_channels[] for net devices */
+ for (chan = 0; chan < NVSHM_MAX_CHANNELS; chan++) {
+
+ if (handle->chan[chan].map.type == NVSHM_CHAN_NET) {
+ pr_debug("Registering %s%d\n",
+ NVSHM_NETIF_PREFIX,
+ netdev.nlines);
+ dev = alloc_netdev(sizeof(struct nvshm_net_line),
+ NVSHM_NETIF_PREFIX"%d",
+ nvshm_nwif_init_dev);
+ if (!dev)
+ goto err_exit;
+
+ dev->base_addr = netdev.nlines;
+
+ priv = netdev_priv(dev);
+ netdev.line[netdev.nlines] = priv;
+ netdev.line[netdev.nlines]->net = dev;
+ priv->nvshm_chan = chan;
+ spin_lock_init(&priv->lock);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("Error %i registering %s%d device\n",
+ ret,
+ NVSHM_NETIF_PREFIX,
+ netdev.nlines);
+ goto err_exit;
+ }
+
+ netdev.nlines++;
+ }
+ }
+
+ return ret;
+
+err_exit:
+ nvshm_net_cleanup();
+ return ret;
+}
+
+void nvshm_net_cleanup(void)
+{
+ int chan;
+
+ pr_debug("%s()\n", __func__);
+
+ for (chan = 0; chan < netdev.nlines; chan++) {
+ if (netdev.line[chan]->net) {
+ pr_debug("%s free %s%d\n",
+ __func__,
+ NVSHM_NETIF_PREFIX,
+ chan);
+ unregister_netdev(netdev.line[chan]->net);
+ free_netdev(netdev.line[chan]->net);
+ }
+ }
+}
+
diff --git a/drivers/staging/nvshm/nvshm_priv.h b/drivers/staging/nvshm/nvshm_priv.h
new file mode 100644
index 000000000000..2bacba025c9d
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_priv.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _NVSHM_PRIV_H
+#define _NVSHM_PRIV_H
+
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/wakelock.h>
+#include <asm/memory.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include "nvshm_types.h"
+/*
+ * The test stub implements nvshm on private memory for testing purposes.
+ * Data are allocated from this private memory, but queues loop back on
+ * themselves.
+ */
+#define NVSHM_TEST_STUB
+
+/* Generate NVSHM_IPC MSG */
+#define NVSHM_IPC_MESSAGE(id) (((~id & 0xFFFF) << 16) | (id & 0xFFFF))
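+
+/*
+ * The receive side inverts this encoding to validate a mailbox word (see
+ * ipc_work() in nvshm_ipc.c); a sketch of the check:
+ *
+ *	cmd = msg & 0xFFFF;
+ *	valid = !(((~msg >> 16) ^ cmd) & 0xFFFF);
+ */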
+
+/* Flags for descriptors */
+#define NVSHM_DESC_AP (0x01) /* AP descriptor ownership */
+#define NVSHM_DESC_BB (0x02) /* BB descriptor ownership */
+#define NVSHM_DESC_OPEN (0x04) /* OOB channel open */
+#define NVSHM_DESC_CLOSE (0x08) /* OOB channel close */
+#define NVSHM_DESC_XOFF (0x10) /* OOB channel Tx off */
+#define NVSHM_DESC_XON (0x20) /* OOB channel Tx on */
+
+#define FLUSH_CPU_DCACHE(va, size) \
+ do { \
+ unsigned long _pa_ = page_to_phys(vmalloc_to_page((va))) \
+ + ((unsigned long)va & ~PAGE_MASK); \
+ __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
+ outer_flush_range(_pa_, _pa_+(size_t)(size)); \
+ } while (0)
+
+#define INV_CPU_DCACHE(va, size) \
+ do { \
+ unsigned long _pa_ = page_to_phys(vmalloc_to_page((va))) \
+ + ((unsigned long)va & ~PAGE_MASK); \
+ outer_inv_range(_pa_, _pa_+(size_t)(size)); \
+ __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \
+ } while (0)
+
+struct nvshm_handle {
+ spinlock_t lock;
+ spinlock_t qlock;
+ struct wake_lock ul_lock;
+ struct wake_lock dl_lock;
+ int instance;
+ int old_status;
+ int configured;
+ int bb_irq;
+ int errno;
+ struct nvshm_config *conf;
+ void *ipc_base_virt;
+ void *mb_base_virt;
+ void *desc_base_virt; /* AP desc region */
+ void *data_base_virt; /* AP data region */
+ void *stats_base_virt;
+ unsigned long ipc_size;
+ unsigned long mb_size;
+ unsigned long desc_size;
+ unsigned long data_size;
+ unsigned long stats_size;
+ struct nvshm_iobuf *shared_queue_head; /* shared desc list */
+ struct nvshm_iobuf *shared_queue_tail; /* shared desc list */
+ struct nvshm_iobuf *free_pool_head; /* free desc list */
+ struct nvshm_channel chan[NVSHM_MAX_CHANNELS];
+ struct work_struct nvshm_work;
+ struct workqueue_struct *nvshm_wq;
+ struct hrtimer wake_timer;
+ int timeout;
+ char wq_name[16];
+ struct device *dev;
+ void *ipc_data;
+ void (*generate_ipc)(void *ipc_data);
+ struct platform_device *tegra_bb;
+};
+
+extern struct nvshm_handle *nvshm_get_handle(void);
+
+extern int nvshm_tty_init(struct nvshm_handle *handle);
+extern void nvshm_tty_cleanup(void);
+
+extern int nvshm_net_init(struct nvshm_handle *handle);
+extern void nvshm_net_cleanup(void);
+
+extern int nvshm_rpc_init(struct nvshm_handle *handle);
+extern void nvshm_rpc_cleanup(void);
+
+extern void nvshm_stats_init(struct nvshm_handle *handle);
+extern void nvshm_stats_cleanup(void);
+
+extern int nvshm_rpc_dispatcher_init(void);
+extern void nvshm_rpc_dispatcher_cleanup(void);
+
+#endif /* _NVSHM_PRIV_H */
diff --git a/drivers/staging/nvshm/nvshm_queue.c b/drivers/staging/nvshm/nvshm_queue.c
new file mode 100644
index 000000000000..ec6350636994
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_queue.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nvshm_types.h"
+#include "nvshm_if.h"
+#include "nvshm_priv.h"
+#include "nvshm_ipc.h"
+#include "nvshm_queue.h"
+#include "nvshm_iobuf.h"
+
+#include <mach/tegra_bb.h>
+
+/* Flush cache lines associated with iobuf list */
+static void flush_iob_list(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
+{
+ struct nvshm_iobuf *phy_list, *leaf, *next, *sg_next;
+
+ phy_list = iob;
+ while (phy_list) {
+ leaf = phy_list;
+ next = phy_list->next;
+ while (leaf) {
+ sg_next = leaf->sg_next;
+ BUG_ON(nvshm_iobuf_check(leaf) < 0);
+ /* Flush associated data */
+ if (leaf->length) {
+ FLUSH_CPU_DCACHE(NVSHM_B2A(handle,
+ (int)leaf->npdu_data
+ + leaf->data_offset),
+ leaf->length);
+ }
+ /* Flush iobuf */
+ FLUSH_CPU_DCACHE(leaf, sizeof(struct nvshm_iobuf));
+ if (sg_next)
+ leaf = NVSHM_B2A(handle, sg_next);
+ else
+ leaf = NULL;
+ }
+ if (next)
+ phy_list = NVSHM_B2A(handle, next);
+ else
+ phy_list = NULL;
+ }
+}
+
+/* Invalidate cache lines associated with iobuf list */
+/* Return 0 if ok or non zero otherwise */
+static int inv_iob_list(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
+{
+ struct nvshm_iobuf *phy_list, *leaf;
+
+ phy_list = iob;
+ while (phy_list) {
+ leaf = phy_list;
+ while (leaf) {
+ /* Check leaf address before any operation on it */
+ /* Cannot use nvshm_iobuf_check because iobuf */
+ /* is not invalidated so content will be wrong */
+ if (ADDR_OUTSIDE(leaf, handle->ipc_base_virt,
+ handle->ipc_size)) {
+ return -EIO;
+ }
+ /* Invalidate iobuf */
+ INV_CPU_DCACHE(leaf, sizeof(struct nvshm_iobuf));
+ /* Check iobuf */
+ if (nvshm_iobuf_check(leaf))
+ return -EIO;
+ /* Invalidate associated data */
+ if (leaf->length) {
+ INV_CPU_DCACHE(NVSHM_B2A(handle,
+ (int)leaf->npdu_data
+ + leaf->data_offset),
+ leaf->length);
+ }
+ if (leaf->sg_next)
+ leaf = NVSHM_B2A(handle, leaf->sg_next);
+ else
+ leaf = NULL;
+ }
+ if (phy_list->next)
+ phy_list = NVSHM_B2A(handle, phy_list->next);
+ else
+ phy_list = NULL;
+ }
+ return 0;
+}
+
+struct nvshm_iobuf *nvshm_queue_get(struct nvshm_handle *handle)
+{
+ struct nvshm_iobuf *dummy, *ret;
+
+ if (!handle->shared_queue_head) {
+ pr_err("%s: Queue not init!\n", __func__);
+ return NULL;
+ }
+
+ dummy = handle->shared_queue_head;
+ /* Invalidate lower part of iobuf - upper part can be written by AP */
+
+ INV_CPU_DCACHE(&dummy->qnext,
+ sizeof(struct nvshm_iobuf) / 2);
+	if (dummy->qnext == NULL)
+		return NULL;
+
+	ret = NVSHM_B2A(handle, dummy->qnext);
+
+ /* Invalidate iobuf(s) and check validity */
+ handle->errno = inv_iob_list(handle, ret);
+
+ if (handle->errno) {
+ pr_err("%s: queue corruption\n", __func__);
+ return NULL;
+ }
+
+ handle->shared_queue_head = ret;
+
+ /* Update queue_bb_offset for debug purpose */
+ handle->conf->queue_bb_offset = (int)ret
+ - (int)handle->ipc_base_virt;
+
+ if ((handle->conf->queue_bb_offset < 0) ||
+ (handle->conf->queue_bb_offset > handle->conf->shmem_size))
+ pr_err("%s: out of bound descriptor offset %d addr 0x%p/0x%p\n",
+ __func__,
+ handle->conf->queue_bb_offset,
+ ret,
+ NVSHM_A2B(handle, ret));
+
+ pr_debug("%s (%p)->%p->(%p)\n", __func__,
+ dummy, ret, ret->qnext);
+
+ dummy->qnext = NULL;
+ nvshm_iobuf_free(dummy);
+
+ return ret;
+}
+
+int nvshm_queue_put(struct nvshm_handle *handle, struct nvshm_iobuf *iob)
+{
+ unsigned long f;
+
+ spin_lock_irqsave(&handle->qlock, f);
+ if (!handle->shared_queue_tail) {
+ spin_unlock_irqrestore(&handle->qlock, f);
+ pr_err("%s: Queue not init!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!iob) {
+ pr_err("%s: Queueing null pointer!\n", __func__);
+ spin_unlock_irqrestore(&handle->qlock, f);
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (handle->shared_queue_tail->qnext) {
+ pr_err("%s: illegal queue pointer detected!\n", __func__);
+ spin_unlock_irqrestore(&handle->qlock, f);
+ return -EINVAL;
+ }
+
+ pr_debug("%s (%p)->%p/%d/%d->%p\n", __func__,
+ handle->shared_queue_tail,
+ iob, iob->chan, iob->length,
+ iob->next);
+
+ /* Take a reference on queued iobuf */
+ nvshm_iobuf_ref(iob);
+ /* Flush iobuf(s) in cache */
+ flush_iob_list(handle, iob);
+ handle->shared_queue_tail->qnext = NVSHM_A2B(handle, iob);
+ /* Flush guard element from cache */
+ FLUSH_CPU_DCACHE(handle->shared_queue_tail, sizeof(struct nvshm_iobuf));
+ handle->shared_queue_tail = iob;
+
+ spin_unlock_irqrestore(&handle->qlock, f);
+ return 0;
+}
+
+int nvshm_init_queue(struct nvshm_handle *handle)
+{
+ pr_debug("%s instance %d\n", __func__, handle->instance);
+ /* Catch config issues */
+ if ((!handle->ipc_base_virt) || (!handle->desc_base_virt)) {
+ pr_err("%s IPC or DESC base not defined!", __func__);
+ return -ENOMEM;
+ }
+
+ if ((handle->desc_size % sizeof(struct nvshm_iobuf))) {
+ pr_err("%s DESC zone illegal size!", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Called from IPC workqueue
+ */
+void nvshm_process_queue(struct nvshm_handle *handle)
+{
+ struct nvshm_iobuf *iob;
+ struct nvshm_if_operations *ops;
+ int chan;
+
+ spin_lock_bh(&handle->lock);
+ iob = nvshm_queue_get(handle);
+ while (iob) {
+ pr_debug("%s %p/%d/%d/%d->%p\n", __func__,
+ iob, iob->chan, iob->length, iob->ref, iob->next);
+ tegra_bb_clear_ipc(handle->tegra_bb);
+ chan = iob->chan;
+ if (iob->pool_id < NVSHM_AP_POOL_ID) {
+ ops = handle->chan[chan].ops;
+ if (ops) {
+ spin_unlock_bh(&handle->lock);
+ ops->rx_event(
+ &handle->chan[chan],
+ iob);
+ spin_lock_bh(&handle->lock);
+ } else {
+ nvshm_iobuf_free_cluster(
+ iob);
+ }
+ } else {
+ /* freed iobuf can form a tree */
+ /* Process attached iobufs but do not touch iob */
+ /* as it will be freed by next queue_get */
+ if (iob->next) {
+ nvshm_iobuf_process_freed(
+ NVSHM_B2A(handle, iob->next));
+ }
+ }
+ iob = nvshm_queue_get(handle);
+ }
+ spin_unlock_bh(&handle->lock);
+ /* Finalize BBC free */
+ nvshm_iobuf_bbc_free(handle);
+}
+
+void nvshm_abort_queue(struct nvshm_handle *handle)
+{
+ pr_debug("%s:abort queue\n", __func__);
+ /* Clear IPC to avoid warning in kernel log */
+ tegra_bb_abort_ipc(handle->tegra_bb);
+}
+
diff --git a/drivers/staging/nvshm/nvshm_queue.h b/drivers/staging/nvshm/nvshm_queue.h
new file mode 100644
index 000000000000..c8867e54f604
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_queue.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 NVIDIA Corporation.
+ *
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _NVSHM_QUEUE_H
+#define _NVSHM_QUEUE_H
+
+extern int nvshm_init_queue(struct nvshm_handle *handle);
+extern struct nvshm_iobuf *nvshm_queue_get(struct nvshm_handle *handle);
+extern int nvshm_queue_put(struct nvshm_handle *handle,
+ struct nvshm_iobuf *iob);
+extern void nvshm_process_queue(struct nvshm_handle *handle);
+extern void nvshm_abort_queue(struct nvshm_handle *handle);
+#endif /* _NVSHM_QUEUE_H */
diff --git a/drivers/staging/nvshm/nvshm_rpc.c b/drivers/staging/nvshm/nvshm_rpc.c
new file mode 100644
index 000000000000..bbfb4fed837a
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_rpc.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/sunrpc/msg_prot.h>
+#include "nvshm_types.h"
+#include "nvshm_if.h"
+#include "nvshm_priv.h"
+#include "nvshm_iobuf.h"
+#include "nvshm_rpc.h"
+
+enum {
+ CONCURRENT_REQUESTS_MAX = 16, /* MUST be a power of two */
+};
+
+struct nvshm_rpc_header {
+ u32 xid; /* Endianness does not matter */
+ enum rpc_msg_type msg_type;
+};
+
+typedef void (*nvshm_rpc_callback_t)(
+ struct nvshm_rpc_message *message,
+ void *context);
+
+struct nvshm_rpc_request {
+ u32 requestid;
+ nvshm_rpc_callback_t callback;
+ void *context;
+};
+
+struct nvshm_rpc {
+ int chanid;
+ struct nvshm_channel *pchan;
+ struct nvshm_handle *handle;
+ nvshm_rpc_callback_t dispatcher_callback;
+ void *dispatcher_context;
+ struct mutex requestid_mutex;
+ u32 requestid;
+ struct nvshm_rpc_request requests[CONCURRENT_REQUESTS_MAX];
+ u32 free_requests_number;
+};
+
+static struct nvshm_rpc rpc_private;
+
+/*
+ * We want the request ID to be unique, even if a rollover happens, so we have
+ * the array index as LSBs and a counter as MSBs. Hence the requirement for
+ * CONCURRENT_REQUESTS_MAX to be a power of 2.
+ */
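+/*
+ * For example, with CONCURRENT_REQUESTS_MAX = 16, an ID handed out from
+ * slot 3 always has the form 16*n + 3 (19, 35, 51, ...): the low 4 bits
+ * recover the slot index while the upper bits count reuses, so a stale
+ * response cannot match a recycled slot.
+ */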
+static u32 request_create(nvshm_rpc_callback_t callback, void *context)
+{
+ u32 requestid = 0;
+ int i;
+
+ mutex_lock(&rpc_private.requestid_mutex);
+ if (rpc_private.free_requests_number == 0)
+ goto end;
+
+ for (i = 0; i < CONCURRENT_REQUESTS_MAX; ++i) {
+ struct nvshm_rpc_request *request;
+
+ if (rpc_private.requests[i].requestid)
+ continue;
+
+ rpc_private.requestid += CONCURRENT_REQUESTS_MAX;
+ /* Make sure we never give out request ID 0 */
+ if (rpc_private.requestid + i == 0)
+ rpc_private.requestid += CONCURRENT_REQUESTS_MAX;
+
+ request = &rpc_private.requests[i];
+ request->requestid = rpc_private.requestid + i;
+ request->callback = callback;
+ request->context = context;
+ --rpc_private.free_requests_number;
+ requestid = request->requestid;
+ break;
+ }
+end:
+ mutex_unlock(&rpc_private.requestid_mutex);
+ return requestid;
+}
+
+static struct nvshm_rpc_request *request_get(u32 requestid)
+{
+ struct nvshm_rpc_request *request = NULL;
+ int i;
+
+ /*
+ * We only have two threads here: one that creates the message and sends
+ * it, and one that receives the answer to it and reads it, then deletes
+ * it. Creation implies a free slot, so will not interfere. Hence we do
+ * not need to lock.
+ */
+ for (i = 0; i < CONCURRENT_REQUESTS_MAX; ++i)
+ if (rpc_private.requests[i].requestid == requestid) {
+ request = &rpc_private.requests[i];
+ break;
+ }
+ return request;
+}
+
+static void request_delete(u32 requestid)
+{
+ int i;
+
+ mutex_lock(&rpc_private.requestid_mutex);
+ for (i = 0; i < CONCURRENT_REQUESTS_MAX; ++i)
+ if (rpc_private.requests[i].requestid == requestid) {
+ rpc_private.requests[i].requestid = 0;
+ ++rpc_private.free_requests_number;
+ break;
+ }
+ mutex_unlock(&rpc_private.requestid_mutex);
+}
+
+static void nvshm_rpc_rx_event(struct nvshm_channel *chan,
+			       struct nvshm_iobuf *iobuf)
+{
+ u8 *data = NVSHM_IOBUF_PAYLOAD(rpc_private.handle, iobuf);
+ struct nvshm_rpc_header *header;
+ struct nvshm_rpc_message *message;
+
+ header = (struct nvshm_rpc_header *) data;
+ data += sizeof(*header);
+ /* Create message structure */
+ message = kmalloc(sizeof(*message), GFP_KERNEL);
+ if (unlikely(!message)) {
+ pr_err("failed to allocate message\n");
+ goto failed;
+ }
+
+ message->private = iobuf;
+ message->payload = data;
+ message->length = iobuf->length - sizeof(*header);
+	if (header->msg_type == htonl(RPC_REPLY)) {
+ struct nvshm_rpc_request *request = request_get(header->xid);
+ nvshm_rpc_callback_t callback;
+ void *context;
+
+ if (!request) {
+ pr_err("invalid request ID %u\n", header->xid);
+ goto failed;
+ }
+ /* Free the request in case the callback wants to send */
+ callback = request->callback;
+ context = request->context;
+ request_delete(header->xid);
+ /* Call back */
+ if (callback)
+ callback(message, context);
+ else
+ nvshm_rpc_free(message);
+ } else {
+ /* Check payload length */
+ if (message->length == 0) {
+ /* Empty payload: for latency measurement */
+ struct nvshm_rpc_message *response;
+
+			response = nvshm_rpc_allocresponse(0, message);
+			if (response)
+				nvshm_rpc_send(response);
+			nvshm_rpc_free(message);
+ } else if (rpc_private.dispatcher_callback != NULL) {
+ /* Dispatch */
+ rpc_private.dispatcher_callback(message,
+ rpc_private.dispatcher_context);
+ } else {
+ nvshm_rpc_free(message);
+ }
+ }
+
+ return;
+failed:
+ kfree(message);
+ nvshm_iobuf_free(iobuf);
+}
+
+static void nvshm_rpc_error_event(struct nvshm_channel *chan,
+ enum nvshm_error_id error)
+{
+}
+
+static void nvshm_rpc_start_tx(struct nvshm_channel *chan)
+{
+}
+
+static struct nvshm_if_operations nvshm_rpc_ops = {
+ .rx_event = nvshm_rpc_rx_event,
+ .error_event = nvshm_rpc_error_event,
+ .start_tx = nvshm_rpc_start_tx
+};
+
+int nvshm_rpc_init(struct nvshm_handle *handle)
+{
+ int chan;
+ int i;
+
+ for (chan = 0; chan < NVSHM_MAX_CHANNELS; chan++)
+ if (handle->chan[chan].map.type == NVSHM_CHAN_RPC) {
+ rpc_private.chanid = chan;
+ rpc_private.handle = handle;
+ rpc_private.pchan = nvshm_open_channel(chan,
+ &nvshm_rpc_ops,
+ &rpc_private);
+ if (!rpc_private.pchan) {
+ pr_err("failed to open channel\n");
+ goto fail;
+ }
+ /* Only one RPC channel */
+ break;
+ }
+
+	/* Initialize request ID bookkeeping (never destroyed) */
+ mutex_init(&rpc_private.requestid_mutex);
+ rpc_private.requestid = 0;
+ for (i = 0; i < CONCURRENT_REQUESTS_MAX; ++i)
+ rpc_private.requests[i].requestid = 0;
+ rpc_private.free_requests_number = CONCURRENT_REQUESTS_MAX;
+ return 0;
+fail:
+ return -1;
+}
+
+void nvshm_rpc_cleanup(void)
+{
+ /* FIXME Check module ref count if we ever make this a module */
+ if (!rpc_private.pchan) {
+ pr_err("not initialized\n");
+ return;
+ }
+
+ nvshm_close_channel(rpc_private.pchan);
+ mutex_destroy(&rpc_private.requestid_mutex);
+}
+
+void nvshm_rpc_setdispatcher(nvshm_rpc_callback_t callback, void *context)
+{
+ /*
+ * The dispatcher callback is set at init and unset at cleanup, when no
+ * message can be received. This therefore does not need locking.
+ */
+ rpc_private.dispatcher_callback = callback;
+ rpc_private.dispatcher_context = context;
+}
+
+struct nvshm_rpc_message*
+nvshm_rpc_allocrequest(u32 size,
+ nvshm_rpc_callback_t callback,
+ void *context)
+{
+ u32 requestid;
+ struct nvshm_iobuf *iobuf;
+ struct nvshm_rpc_message *message;
+ u8 *data;
+ struct nvshm_rpc_header *header;
+
+	/* Check that the RPC channel is up */
+ if (!rpc_private.pchan) {
+ pr_err("not initialized\n");
+ return NULL;
+ }
+
+ /* Get request ID */
+ do {
+ requestid = request_create(callback, context);
+ /* Should not happen anyway... */
+ if (requestid == 0)
+ udelay(50);
+ } while (requestid == 0);
+
+ /* Initialize iobuf */
+ iobuf = nvshm_iobuf_alloc(rpc_private.pchan, sizeof(*header) + size);
+ if (!iobuf) {
+ request_delete(requestid);
+ pr_err("failed to allocate iobuf\n");
+ return NULL;
+ }
+
+ iobuf->length = sizeof(*header) + size;
+ data = NVSHM_IOBUF_PAYLOAD(rpc_private.handle, iobuf);
+
+ /* Initialize header */
+ header = (struct nvshm_rpc_header *) data;
+ header->xid = requestid;
+ header->msg_type = htonl(RPC_CALL);
+ data += sizeof(*header);
+
+ /* Initialize message */
+ message = kmalloc(sizeof(*message), GFP_KERNEL);
+ if (!message) {
+ request_delete(requestid);
+ nvshm_iobuf_free(iobuf);
+ pr_err("failed to allocate message\n");
+ return NULL;
+ }
+
+ message->private = iobuf;
+ message->payload = data;
+ message->length = size;
+ return message;
+}
+
+struct nvshm_rpc_message *nvshm_rpc_allocresponse(u32 size,
+ const struct nvshm_rpc_message *request)
+{
+ struct nvshm_iobuf *req_iobuf = request->private;
+ u8 *req_data;
+ struct nvshm_iobuf *iobuf;
+ struct nvshm_rpc_message *message;
+ u8 *data;
+ struct nvshm_rpc_header *req_header;
+ struct nvshm_rpc_header *header;
+
+	/* Read the request header */
+ if (!req_iobuf) {
+ pr_err("null request iobuf\n");
+ return NULL;
+ }
+
+ req_data = NVSHM_IOBUF_PAYLOAD(rpc_private.handle, req_iobuf);
+
+	/* Check that the RPC channel is up */
+ if (!rpc_private.pchan) {
+ pr_err("not initialized\n");
+ return NULL;
+ }
+ iobuf = nvshm_iobuf_alloc(rpc_private.pchan, sizeof(*header) + size);
+ if (!iobuf) {
+ pr_err("failed to allocate iobuf\n");
+ return NULL;
+ }
+
+ iobuf->length = sizeof(*header) + size;
+ data = NVSHM_IOBUF_PAYLOAD(rpc_private.handle, iobuf);
+
+ /* Copy header and opaque data from request */
+ header = (struct nvshm_rpc_header *) data;
+ req_header = (struct nvshm_rpc_header *) req_data;
+ header->xid = req_header->xid;
+ header->msg_type = htonl(RPC_REPLY);
+ data += sizeof(*header);
+
+ /* Initialize message */
+ message = kmalloc(sizeof(*message), GFP_KERNEL);
+ if (!message) {
+ pr_err("failed to allocate message\n");
+ nvshm_iobuf_free(iobuf);
+ return NULL;
+ }
+
+ message->private = iobuf;
+ message->payload = data;
+ message->length = size;
+ return message;
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_allocresponse);
+
+void nvshm_rpc_free(struct nvshm_rpc_message *message)
+{
+ struct nvshm_iobuf *iobuf = message->private;
+
+ nvshm_iobuf_free(iobuf);
+ kfree(message);
+}
+
+int nvshm_rpc_send(struct nvshm_rpc_message *message)
+{
+ /* Send */
+ struct nvshm_iobuf *iobuf = message->private;
+ int rc;
+
+ /* Note: as RPC traffic is very low, we don't care about flow control */
+ rc = nvshm_write(rpc_private.pchan, iobuf);
+ /* Do not free iobuf here (see SHM specification for details) */
+ kfree(message);
+ if (rc < 0)
+ nvshm_iobuf_free(iobuf);
+
+ return rc;
+}
diff --git a/drivers/staging/nvshm/nvshm_rpc.h b/drivers/staging/nvshm/nvshm_rpc.h
new file mode 100644
index 000000000000..476b5804dadc
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_rpc.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_H
+#define __DRIVERS_STAGING_NVSHM_NVSHM_RPC_H
+
+#include <linux/types.h>
+
+/**
+ * Type for an RPC message (request or response).
+ *
+ * @param payload Payload to send across
+ * @param length Payload length - DO NOT MODIFY
+ * @param private An internal context - DO NOT MODIFY
+ */
+struct nvshm_rpc_message {
+ void *payload;
+ /* The fields below are set at allocation time and are private */
+ u32 length;
+ void *private;
+};
+
+/**
+ * Set a default dispatcher.
+ *
+ * The default dispatcher is the dispatcher that receives requests from clients
+ * on the remote processor, while responses are sent back the originator's
+ * callback automatically.
+ *
+ * Reminder: the callback (or one of its callees) MUST free the message.
+ *
+ * @param callback Callback to use to receive incoming messages
+ * @param context Context to remind at callback time (may be NULL)
+ */
+void nvshm_rpc_setdispatcher(
+ void (*callback)(struct nvshm_rpc_message *message, void *context),
+ void *context);
+
+/**
+ * Allocate a message buffer for request.
+ *
+ * The point here is for the client to fill in this buffer and not make a copy.
+ * NOTE: SENT MESSAGES ARE FREED AUTOMATICALLY.
+ *
+ * Reminder: the callback (or one of its callees) MUST free the message.
+ *
+ * @param size Size of the buffer to allocate
+ * @param callback Callback to use to receive ASYNCHRONOUS responses
+ * @param context A user context to pass to the callback, if relevant
+ * @return a buffer, or NULL on error
+ */
+struct nvshm_rpc_message *nvshm_rpc_allocrequest(
+ u32 size,
+ void (*callback)(struct nvshm_rpc_message *message, void *context),
+ void *context);
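+
+/*
+ * Caller sketch (illustrative; my_reply_cb and the payload contents are
+ * made up). The reply arrives asynchronously on the callback, which must
+ * free it:
+ *
+ *	static void my_reply_cb(struct nvshm_rpc_message *reply, void *ctx)
+ *	{
+ *		// ... decode reply->payload / reply->length ...
+ *		nvshm_rpc_free(reply);
+ *	}
+ *
+ *	msg = nvshm_rpc_allocrequest(size, my_reply_cb, NULL);
+ *	if (msg) {
+ *		// fill in msg->payload (size bytes), then:
+ *		nvshm_rpc_send(msg);	// freed automatically once sent
+ *	}
+ */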
+
+/**
+ * Allocate a message buffer for response.
+ *
+ * The point here is for the client to fill in this buffer and avoid making a
+ * copy.
+ * NOTE: SENT MESSAGES ARE FREED AUTOMATICALLY.
+ *
+ * @param size Size of the buffer to allocate
+ * @param request Request message as received
+ * @return a buffer, or NULL on error
+ */
+struct nvshm_rpc_message *nvshm_rpc_allocresponse(
+ u32 size,
+ const struct nvshm_rpc_message *request);
+
+/**
+ * Free a message buffer.
+ *
+ * Use of this function should never be needed if the message is sent, as the
+ * destruction is then automatic. It is needed to destroy the response to
+ * synchronous calls though, and the message passed to both dispatcher and
+ * message callbacks.
+ *
+ * @param message Message to free
+ */
+void nvshm_rpc_free(
+ struct nvshm_rpc_message *message);
+
+/**
+ * Send a request or response message.
+ *
+ * Responses go through the callback (if any)
+ *
+ * @param message Request or response to send, automatically freed once sent
+ * @return 0, or negative on error
+ */
+int nvshm_rpc_send(
+ struct nvshm_rpc_message *message);
+
+#endif /* #ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_H */
diff --git a/drivers/staging/nvshm/nvshm_rpc_dispatcher.c b/drivers/staging/nvshm/nvshm_rpc_dispatcher.c
new file mode 100644
index 000000000000..62f0f6ca9c61
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_rpc_dispatcher.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/export.h>
+#include "nvshm_rpc_dispatcher.h"
+
+struct global_data {
+ bool cleaning_up;
+ struct workqueue_struct *wq;
+ struct nvshm_rpc_program *programs[NVSHM_RPC_PROGRAMS_MAX];
+};
+
+struct work_data {
+ struct nvshm_rpc_message *request;
+ struct work_struct work;
+};
+
+static struct global_data global;
+
+/* Meaningful Sun RPC protocol errors */
+static const char * const protocol_errors[] = {
+ "success",
+ "program unavailable",
+ "program version mismatch",
+ "procedure unavailable",
+ "garbage arguments",
+ "system error",
+};
+
+/*
+ * This function is called in a dedicated thread and calls the function
+ * managers, which in turn call the real function.
+ */
+static void nvshm_rpc_dispatcher(struct work_struct *work)
+{
+ struct work_data *data = container_of(work, struct work_data, work);
+ struct nvshm_rpc_message *request = data->request;
+ struct nvshm_rpc_message *response = NULL;
+ struct nvshm_rpc_procedure procedure;
+ struct nvshm_rpc_program *program = NULL;
+ nvshm_rpc_function_t function;
+ enum rpc_accept_stat rc;
+
+ /* Try to find a function to call */
+ nvshm_rpc_utils_decode_procedure(request, &procedure);
+ /* Find program */
+ if (procedure.program >= NVSHM_RPC_PROGRAMS_MAX) {
+ rc = RPC_PROG_UNAVAIL;
+ goto done;
+ }
+ program = global.programs[procedure.program];
+ if (!program) {
+ rc = RPC_PROG_UNAVAIL;
+ goto done;
+ }
+ /* Check version */
+ if ((procedure.version < program->version_min) ||
+ (procedure.version > program->version_max)) {
+ rc = RPC_PROG_MISMATCH;
+ goto done;
+ }
+ /* Find function */
+ if (procedure.procedure >= program->procedures_size) {
+ rc = RPC_PROC_UNAVAIL;
+ goto done;
+ }
+ function = program->procedures[procedure.procedure];
+ if (!function) {
+ rc = RPC_PROC_UNAVAIL;
+ goto done;
+ }
+ rc = function(procedure.version, request, &response);
+done:
+ /* Check we still have someone to reply to */
+ if (!global.cleaning_up) {
+ if (rc == RPC_PROG_MISMATCH) {
+ /* Create version mismatch error message */
+ struct nvshm_rpc_datum_in vers[] = {
+ NVSHM_RPC_IN_UINT(program->version_min),
+ NVSHM_RPC_IN_UINT(program->version_max),
+ };
+ u32 n = ARRAY_SIZE(vers);
+
+ pr_err("failed to reply to %d:%d:%d: %s\n",
+ procedure.program, procedure.version,
+ procedure.procedure, protocol_errors[rc]);
+ response = nvshm_rpc_utils_prepare_response(request, rc,
+ vers, n);
+ } else if (rc != RPC_SUCCESS) {
+ /* Create other error message */
+ pr_err("failed to reply to %d:%d:%d: %s\n",
+ procedure.program, procedure.version,
+ procedure.procedure, protocol_errors[rc]);
+ response = nvshm_rpc_utils_prepare_response(request, rc,
+ NULL, 0);
+ }
+ if (response) {
+ pr_debug("send response\n");
+ nvshm_rpc_send(response);
+ }
+ } else if (response)
+ nvshm_rpc_free(response);
+ nvshm_rpc_free(request);
+ kfree(data);
+}
+
+/*
+ * This function receives the requests and creates a thread to do the work.
+ */
+static void nvshm_rpc_dispatch(struct nvshm_rpc_message *request, void *context)
+{
+ struct work_data *data;
+
+	data = kmalloc(sizeof(struct work_data), GFP_KERNEL);
+	if (unlikely(!data)) {
+		pr_err("failed to allocate work data\n");
+		nvshm_rpc_free(request);
+		return;
+	}
+	data->request = request;
+ INIT_WORK(&data->work, nvshm_rpc_dispatcher);
+ queue_work(global.wq, &data->work);
+}
+
+int nvshm_rpc_dispatcher_init(void)
+{
+ global.wq = alloc_workqueue("nvshm_rpc_work", WQ_UNBOUND|WQ_HIGHPRI, 0);
+ if (!global.wq) {
+ pr_err("failed to create workqueue\n");
+ return -ENOMEM;
+ }
+ global.cleaning_up = false;
+ nvshm_rpc_setdispatcher(nvshm_rpc_dispatch, NULL);
+ return 0;
+}
+
+void nvshm_rpc_dispatcher_cleanup(void)
+{
+ /* This call likely means that the modem is gone */
+ global.cleaning_up = true;
+ nvshm_rpc_setdispatcher(NULL, NULL);
+ flush_workqueue(global.wq);
+ destroy_workqueue(global.wq);
+}
+
+int nvshm_rpc_program_register(enum nvshm_rpc_programs index,
+ struct nvshm_rpc_program *program)
+{
+ if (index >= NVSHM_RPC_PROGRAMS_MAX)
+ return -EINVAL;
+ if (global.programs[index])
+ return -EBUSY;
+ global.programs[index] = program;
+ pr_info("program #%d registered\n", index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_program_register);
+
+void nvshm_rpc_program_unregister(enum nvshm_rpc_programs index)
+{
+ if (index >= NVSHM_RPC_PROGRAMS_MAX)
+ return;
+ global.programs[index] = NULL;
+ pr_info("program #%d unregistered\n", index);
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_program_unregister);
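
A procedure handler plugged into this dispatcher follows the nvshm_rpc_function_t
signature declared in nvshm_rpc_utils.h further down. A minimal sketch (the
handler name and its single-status reply are illustrative, not part of this
patch):

    static enum rpc_accept_stat my_proc(u32 version,
                                        struct nvshm_rpc_message *request,
                                        struct nvshm_rpc_message **response)
    {
            /* No arguments to decode; reply with a single status of 0 */
            struct nvshm_rpc_datum_in resp_data[] = {
                    NVSHM_RPC_IN_SINT(0),
            };

            *response = nvshm_rpc_utils_prepare_response(request, RPC_SUCCESS,
                            resp_data, ARRAY_SIZE(resp_data));
            return *response ? RPC_SUCCESS : RPC_SYSTEM_ERR;
    }

Slots in a program's procedures[] array map directly to Sun RPC procedure
numbers, which is why the dispatcher rejects out-of-range or NULL entries
with RPC_PROC_UNAVAIL.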
diff --git a/drivers/staging/nvshm/nvshm_rpc_dispatcher.h b/drivers/staging/nvshm/nvshm_rpc_dispatcher.h
new file mode 100644
index 000000000000..8c75b6a52b4e
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_rpc_dispatcher.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_DISPATCHER_H
+#define __DRIVERS_STAGING_NVSHM_NVSHM_RPC_DISPATCHER_H
+
+#include "nvshm_rpc_shared.h"
+#include "nvshm_rpc_utils.h"
+
+/**
+ * Type for a program
+ *
+ * @param version_min Minimum program version supported
+ * @param version_max Maximum program version supported
+ * @param procedures_size Size of procedures array
+ * @param procedures Procedures array
+ */
+struct nvshm_rpc_program {
+ u32 version_min;
+ u32 version_max;
+ u32 procedures_size;
+ nvshm_rpc_function_t *procedures;
+};
+
+/**
+ * Register a program
+ *
+ * @param index Index
+ * @param program Program data to register
+ * @return 0 on success, negative otherwise
+ */
+int nvshm_rpc_program_register(
+ enum nvshm_rpc_programs index,
+ struct nvshm_rpc_program *program);
+
+/**
+ * Unregister a program
+ *
+ * @param index Index
+ */
+void nvshm_rpc_program_unregister(
+ enum nvshm_rpc_programs index);
+
+#endif /* #ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_DISPATCHER_H */
diff --git a/drivers/staging/nvshm/nvshm_rpc_prog_rsm.c b/drivers/staging/nvshm/nvshm_rpc_prog_rsm.c
new file mode 100644
index 000000000000..2d55e87e0055
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_rpc_prog_rsm.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <mach/tegra_bb.h>
+#include <mach/tegra_bbc_proxy.h>
+#include <nvshm_rpc_utils.h>
+#include <nvshm_rpc_dispatcher.h>
+
+/*
+ * RSM APIs:
+ * int bbc_edp_request(u32 mode, u32 state, u32 threshold)
+ * int bbc_edp_register(u32 num_states, u32 states[])
+ * int bbc_bw_register(u32 bw)
+ * int bbc_bw_request(u32 mode, u32 bw, u32 lt, u32 margin, u32 freq_floor,
+ * u32 flags)
+ */
+
+static struct device *proxy_dev, *tegra_bb;
+
+static enum rpc_accept_stat rpc_bbc_edp_request(
+ u32 version,
+ struct nvshm_rpc_message *req,
+ struct nvshm_rpc_message **resp)
+{
+ u32 mode;
+ u32 state;
+ u32 threshold;
+ struct nvshm_rpc_datum_out req_data[] = {
+ NVSHM_RPC_OUT_UINT(&mode),
+ NVSHM_RPC_OUT_UINT(&state),
+ NVSHM_RPC_OUT_UINT(&threshold),
+ };
+ int rc;
+
+ /* Decode request */
+ if (nvshm_rpc_utils_decode_args(req, false, req_data,
+ ARRAY_SIZE(req_data)) < 0)
+ return RPC_GARBAGE_ARGS;
+
+ /* Call */
+ rc = tegra_bbc_proxy_edp_request(proxy_dev, mode, state, threshold);
+
+ /* Encode response */
+ {
+ struct nvshm_rpc_datum_in resp_data[] = {
+ NVSHM_RPC_IN_SINT(rc),
+ };
+
+ *resp = nvshm_rpc_utils_prepare_response(req, RPC_SUCCESS,
+ resp_data, ARRAY_SIZE(resp_data));
+ }
+ return *resp ? RPC_SUCCESS : RPC_SYSTEM_ERR;
+}
+
+static enum rpc_accept_stat rpc_bbc_edp_register(
+ u32 version,
+ struct nvshm_rpc_message *req,
+ struct nvshm_rpc_message **resp)
+{
+ u32 num_states;
+ u32 *states;
+ struct nvshm_rpc_datum_out req_data[] = {
+ NVSHM_RPC_OUT_ARRAY(TYPE_UINT, &num_states, &states),
+ };
+ int rc;
+
+ /* Decode request */
+ if (nvshm_rpc_utils_decode_args(req, false, req_data,
+ ARRAY_SIZE(req_data)) < 0)
+ return RPC_GARBAGE_ARGS;
+
+ /* Call */
+ rc = tegra_bbc_proxy_edp_register(proxy_dev, num_states, states);
+ kfree(states);
+
+ /* Encode response */
+ {
+ struct nvshm_rpc_datum_in resp_data[] = {
+ NVSHM_RPC_IN_SINT(rc),
+ };
+
+ *resp = nvshm_rpc_utils_prepare_response(req, RPC_SUCCESS,
+ resp_data, ARRAY_SIZE(resp_data));
+ }
+ return *resp ? RPC_SUCCESS : RPC_SYSTEM_ERR;
+}
+
+static enum rpc_accept_stat rpc_bbc_bw_register(
+ u32 version,
+ struct nvshm_rpc_message *req,
+ struct nvshm_rpc_message **resp)
+{
+ u32 bw;
+ struct nvshm_rpc_datum_out req_data[] = {
+ NVSHM_RPC_OUT_UINT(&bw),
+ };
+ int rc;
+
+ /* Decode request */
+ if (nvshm_rpc_utils_decode_args(req, false, req_data,
+ ARRAY_SIZE(req_data)) < 0)
+ return RPC_GARBAGE_ARGS;
+
+ /* Call */
+ rc = tegra_bbc_proxy_bw_register(proxy_dev, bw);
+
+ /* Encode response */
+ {
+ struct nvshm_rpc_datum_in resp_data[] = {
+ NVSHM_RPC_IN_SINT(rc),
+ };
+
+ *resp = nvshm_rpc_utils_prepare_response(req, RPC_SUCCESS,
+ resp_data, ARRAY_SIZE(resp_data));
+ }
+ return *resp ? RPC_SUCCESS : RPC_SYSTEM_ERR;
+}
+
+static enum rpc_accept_stat rpc_bbc_bw_request(
+ u32 version,
+ struct nvshm_rpc_message *req,
+ struct nvshm_rpc_message **resp)
+{
+ u32 mode;
+ u32 bw;
+ u32 lt;
+ u32 margin;
+ u32 freq_floor;
+ u32 flags;
+ struct nvshm_rpc_datum_out req_data[] = {
+ NVSHM_RPC_OUT_UINT(&mode),
+ NVSHM_RPC_OUT_UINT(&bw),
+ NVSHM_RPC_OUT_UINT(&lt),
+ NVSHM_RPC_OUT_UINT(&margin),
+ NVSHM_RPC_OUT_UINT(&freq_floor),
+ NVSHM_RPC_OUT_UINT(&flags),
+ };
+ int rc;
+
+ /* Decode request */
+ if (nvshm_rpc_utils_decode_args(req, false, req_data,
+ ARRAY_SIZE(req_data)) < 0)
+ return RPC_GARBAGE_ARGS;
+
+ /* Call */
+ rc = tegra_bbc_proxy_bw_request(proxy_dev, mode, bw, lt, margin);
+ tegra_bb_set_emc_floor(tegra_bb, freq_floor, flags);
+
+ /* Encode response */
+ {
+ struct nvshm_rpc_datum_in resp_data[] = {
+ NVSHM_RPC_IN_SINT(rc),
+ };
+
+ *resp = nvshm_rpc_utils_prepare_response(req, RPC_SUCCESS,
+ resp_data, ARRAY_SIZE(resp_data));
+ }
+ return *resp ? RPC_SUCCESS : RPC_SYSTEM_ERR;
+}
+
+static nvshm_rpc_function_t procedures[] = {
+ rpc_bbc_edp_register,
+ rpc_bbc_edp_request,
+ rpc_bbc_bw_register,
+ rpc_bbc_bw_request,
+};
+
+static struct nvshm_rpc_program program = {
+ .version_min = 0,
+ .version_max = 0,
+ .procedures_size = ARRAY_SIZE(procedures),
+ .procedures = procedures,
+};
+
+static int __init prog_rsm_init(void)
+{
+ proxy_dev = bus_find_device_by_name(&platform_bus_type, NULL,
+ "tegra_bbc_proxy");
+ if (!proxy_dev) {
+ pr_err("failed to get proxy device pointer\n");
+ return -ENXIO;
+ }
+
+ tegra_bb = bus_find_device_by_name(&platform_bus_type, NULL,
+ "tegra_bb.0");
+ if (!tegra_bb) {
+ pr_err("failed to get tegra_bb device pointer\n");
+ put_device(proxy_dev);
+ return -ENXIO;
+ }
+
+ return nvshm_rpc_program_register(NVSHM_RPC_PROGRAM_RSM, &program);
+}
+
+static void __exit prog_rsm_exit(void)
+{
+ nvshm_rpc_program_unregister(NVSHM_RPC_PROGRAM_RSM);
+}
+
+module_init(prog_rsm_init);
+module_exit(prog_rsm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hervé Fache <hfache@nvidia.com>");
+MODULE_DESCRIPTION("NVSHM RPC RSM program");
diff --git a/drivers/staging/nvshm/nvshm_rpc_shared.h b/drivers/staging/nvshm/nvshm_rpc_shared.h
new file mode 100644
index 000000000000..7ad128db692a
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_rpc_shared.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_SHARED_H
+#define __DRIVERS_STAGING_NVSHM_NVSHM_RPC_SHARED_H
+
+/*
+ * This file contains all data shared between AP and BB for RPC purposes
+ */
+
+/** All possible programs */
+enum nvshm_rpc_programs {
+ NVSHM_RPC_PROGRAM_TEST,
+ NVSHM_RPC_PROGRAM_RSM,
+ NVSHM_RPC_PROGRAMS_MAX
+};
+
+#endif /* #ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_SHARED_H */
diff --git a/drivers/staging/nvshm/nvshm_rpc_utils.c b/drivers/staging/nvshm/nvshm_rpc_utils.c
new file mode 100644
index 000000000000..8d5ba6b8f240
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_rpc_utils.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "%s:" fmt, __func__
+
+#include <nvshm_rpc_utils.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/export.h>
+#include <linux/sunrpc/xdr.h>
+
+/*
+ * Call fields (on top of header)
+ * - RPC version (=2)
+ * - Program
+ * - Program version
+ * - Procedure
+ * - Credentials (x2) (=0,0)
+ * - Verifier (x2) (=0,0)
+ */
+#define SUN_RPC_CALL_HDR_SIZE 8
+
+/*
+ * Call fields (on top of header)
+ * - Reply status (always accepted here)
+ * - Verifier (x2) (=0,0)
+ * - Accept status
+ * (Mismatch info to be allocated as opaque)
+ */
+#define SUN_RPC_ACC_REPLY_HDR_SIZE 4
+
+/*
+ * Check that there are enough bytes left in message payload, given length
+ * needed, and current read pointer
+ */
+static inline bool is_too_short(const struct nvshm_rpc_message *message,
+ const void *reader,
+ u32 data_needed)
+{
+ u32 data_left = message->length - (reader - message->payload);
+ if (data_left < data_needed) {
+ /* We use 1 to check for emptiness */
+ if (data_needed != 1)
+ pr_err("Not enough data left in buffer: %d < %d\n",
+ data_left, data_needed);
+
+ return true;
+ }
+
+ return false;
+}
+
+static int nvshm_rpc_utils_encode_args(const struct nvshm_rpc_datum_in *data,
+ u32 number,
+ u32 *writer)
+{
+ u32 n;
+
+ for (n = 0; n < number; ++n) {
+ const struct nvshm_rpc_datum_in *datum = &data[n];
+
+ if ((datum->type & TYPE_ARRAY_FLAG) == 0) {
+ switch (datum->type) {
+ case TYPE_SINT:
+ *writer++ = cpu_to_be32(datum->d.sint_data);
+ break;
+ case TYPE_UINT:
+ *writer++ = cpu_to_be32(datum->d.uint_data);
+ break;
+ case TYPE_STRING:
+ writer = xdr_encode_opaque(writer,
+ datum->d.string_data,
+ strlen(datum->d.string_data) + 1);
+ break;
+ case TYPE_BLOB:
+ writer = xdr_encode_opaque(writer,
+ datum->d.blob_data,
+ datum->length);
+ break;
+ default:
+ pr_err("unknown RPC type %d\n", datum->type);
+ return -EINVAL;
+ }
+ } else {
+ enum nvshm_rpc_datumtype type;
+
+ type = datum->type & ~TYPE_ARRAY_FLAG;
+ *writer++ = cpu_to_be32(datum->length);
+ if ((type == TYPE_SINT) || (type == TYPE_UINT)) {
+ const u32 *a;
+ u32 d;
+
+ a = (const u32 *) datum->d.blob_data;
+ for (d = 0; d < datum->length; ++d, ++a)
+ *writer++ = cpu_to_be32(*a);
+ } else if (type == TYPE_STRING) {
+ const char * const *a;
+ u32 d;
+
+ a = (const char * const *) datum->d.blob_data;
+ for (d = 0; d < datum->length; ++d, ++a)
+ writer = xdr_encode_opaque(writer, *a,
+ strlen(*a) + 1);
+ } else {
+ pr_err("invalid RPC type for array %d\n", type);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+int nvshm_rpc_utils_encode_size(bool is_response,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number)
+{
+ int quad_length;
+ u32 n;
+
+ if (is_response)
+ quad_length = SUN_RPC_ACC_REPLY_HDR_SIZE;
+ else
+ quad_length = SUN_RPC_CALL_HDR_SIZE;
+ for (n = 0; n < number; ++n) {
+ const struct nvshm_rpc_datum_in *datum = &data[n];
+
+ if ((datum->type & TYPE_ARRAY_FLAG) == 0) {
+ switch (datum->type) {
+ case TYPE_SINT:
+ case TYPE_UINT:
+ ++quad_length;
+ break;
+ case TYPE_STRING:
+ ++quad_length;
+ quad_length += XDR_QUADLEN(
+ strlen(datum->d.string_data) + 1);
+ break;
+ case TYPE_BLOB:
+ ++quad_length;
+ quad_length += XDR_QUADLEN(datum->length);
+ break;
+ default:
+ pr_err("unknown RPC type %d\n", datum->type);
+ return -EINVAL;
+ }
+ } else {
+ enum nvshm_rpc_datumtype type;
+
+ type = datum->type & ~TYPE_ARRAY_FLAG;
+ ++quad_length;
+ if ((type == TYPE_SINT) || (type == TYPE_UINT)) {
+ quad_length += datum->length;
+ } else if (type == TYPE_STRING) {
+ const char * const *a;
+ u32 d;
+
+ a = (const char * const *) datum->d.blob_data;
+ for (d = 0; d < datum->length; ++d, ++a) {
+ u32 len = strlen(*a) + 1;
+
+ ++quad_length;
+ quad_length += XDR_QUADLEN(len);
+ }
+ } else {
+ pr_err("invalid RPC type for array %d\n", type);
+ return -EINVAL;
+ }
+ }
+ }
+ return quad_length << 2;
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_utils_encode_size);
+
+int nvshm_rpc_utils_encode_request(const struct nvshm_rpc_procedure *procedure,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number,
+ struct nvshm_rpc_message *message)
+{
+ u32 *writer = message->payload;
+
+ /* RPC version */
+ *writer++ = cpu_to_be32(2);
+ /* Procedure */
+ *writer++ = cpu_to_be32(procedure->program);
+ *writer++ = cpu_to_be32(procedure->version);
+ *writer++ = cpu_to_be32(procedure->procedure);
+ /* Authentication (AUTH_NONE, size = 0) */
+ *writer++ = cpu_to_be32(0);
+ *writer++ = cpu_to_be32(0);
+ /* Verifier (AUTH_NONE, size = 0) */
+ *writer++ = cpu_to_be32(0);
+ *writer++ = cpu_to_be32(0);
+ return nvshm_rpc_utils_encode_args(data, number, writer);
+}
+
+int nvshm_rpc_utils_encode_response(enum rpc_accept_stat status,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number,
+ struct nvshm_rpc_message *message)
+{
+ u32 *writer = message->payload;
+
+ /* Reply status (always accepted) */
+ *writer++ = cpu_to_be32(0);
+ /* Verifier (AUTH_NONE, size = 0) */
+ *writer++ = cpu_to_be32(0);
+ *writer++ = cpu_to_be32(0);
+ /* Accept status */
+ *writer++ = cpu_to_be32(status);
+ return nvshm_rpc_utils_encode_args(data, number, writer);
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_utils_encode_response);
+
+int nvshm_rpc_utils_make_request(
+ const struct nvshm_rpc_procedure *procedure,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number,
+ void (*callback)(struct nvshm_rpc_message *message, void *context),
+ void *context)
+{
+ int rc;
+ struct nvshm_rpc_message *request;
+ int length;
+
+ length = nvshm_rpc_utils_encode_size(false, data, number);
+ if (length < 0)
+ return length;
+
+ request = nvshm_rpc_allocrequest(length, callback, context);
+ if (!request)
+ return -ENOMEM;
+
+ rc = nvshm_rpc_utils_encode_request(procedure, data, number, request);
+ if (rc < 0)
+ goto error;
+
+ rc = nvshm_rpc_send(request);
+ if (rc < 0)
+ goto error;
+
+ return 0;
+error:
+ nvshm_rpc_free(request);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_utils_make_request);
+
+struct nvshm_rpc_message *nvshm_rpc_utils_prepare_response(
+ const struct nvshm_rpc_message *request,
+ enum rpc_accept_stat status,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number)
+{
+ struct nvshm_rpc_message *response;
+ int length;
+
+ length = nvshm_rpc_utils_encode_size(true, data, number);
+ if (length < 0)
+ return NULL;
+
+ response = nvshm_rpc_allocresponse(length, request);
+ if (!response)
+ return NULL;
+
+ if (nvshm_rpc_utils_encode_response(status, data, number, response)) {
+ nvshm_rpc_free(response);
+ return NULL;
+ }
+ return response;
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_utils_prepare_response);
+
+void nvshm_rpc_utils_decode_procedure(const struct nvshm_rpc_message *request,
+ struct nvshm_rpc_procedure *procedure)
+{
+ const u32 *reader = request->payload;
+
+ /* Skip RPC version */
+ reader += 1;
+ procedure->program = be32_to_cpup((__be32 *) reader++);
+ procedure->version = be32_to_cpup((__be32 *) reader++);
+ procedure->procedure = be32_to_cpup((__be32 *) reader);
+}
+
+enum rpc_accept_stat
+nvshm_rpc_utils_decode_status(const struct nvshm_rpc_message *response)
+{
+ const u32 *reader = response->payload;
+
+ /* Skip reply status and verifier */
+ reader += 3;
+ return be32_to_cpup((__be32 *) reader);
+}
+
+int nvshm_rpc_utils_decode_versions(
+ const struct nvshm_rpc_message *response,
+ u32 *version_min,
+ u32 *version_max)
+{
+ struct nvshm_rpc_datum_out versions[] = {
+ NVSHM_RPC_OUT_UINT(version_min),
+ NVSHM_RPC_OUT_UINT(version_max),
+ };
+ return nvshm_rpc_utils_decode_args(response, true, versions,
+ ARRAY_SIZE(versions));
+}
+
+int nvshm_rpc_utils_decode_args(const struct nvshm_rpc_message *message,
+ bool is_response,
+ struct nvshm_rpc_datum_out *data,
+ u32 number)
+{
+ const __be32 *reader = message->payload;
+ void *arrays[number];
+ u32 n, arrays_index = 0;
+ int rc = -EPROTO;
+
+ if (is_response) {
+ if (is_too_short(message, reader, SUN_RPC_ACC_REPLY_HDR_SIZE))
+ return rc;
+
+ reader += SUN_RPC_ACC_REPLY_HDR_SIZE;
+ } else {
+ if (is_too_short(message, reader, SUN_RPC_CALL_HDR_SIZE))
+ return rc;
+
+ reader += SUN_RPC_CALL_HDR_SIZE;
+ }
+
+ for (n = 0; n < number; ++n) {
+ struct nvshm_rpc_datum_out *datum = &data[n];
+ enum nvshm_rpc_datumtype type = datum->type & ~TYPE_ARRAY_FLAG;
+ u32 uint;
+
+ /* There is always a number, either the data or its length */
+ if (is_too_short(message, reader, sizeof(uint)))
+ goto err_mem_free;
+
+ uint = be32_to_cpup((__be32 *) reader);
+ reader++;
+ if ((datum->type & TYPE_ARRAY_FLAG) == 0) {
+ if (((type == TYPE_STRING) || (type == TYPE_BLOB)) &&
+ is_too_short(message, reader, sizeof(uint)))
+ goto err_mem_free;
+
+ switch (datum->type) {
+ case TYPE_SINT:
+ *datum->d.sint_data = (s32) uint;
+ break;
+ case TYPE_UINT:
+ *datum->d.uint_data = uint;
+ break;
+ case TYPE_STRING:
+ *datum->d.string_data = (const char *) reader;
+ reader += XDR_QUADLEN(uint);
+ break;
+ case TYPE_BLOB:
+ *datum->length = uint;
+ *datum->d.blob_data = reader;
+ reader += XDR_QUADLEN(uint);
+ break;
+ default:
+ pr_err("unknown RPC type %d\n", datum->type);
+ rc = -EINVAL;
+ goto err_mem_free;
+ }
+ } else {
+ *datum->length = uint;
+ if ((type == TYPE_SINT) || (type == TYPE_UINT)) {
+ u32 *a;
+ u32 d;
+
+ if (is_too_short(message, reader, uint * 4))
+ goto err_mem_free;
+
+ a = kmalloc(uint * sizeof(u32), GFP_KERNEL);
+ if (!a) {
+ pr_err("kmalloc failed\n");
+ rc = -ENOMEM;
+ goto err_mem_free;
+ }
+
+ arrays[arrays_index++] = a;
+ *datum->d.blob_data = a;
+ for (d = 0; d < uint; ++d, ++a) {
+ *a = be32_to_cpup((__be32 *) reader);
+ reader++;
+ }
+ } else if (type == TYPE_STRING) {
+ const char **a;
+ u32 d;
+
+ a = kmalloc(uint * sizeof(const char *),
+ GFP_KERNEL);
+ if (!a) {
+ pr_err("kmalloc failed\n");
+ rc = -ENOMEM;
+ goto err_mem_free;
+ }
+
+ arrays[arrays_index++] = a;
+ *datum->d.blob_data = a;
+ for (d = 0; d < uint; ++d, ++a) {
+ u32 len;
+
+ if (is_too_short(message, reader,
+ sizeof(len)))
+ goto err_mem_free;
+
+ len = be32_to_cpup((__be32 *) reader);
+ reader++;
+ if (is_too_short(message, reader,
+ XDR_QUADLEN(len) << 2))
+ goto err_mem_free;
+
+ *a = (const char *) reader;
+ reader += XDR_QUADLEN(len);
+ }
+ } else {
+ pr_err("invalid RPC type for array %d\n", type);
+ rc = -EINVAL;
+ goto err_mem_free;
+ }
+ }
+ }
+
+ /* Check that things went well and there is no more data in buffer */
+ if ((n == number) && is_too_short(message, reader, 1))
+ return 0;
+
+err_mem_free:
+ /* Failure: need to free what's been allocated */
+ for (n = 0; n < arrays_index; n++)
+ kfree(arrays[n]);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_utils_decode_args);
+
+int nvshm_rpc_utils_decode_response(
+ const struct nvshm_rpc_message *response,
+ enum rpc_accept_stat *status,
+ struct nvshm_rpc_datum_out *data,
+ u32 number,
+ u32 *version_min,
+ u32 *version_max)
+{
+ int rc = 0;
+
+ *status = nvshm_rpc_utils_decode_status(response);
+ if (*status == RPC_SUCCESS)
+ rc = nvshm_rpc_utils_decode_args(response, true, data, number);
+ else if ((*status == RPC_PROG_MISMATCH) && version_min && version_max)
+ rc = nvshm_rpc_utils_decode_versions(response, version_min,
+ version_max);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(nvshm_rpc_utils_decode_response);
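
Put together, the helpers above give the client side of a call. A sketch,
assuming the callback owns the response message (as the dispatcher does for
requests); the RSM program/procedure numbers follow the procedures[] array in
nvshm_rpc_prog_rsm.c, though in this patch that program is served on the AP,
so the real caller lives on the BB side:

    static void bw_register_done(struct nvshm_rpc_message *response,
                                 void *context)
    {
            enum rpc_accept_stat status;
            s32 remote_rc;
            struct nvshm_rpc_datum_out resp_data[] = {
                    NVSHM_RPC_OUT_SINT(&remote_rc),
            };

            if (!nvshm_rpc_utils_decode_response(response, &status, resp_data,
                                                 ARRAY_SIZE(resp_data),
                                                 NULL, NULL) &&
                status == RPC_SUCCESS)
                    pr_debug("bbc_bw_register returned %d\n", remote_rc);
            nvshm_rpc_free(response);
    }

    static int call_bbc_bw_register(u32 bw)
    {
            struct nvshm_rpc_procedure proc = {
                    .program = NVSHM_RPC_PROGRAM_RSM,
                    .version = 0,
                    .procedure = 2, /* slot of rpc_bbc_bw_register */
            };
            struct nvshm_rpc_datum_in req_data[] = {
                    NVSHM_RPC_IN_UINT(bw),
            };

            return nvshm_rpc_utils_make_request(&proc, req_data,
                                                ARRAY_SIZE(req_data),
                                                bw_register_done, NULL);
    }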
diff --git a/drivers/staging/nvshm/nvshm_rpc_utils.h b/drivers/staging/nvshm/nvshm_rpc_utils.h
new file mode 100644
index 000000000000..0961edd5906e
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_rpc_utils.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_UTILS_H
+#define __DRIVERS_STAGING_NVSHM_NVSHM_RPC_UTILS_H
+
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/msg_prot.h>
+#include "nvshm_rpc.h"
+
+#define NVSHM_RPC_IN_UINT(data) { TYPE_UINT, 0, .d.uint_data = (data) }
+#define NVSHM_RPC_IN_SINT(data) { TYPE_SINT, 0, .d.sint_data = (data) }
+#define NVSHM_RPC_IN_STRING(data) { TYPE_STRING, 0, .d.string_data = (data) }
+#define NVSHM_RPC_IN_BLOB(data, len) { TYPE_BLOB, (len), .d.blob_data = (data) }
+#define NVSHM_RPC_IN_ARRAY(type, len, data) \
+ { TYPE_ARRAY_FLAG|type, (len), .d.blob_data = (data) }
+
+#define NVSHM_RPC_OUT_UINT(data) { TYPE_UINT, 0, .d.uint_data = (data) }
+#define NVSHM_RPC_OUT_SINT(data) { TYPE_SINT, 0, .d.sint_data = (data) }
+#define NVSHM_RPC_OUT_STRING(data) { TYPE_STRING, 0, .d.string_data = (data) }
+#define NVSHM_RPC_OUT_BLOB(data, len_p) \
+ { TYPE_BLOB, (len_p), .d.blob_data = (data) }
+#define NVSHM_RPC_OUT_ARRAY(type, len_p, data) \
+ { TYPE_ARRAY_FLAG|type, (len_p), .d.blob_data = (const void **)(data) }
+
+/** Known types of data */
+enum nvshm_rpc_datumtype {
+ TYPE_SINT = 0,
+ TYPE_UINT = 1,
+ TYPE_STRING = 2,
+ TYPE_BLOB = 3,
+ TYPE_ARRAY_FLAG = 0x1000,
+};
+
+/**
+ * Type for an input function parameter
+ *
+ * @param type Type of data
+ * @param length Length for blob
+ * @param *data Pointer to the value
+ */
+struct nvshm_rpc_datum_in {
+ enum nvshm_rpc_datumtype type;
+ u32 length;
+ union {
+ s32 sint_data;
+ u32 uint_data;
+ const char *string_data;
+ const char *blob_data;
+ } d;
+};
+
+/**
+ * Type for an output function parameter
+ *
+ * @param type Type of data
+ * @param length Length for blob
+ * @param *data Pointer to the value
+ */
+struct nvshm_rpc_datum_out {
+ enum nvshm_rpc_datumtype type;
+ u32 *length;
+ union {
+ s32 *sint_data;
+ u32 *uint_data;
+ const char **string_data;
+ const void **blob_data;
+ } d;
+};
+
+/**
+ * Type for a procedure
+ *
+ * @param program Program ID
+ * @param version Program version
+ * @param procedure Procedure ID
+ */
+struct nvshm_rpc_procedure {
+ u32 program;
+ u32 version;
+ u32 procedure;
+};
+
+/**
+ * Type for a function
+ *
+ * @param version Version requested by caller
+ * @param request Request message
+ * @param response Response message pointer to fill in
+ * @return Accept status (should be either RPC_SUCCESS or RPC_SYSTEM_ERR)
+ */
+typedef enum rpc_accept_stat (*nvshm_rpc_function_t)(
+ u32 version,
+ struct nvshm_rpc_message *request,
+ struct nvshm_rpc_message **response);
+
+/**
+ * Determines the size of message buffer to allocate given function data
+ *
+ * NOTE: this function accounts for header data too
+ *
+ * @param is_response Whether this is a response, or a request
+ * @param data Function parameters
+ * @param number Number of function parameters
+ * @return Size to allocate on success, negative on error
+ */
+int nvshm_rpc_utils_encode_size(
+ bool is_response,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number);
+
+/**
+ * Populate a request buffer given procedure and data
+ *
+ * @param procedure Unique ID of the procedure
+ * @param data Function parameters
+ * @param number Number of function parameters
+ * @param request Message to populate
+ * @return 0 on success, negative on error
+ */
+int nvshm_rpc_utils_encode_request(
+ const struct nvshm_rpc_procedure *procedure,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number,
+ struct nvshm_rpc_message *request);
+
+/**
+ * Populate a response buffer given procedure and data
+ *
+ * @param status Accept status of the request
+ * @param data Function parameters
+ * @param number Number of function parameters
+ * @param response Message to populate
+ * @return 0 on success, negative on error
+ */
+int nvshm_rpc_utils_encode_response(
+ enum rpc_accept_stat status,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number,
+ struct nvshm_rpc_message *response);
+
+/**
+ * Allocate, populate and send a request buffer given procedure, data and
+ * callback information
+ *
+ * @param procedure Unique ID of the procedure
+ * @param data Function parameters
+ * @param number Number of function parameters
+ * @param callback Callback to use to receive ASYNCHRONOUS responses
+ * @param context A user context to pass to the callback, if relevant
+ * @return 0 on success, negative on error
+ */
+int nvshm_rpc_utils_make_request(
+ const struct nvshm_rpc_procedure *procedure,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number,
+ void (*callback)(struct nvshm_rpc_message *message, void *context),
+ void *context);
+
+/**
+ * Allocate and populate a response buffer given request, status and data
+ *
+ * @param request Request message as received
+ * @param status Accept status of the request
+ * @param data Function parameters
+ * @param number Number of function parameters
+ * @return a response to send, or NULL on error
+ */
+struct nvshm_rpc_message *nvshm_rpc_utils_prepare_response(
+ const struct nvshm_rpc_message *request,
+ enum rpc_accept_stat status,
+ const struct nvshm_rpc_datum_in *data,
+ u32 number);
+
+/**
+ * Fills in the procedure info from a request message
+ *
+ * NOTE: the decoded procedure info is garbage if used on a response message
+ *
+ * @param request Message to read from
+ * @param procedure Procedure to fill in
+ */
+void nvshm_rpc_utils_decode_procedure(
+ const struct nvshm_rpc_message *request,
+ struct nvshm_rpc_procedure *procedure);
+
+/**
+ * Returns the accept status from a response message
+ *
+ * NOTE: the return code is garbage if used on a request message
+ *
+ * @param response Message to read from
+ * @return Accept status
+ */
+enum rpc_accept_stat nvshm_rpc_utils_decode_status(
+ const struct nvshm_rpc_message *response);
+
+/**
+ * Returns the low and high version numbers supported
+ *
+ * NOTE: the versions are garbage if not used on an RPC_PROG_MISMATCH message
+ *
+ * @param response Message to read from
+ * @param version_min Minimum version supported by service for this program
+ * @param version_max Maximum version supported by service for this program
+ * @return 0 on success, negative on error
+ */
+int nvshm_rpc_utils_decode_versions(
+ const struct nvshm_rpc_message *response,
+ u32 *version_min,
+ u32 *version_max);
+
+/**
+ * Decode a message buffer to fill up function data
+ *
+ * This function takes a message buffer and runs through it to fill up the
+ * function parameters to be returned.
+ *
+ * @param message Message to read from
+ * @param is_response Whether this is a response, or a request
+ * @param data Function parameters to be filled in
+ * @param number Number of possible function parameters
+ * @return 0 on success, negative on error
+ */
+int nvshm_rpc_utils_decode_args(
+ const struct nvshm_rpc_message *message,
+ bool is_response,
+ struct nvshm_rpc_datum_out *data,
+ u32 number);
+
+/**
+ * Decode response and return accept status
+ *
+ * NOTE: the min/max versions supported are only valid if the status is
+ * RPC_PROG_MISMATCH and version_min/version_max are non-NULL pointers.
+ *
+ * @param response Message to read from
+ * @param status Accept status of the request
+ * @param data Function parameters to be filled in
+ * @param number Number of possible function parameters
+ * @param version_min Minimum version supported by service for this program
+ * @param version_max Maximum version supported by service for this program
+ * @return 0 on success, negative on error
+ */
+int nvshm_rpc_utils_decode_response(
+ const struct nvshm_rpc_message *response,
+ enum rpc_accept_stat *status,
+ struct nvshm_rpc_datum_out *data,
+ u32 number,
+ u32 *version_min,
+ u32 *version_max);
+
+#endif /* #ifndef __DRIVERS_STAGING_NVSHM_NVSHM_RPC_UTILS_H */
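
As a worked example of the sizing rules implemented in nvshm_rpc_utils.c, a
request carrying one uint and one 5-byte blob costs, in 4-byte quads:

    /* nvshm_rpc_utils_encode_size(false, data, 2) with:
     *   data[0] = NVSHM_RPC_IN_UINT(x);     1 quad
     *   data[1] = NVSHM_RPC_IN_BLOB(p, 5);  1 length quad + XDR_QUADLEN(5)
     *                                       = 1 + 2 quads (blob padded to 8)
     * call header (SUN_RPC_CALL_HDR_SIZE)   8 quads
     * total: 12 quads << 2 = 48 bytes
     */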
diff --git a/drivers/staging/nvshm/nvshm_stats.c b/drivers/staging/nvshm/nvshm_stats.c
new file mode 100644
index 000000000000..a858c175e10e
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_stats.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <linux/err.h>
+#include <linux/nvshm_stats.h>
+#include "nvshm_priv.h"
+#include "nvshm_iobuf.h"
+
+#define B2A(h, x) ((void *)(x) + ((int)(h)->mb_base_virt) - NVSHM_IPC_BB_BASE)
+
+struct nvshm_stats_desc {
+ /** The type of data. */
+ char type; /* enums are chars on the BB side */
+ /** The name of the data field. */
+ char name[64];
+ char pad[3]; /* modem needs pointers aligned */
+ /** A pointer to the decode information if this is a sub
+ * structure entry (type = NVSHM_STATS_SUB). */
+ const struct nvshm_stats_desc *sub;
+ /** The offset of this entry from the start of the current
+ * structure. */
+ unsigned int offset;
+ /** The number of elements. */
+ int size;
+ /** Size of each element, 0 if not applicable. */
+ int elem_size;
+};
+
+/** The stats data header structure.
+ * This data structure is automatically added to the head of every
+ * top level stats data structure by the STATS_TOP_STRUCTURE_START()
+ * macro. It is used to hold stats system specific data (currently
+ * only the enabled flag). */
+struct data_header {
+ /** Indicates whether this stats data is enabled or not. */
+ unsigned int enabled;
+};
+
+/** Structure used to hold a stats entry in the stats system. */
+struct table_entry {
+ /** Pointer to header within the stats data.
+ * This will be NULL until the stats entry has been installed.
+ * This is assumed to be the start of the stats data and offsets
+ * to stats fields can be applied to this address. */
+ struct data_header *data;
+ /** Total size of stats data pointed to by data. */
+ unsigned int size;
+ /** Pointer to decode entries array. */
+ const struct nvshm_stats_desc *desc;
+
+};
+
+struct nvshm_stats_private {
+ const struct nvshm_handle *handle;
+ void *address;
+ int records_no;
+};
+
+/* the notifier list remains valid for the life of the kernel */
+static RAW_NOTIFIER_HEAD(notifier_list);
+
+/* priv gets reset at modem [re-]boot */
+static struct nvshm_stats_private priv;
+
+void nvshm_stats_init(struct nvshm_handle *handle)
+{
+ priv.handle = handle;
+ priv.address = handle->stats_base_virt;
+ priv.records_no = 0;
+ raw_notifier_call_chain(&notifier_list, NVSHM_STATS_MODEM_UP, NULL);
+}
+
+void nvshm_stats_cleanup(void)
+{
+ raw_notifier_call_chain(&notifier_list, NVSHM_STATS_MODEM_DOWN, NULL);
+}
+
+const u32 *nvshm_stats_top(const char *top_name,
+ struct nvshm_stats_iter *it)
+{
+ const struct table_entry *entry;
+ const u32 *rc = ERR_PTR(-ENOENT);
+ unsigned int i, total_no;
+
+ if (!priv.handle->stats_size)
+ return ERR_PTR(-ENODATA);
+
+ total_no = *(const unsigned int *) priv.address;
+ entry = priv.address + sizeof(unsigned int);
+ for (i = 0; i < total_no; i++, entry++) {
+ const struct nvshm_stats_desc *desc;
+
+ if (!entry->desc || !entry->data)
+ continue;
+
+ desc = B2A(priv.handle, entry->desc);
+ if (!strcmp(desc->name, top_name)) {
+ it->desc = desc;
+ it->data = B2A(priv.handle, entry->data);
+ rc = (const u32 *) it->data;
+ it->data += sizeof(*rc);
+ break;
+ }
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_top);
+
+int nvshm_stats_sub(const struct nvshm_stats_iter *it,
+ int index,
+ struct nvshm_stats_iter *sub_it)
+{
+ if (it->desc->type != NVSHM_STATS_SUB)
+ return -EINVAL;
+
+ if (index >= it->desc->size)
+ return -ERANGE;
+
+ sub_it->desc = B2A(priv.handle, it->desc->sub);
+ sub_it->data = it->data + index * it->desc->elem_size;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_sub);
+
+int nvshm_stats_next(struct nvshm_stats_iter *it)
+{
+ if ((it->desc->type != NVSHM_STATS_START) &&
+ (it->desc->type != NVSHM_STATS_END))
+ it->data += it->desc->size * it->desc->elem_size;
+
+ it->desc++;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_next);
+
+const char *nvshm_stats_name(const struct nvshm_stats_iter *it)
+{
+ return it->desc->name;
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_name);
+
+enum nvshm_stats_type nvshm_stats_type(const struct nvshm_stats_iter *it)
+{
+ return it->desc->type;
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_type);
+
+int nvshm_stats_elems(const struct nvshm_stats_iter *it)
+{
+ return it->desc->size;
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_elems);
+
+static inline int check_type_index(const struct nvshm_stats_iter *it,
+ enum nvshm_stats_type type,
+ int index)
+{
+ if (it->desc->type != type)
+ return -EINVAL;
+
+ if (index >= it->desc->size)
+ return -ERANGE;
+
+ return 0;
+}
+
+u32 *nvshm_stats_valueptr_uint32(const struct nvshm_stats_iter *it,
+ int index)
+{
+ u32 *array;
+ int rc;
+
+ rc = check_type_index(it, NVSHM_STATS_UINT32, index);
+ if (rc)
+ return ERR_PTR(rc);
+
+ array = (u32 *) it->data;
+ return &array[index];
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_valueptr_uint32);
+
+s32 *nvshm_stats_valueptr_sint32(const struct nvshm_stats_iter *it,
+ int index)
+{
+ s32 *array;
+ int rc;
+
+ rc = check_type_index(it, NVSHM_STATS_SINT32, index);
+ if (rc)
+ return ERR_PTR(rc);
+
+ array = (s32 *) it->data;
+ return &array[index];
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_valueptr_sint32);
+
+u64 *nvshm_stats_valueptr_uint64(const struct nvshm_stats_iter *it,
+ int index)
+{
+ u64 *array;
+ int rc;
+
+ rc = check_type_index(it, NVSHM_STATS_UINT64, index);
+ if (rc)
+ return ERR_PTR(rc);
+
+ array = (u64 *) it->data;
+ return &array[index];
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_valueptr_uint64);
+
+void nvshm_stats_register(struct notifier_block *nb)
+{
+ raw_notifier_chain_register(&notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_register);
+
+void nvshm_stats_unregister(struct notifier_block *nb)
+{
+ raw_notifier_chain_unregister(&notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(nvshm_stats_unregister);
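
The iterator API above is meant to be driven by a consumer walking the
BB-exported descriptor table. A sketch, assuming a hypothetical top-level
entry name ("dxp_stats") and that iteration stops at the NVSHM_STATS_END
descriptor; the u32 returned by nvshm_stats_top() points at the entry's
enabled flag:

    struct nvshm_stats_iter it;
    const u32 *enabled = nvshm_stats_top("dxp_stats", &it);

    if (IS_ERR(enabled))
            return PTR_ERR(enabled);
    if (!*enabled)
            return 0; /* stats entry present but disabled */

    while (nvshm_stats_type(&it) != NVSHM_STATS_END) {
            if (nvshm_stats_type(&it) == NVSHM_STATS_UINT32)
                    pr_info("%s = %u\n", nvshm_stats_name(&it),
                            *nvshm_stats_valueptr_uint32(&it, 0));
            nvshm_stats_next(&it);
    }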
diff --git a/drivers/staging/nvshm/nvshm_tty.c b/drivers/staging/nvshm/nvshm_tty.c
new file mode 100644
index 000000000000..b5825f5675f2
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_tty.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) 2012-2013 NVIDIA Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <linux/skbuff.h>
+
+#include "nvshm_types.h"
+#include "nvshm_if.h"
+#include "nvshm_priv.h"
+#include "nvshm_iobuf.h"
+
+/* NVSHM interface */
+
+#define MAX_OUTPUT_SIZE 1500
+
+#define NVSHM_TTY_UP (1)
+
+/*
+ * This structure holds per-TTY-line information, such as the nvshm_iobuf
+ * queues and a back reference to the nvshm channel/driver
+ */
+
+struct nvshm_tty_line {
+ int nvshm_chan; /* nvshm channel */
+ int throttled;
+ struct tty_port port;
+ /* iobuf queues for nvshm flow control support */
+ struct nvshm_iobuf *io_queue_head;
+ struct nvshm_iobuf *io_queue_tail;
+ struct nvshm_channel *pchan;
+ int errno;
+ spinlock_t lock;
+};
+
+struct nvshm_tty_device {
+ int up;
+ struct tty_driver *tty_driver;
+ struct nvshm_handle *handle;
+ int nlines;
+ struct workqueue_struct *tty_wq;
+ struct work_struct tty_worker;
+ struct nvshm_tty_line line[NVSHM_MAX_CHANNELS];
+};
+
+static struct nvshm_tty_device tty_dev;
+
+static void nvshm_tty_rx_rewrite_line(int l)
+{
+ struct nvshm_iobuf *list;
+ struct tty_struct *tty = NULL;
+ int len, nbuff = 0;
+
+ tty = tty_port_tty_get(&tty_dev.line[l].port);
+
+ if (!tty)
+ return;
+
+ spin_lock(&tty_dev.line[l].lock);
+
+ while (tty_dev.line[l].io_queue_head) {
+ list = tty_dev.line[l].io_queue_head;
+ spin_unlock(&tty_dev.line[l].lock);
+ len = tty_insert_flip_string(tty,
+ NVSHM_B2A(tty_dev.handle,
+ list->npdu_data)
+ + list->data_offset,
+ list->length);
+ tty_flip_buffer_push(tty);
+ spin_lock(&tty_dev.line[l].lock);
+ if (len < list->length) {
+ list->data_offset += len;
+ list->length -= len;
+ spin_unlock(&tty_dev.line[l].lock);
+ tty_kref_put(tty);
+ return;
+ }
+ if (list->sg_next) {
+ /* Propagate ->next to the sg_next fragment;
+ do not forget to move the tail as well */
+ if (tty_dev.line[l].io_queue_head !=
+ tty_dev.line[l].io_queue_tail) {
+ tty_dev.line[l].io_queue_head =
+ NVSHM_B2A(tty_dev.handle,
+ list->sg_next);
+ tty_dev.line[l].io_queue_head->next =
+ list->next;
+ } else {
+ tty_dev.line[l].io_queue_head =
+ NVSHM_B2A(tty_dev.handle,
+ list->sg_next);
+ tty_dev.line[l].io_queue_tail =
+ tty_dev.line[l].io_queue_head;
+ if (list->next != NULL)
+ pr_debug("%s:tail->next!=NULL\n",
+ __func__);
+ }
+ } else {
+ if (list->next) {
+ if (tty_dev.line[l].io_queue_head !=
+ tty_dev.line[l].io_queue_tail) {
+ tty_dev.line[l].io_queue_head =
+ NVSHM_B2A(tty_dev.handle,
+ list->next);
+ } else {
+ tty_dev.line[l].io_queue_head =
+ NVSHM_B2A(tty_dev.handle,
+ list->next);
+ tty_dev.line[l].io_queue_tail =
+ tty_dev.line[l].io_queue_head;
+ }
+ } else {
+ tty_dev.line[l].io_queue_tail = NULL;
+ tty_dev.line[l].io_queue_head = NULL;
+ }
+ }
+ nbuff++;
+ nvshm_iobuf_free((struct nvshm_iobuf *)list);
+ }
+ if (!tty_dev.line[l].io_queue_head)
+ tty_dev.line[l].throttled = 0;
+ spin_unlock(&tty_dev.line[l].lock);
+ tty_kref_put(tty);
+}
+
+/* Called in a workqueue when unthrottle is called */
+static void nvshm_tty_rx_rewrite(struct work_struct *work)
+{
+ int idx;
+
+ for (idx = 0; idx < tty_dev.nlines; idx++)
+ nvshm_tty_rx_rewrite_line(idx);
+}
+
+/*
+ * nvshm_tty_rx_event()
+ * NVSHM has received data insert them in tty flip buffer.
+ * If no data is available, turn flow control on
+ */
+void nvshm_tty_rx_event(struct nvshm_channel *chan,
+ struct nvshm_iobuf *iob)
+{
+ struct nvshm_tty_line *line = chan->data;
+ struct tty_struct *tty = NULL;
+ struct nvshm_iobuf *_phy_iob, *tmp;
+ int len;
+
+ /* line can be null if TTY install failed or not executed yet */
+ if (line)
+ tty = tty_port_tty_get(&line->port);
+
+ if (!tty) {
+ pr_warn("%s: data received on a closed/non-init TTY\n",
+ __func__);
+ nvshm_iobuf_free_cluster(iob);
+ return;
+ }
+
+ spin_lock(&line->lock);
+
+ if (!line->throttled) {
+ _phy_iob = iob;
+ while (_phy_iob) {
+ spin_unlock(&line->lock);
+ len = tty_insert_flip_string(tty,
+ NVSHM_B2A(tty_dev.handle,
+ iob->npdu_data)
+ + iob->data_offset,
+ iob->length);
+ tty_flip_buffer_push(tty);
+ spin_lock(&line->lock);
+ if (len < iob->length) {
+ line->throttled = 1;
+ iob->data_offset += len;
+ iob->length -= len;
+ goto queue;
+ }
+
+ tmp = iob;
+ /* Go to the next element: if ->sg_next exists, follow it
+ and propagate ->next; otherwise take ->next */
+ if (iob->sg_next) {
+ struct nvshm_iobuf *leaf;
+ _phy_iob = iob->sg_next;
+ if (_phy_iob) {
+ leaf = NVSHM_B2A(tty_dev.handle,
+ _phy_iob);
+ leaf->next = iob->next;
+ }
+ } else {
+ _phy_iob = iob->next;
+ }
+ iob = NVSHM_B2A(tty_dev.handle, _phy_iob);
+ nvshm_iobuf_free(tmp);
+ }
+ spin_unlock(&line->lock);
+ tty_kref_put(tty);
+ return;
+ }
+queue:
+ /* Queue into FIFO */
+ if (line->io_queue_tail) {
+ line->io_queue_tail->next =
+ NVSHM_A2B(tty_dev.handle, iob);
+ } else {
+ if (line->io_queue_head) {
+ line->io_queue_head->next =
+ NVSHM_A2B(tty_dev.handle, iob);
+ } else {
+ line->io_queue_head = iob;
+ }
+ }
+ line->io_queue_tail = iob;
+ spin_unlock(&line->lock);
+ queue_work(tty_dev.tty_wq, &tty_dev.tty_worker);
+ tty_kref_put(tty);
+ return;
+}
+
+void nvshm_tty_error_event(struct nvshm_channel *chan,
+ enum nvshm_error_id error)
+{
+ struct nvshm_tty_line *line = chan->data;
+ struct tty_struct *tty;
+
+ tty = tty_port_tty_get(&line->port);
+ if (!tty)
+ return;
+ pr_debug("%s\n", __func__);
+ line->errno = error;
+ tty_hangup(tty);
+ tty_kref_put(tty);
+}
+
+void nvshm_tty_start_tx(struct nvshm_channel *chan)
+{
+ struct nvshm_tty_line *line = chan->data;
+ struct tty_struct *tty;
+
+ tty = tty_port_tty_get(&line->port);
+ if (!tty)
+ return;
+
+ pr_debug("%s\n", __func__);
+ tty_unthrottle(tty);
+ tty_kref_put(tty);
+}
+
+static struct nvshm_if_operations nvshm_tty_ops = {
+ .rx_event = nvshm_tty_rx_event,
+ .error_event = nvshm_tty_error_event,
+ .start_tx = nvshm_tty_start_tx
+};
+
+/* TTY interface */
+
+static int nvshm_tty_open(struct tty_struct *tty, struct file *f)
+{
+ struct nvshm_tty_line *line = tty->driver_data;
+ if (line)
+ return tty_port_open(&line->port, tty, f);
+ return -EIO;
+}
+
+static void nvshm_tty_close(struct tty_struct *tty, struct file *f)
+{
+ struct nvshm_tty_line *line = tty->driver_data;
+ if (line)
+ tty_port_close(&line->port, tty, f);
+}
+
+static void nvshm_tty_hangup(struct tty_struct *tty)
+{
+ struct nvshm_tty_line *line = tty->driver_data;
+ if (line)
+ tty_port_hangup(&line->port);
+}
+
+
+static int nvshm_tty_write_room(struct tty_struct *tty)
+{
+ return MAX_OUTPUT_SIZE;
+}
+
+static int nvshm_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int len)
+{
+ struct nvshm_iobuf *iob, *leaf = NULL, *list = NULL;
+ int to_send = 0, remain, idx = tty->index, ret;
+
+ if (!tty_dev.up)
+ return -EIO;
+
+ remain = len;
+ while (remain) {
+ to_send = remain < MAX_OUTPUT_SIZE ? remain : MAX_OUTPUT_SIZE;
+ iob = nvshm_iobuf_alloc(tty_dev.line[idx].pchan, to_send);
+ if (!iob) {
+ if (tty_dev.line[idx].errno) {
+ pr_err("%s iobuf alloc failed\n", __func__);
+ if (list)
+ nvshm_iobuf_free_cluster(list);
+ return -ENOMEM;
+ } else {
+ pr_err("%s: Xoff condition\n", __func__);
+ if (list)
+ nvshm_iobuf_free_cluster(list);
+ return 0;
+ }
+ }
+
+ iob->length = to_send;
+ iob->chan = tty_dev.line[idx].pchan->index;
+ remain -= to_send;
+ memcpy(NVSHM_B2A(tty_dev.handle,
+ iob->npdu_data +
+ iob->data_offset),
+ buf,
+ to_send);
+ buf += to_send;
+
+ if (!list) {
+ leaf = list = iob;
+ } else {
+ leaf->sg_next = NVSHM_A2B(tty_dev.handle, iob);
+ leaf = iob;
+ }
+ }
+ ret = nvshm_write(tty_dev.line[idx].pchan, list);
+
+ if (ret == 1)
+ tty_throttle(tty);
+
+ return len;
+}
+
+static void nvshm_tty_unthrottle(struct tty_struct *tty)
+{
+ int idx = tty->index;
+
+ pr_debug("%s\n", __func__);
+
+ if (!tty_dev.up)
+ return;
+
+ spin_lock(&tty_dev.line[idx].lock);
+ if (tty_dev.line[idx].throttled) {
+ spin_unlock(&tty_dev.line[idx].lock);
+ queue_work(tty_dev.tty_wq, &tty_dev.tty_worker);
+ return;
+ }
+ spin_unlock(&tty_dev.line[idx].lock);
+}
+
+static void nvshm_tty_dtr_rts(struct tty_port *tport, int onoff)
+{
+ pr_debug("%s\n", __func__);
+}
+
+static int nvshm_tty_carrier_raised(struct tty_port *tport)
+{
+ pr_debug("%s\n", __func__);
+ return 0;
+}
+
+static int nvshm_tty_activate(struct tty_port *tport, struct tty_struct *tty)
+{
+ int idx = tty->index;
+
+ /* Set TTY flags */
+ set_bit(TTY_IO_ERROR, &tty->flags);
+ set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ tty->low_latency = 1;
+
+ pr_debug("%s line %d\n", __func__, idx);
+ tty_dev.line[idx].throttled = 0;
+ tty_dev.line[idx].pchan =
+ nvshm_open_channel(tty_dev.line[idx].nvshm_chan,
+ &nvshm_tty_ops,
+ &tty_dev.line[idx]);
+ if (!tty_dev.line[idx].pchan) {
+ pr_err("%s fail to open SHM chan\n", __func__);
+ return -EIO;
+ }
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ return 0;
+}
+
+static void nvshm_tty_shutdown(struct tty_port *tport)
+{
+ struct nvshm_tty_line *line =
+ container_of(tport, struct nvshm_tty_line, port);
+
+ if (line) {
+ pr_debug("%s\n", __func__);
+ nvshm_close_channel(line->pchan);
+ line->pchan = NULL;
+ }
+}
+
+static int nvshm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ int idx = tty->index;
+ struct nvshm_tty_line *line = &tty_dev.line[idx];
+ int ret = tty_standard_install(driver, tty);
+
+ pr_debug("%s\n", __func__);
+ if (ret == 0)
+ tty->driver_data = line;
+ return ret;
+}
+
+static const struct tty_operations nvshm_tty_ttyops = {
+ .open = nvshm_tty_open,
+ .close = nvshm_tty_close,
+ .hangup = nvshm_tty_hangup,
+ .write = nvshm_tty_write,
+ .write_room = nvshm_tty_write_room,
+ .unthrottle = nvshm_tty_unthrottle,
+ .install = nvshm_tty_install,
+};
+
+static const struct tty_port_operations nvshm_tty_port_ops = {
+ .dtr_rts = nvshm_tty_dtr_rts,
+ .carrier_raised = nvshm_tty_carrier_raised,
+ .shutdown = nvshm_tty_shutdown,
+ .activate = nvshm_tty_activate,
+};
+
+int nvshm_tty_init(struct nvshm_handle *handle)
+{
+ int ret, chan;
+
+ pr_debug("%s\n", __func__);
+
+ memset(&tty_dev, 0, sizeof(tty_dev));
+
+ tty_dev.tty_wq = create_singlethread_workqueue("NVSHM_tty");
+ if (!tty_dev.tty_wq)
+ return -ENOMEM;
+ INIT_WORK(&tty_dev.tty_worker, nvshm_tty_rx_rewrite);
+
+ tty_dev.tty_driver = alloc_tty_driver(NVSHM_MAX_CHANNELS);
+
+ if (tty_dev.tty_driver == NULL)
+ return -ENOMEM;
+
+ tty_dev.tty_driver->owner = THIS_MODULE;
+ tty_dev.tty_driver->driver_name = "nvshm_tty";
+ tty_dev.tty_driver->name = "ttySHM";
+ tty_dev.tty_driver->major = 0;
+ tty_dev.tty_driver->minor_start = 0;
+ tty_dev.tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_dev.tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ tty_dev.tty_driver->init_termios = tty_std_termios;
+ tty_dev.tty_driver->init_termios.c_iflag = 0;
+ tty_dev.tty_driver->init_termios.c_oflag = 0;
+ tty_dev.tty_driver->init_termios.c_cflag =
+ B115200 | CS8 | CREAD | CLOCAL;
+ tty_dev.tty_driver->init_termios.c_lflag = 0;
+ tty_dev.tty_driver->flags =
+ TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV;
+
+ tty_set_operations(tty_dev.tty_driver, &nvshm_tty_ttyops);
+
+ ret = tty_register_driver(tty_dev.tty_driver);
+ if (ret) {
+ put_tty_driver(tty_dev.tty_driver);
+ destroy_workqueue(tty_dev.tty_wq);
+ return ret;
+ }
+
+ tty_dev.handle = handle;
+
+ for (chan = 0; chan < NVSHM_MAX_CHANNELS; chan++) {
+ if ((handle->chan[chan].map.type == NVSHM_CHAN_TTY)
+ || (handle->chan[chan].map.type == NVSHM_CHAN_LOG)) {
+ tty_dev.line[tty_dev.nlines].nvshm_chan = chan;
+ tty_dev.nlines++;
+ }
+ }
+
+ for (chan = 0; chan < tty_dev.nlines; chan++) {
+ pr_debug("%s: register tty#%d cha %d\n",
+ __func__, chan, tty_dev.line[chan].nvshm_chan);
+ spin_lock_init(&tty_dev.line[tty_dev.nlines].lock);
+ tty_port_init(&tty_dev.line[chan].port);
+ tty_dev.line[chan].port.ops = &nvshm_tty_port_ops;
+ tty_port_register_device(&tty_dev.line[chan].port,
+ tty_dev.tty_driver, chan, 0);
+ }
+
+ tty_dev.up = NVSHM_TTY_UP;
+ return 0;
+}
+
+void nvshm_tty_cleanup(void)
+{
+ int chan;
+
+ pr_debug("%s\n", __func__);
+ tty_dev.up = 0;
+ for (chan = 0; chan < tty_dev.nlines; chan++) {
+ struct tty_struct *tty;
+
+ tty = tty_port_tty_get(&tty_dev.line[chan].port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+ /* No need to cleanup data as iobufs are invalid now */
+ /* Next nvshm_tty_init will do it */
+ pr_debug("%s unregister tty device %d\n", __func__, chan);
+ tty_unregister_device(tty_dev.tty_driver, chan);
+ }
+ destroy_workqueue(tty_dev.tty_wq);
+ tty_unregister_driver(tty_dev.tty_driver);
+ put_tty_driver(tty_dev.tty_driver);
+}
+
diff --git a/drivers/staging/nvshm/nvshm_types.h b/drivers/staging/nvshm/nvshm_types.h
new file mode 100644
index 000000000000..821e2c6e214a
--- /dev/null
+++ b/drivers/staging/nvshm/nvshm_types.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _NVSHM_TYPES_H
+#define _NVSHM_TYPES_H
+
+#include <linux/workqueue.h>
+#include <linux/platform_data/nvshm.h> /* NVSHM_SERIAL_BYTE_SIZE */
+
+/* NVSHM common types */
+
+/* Shared memory fixed offsets */
+
+#define NVSHM_IPC_BASE (0x0) /* IPC mailbox base offset */
+#define NVSHM_IPC_MAILBOX (0x0) /* IPC mailbox offset */
+#define NVSHM_IPC_RETCODE (0x4) /* IPC mailbox return code offset */
+#define NVSHM_IPC_SIZE (4096) /* IPC mailbox region size */
+#define NVSHM_CONFIG_OFFSET (8) /* shared memory config offset */
+#define NVSHM_MAX_CHANNELS (12) /* Maximum number of channels */
+#define NVSHM_CHAN_NAME_SIZE (27) /* max channel name size in chars */
+
+/* Versions: */
+#define NVSHM_MAJOR(v) (v >> 16)
+#define NVSHM_MINOR(v) (v & 0xFFFF)
+/** Version 1.3 is supported. Keep until modem image has reached v2.x in main */
+#define NVSHM_CONFIG_VERSION_1_3 (0x00010003)
+/** Version with statistics export support, otherwise compatible with v1.3 */
+#define NVSHM_CONFIG_VERSION (0x00020001)
+
+
+#define NVSHM_AP_POOL_ID (128) /* IOPOOL ID - use 128-255 for AP */
+
+#define NVSHM_RATE_LIMIT_TTY (128)
+#define NVSHM_RATE_LIMIT_LOG (256)
+#define NVSHM_RATE_LIMIT_NET (512)
+#define NVSHM_RATE_LIMIT_RPC (128)
+#define NVSHM_RATE_LIMIT_TRESHOLD (16)
+
+/* NVSHM_IPC mailbox messages ids */
+enum nvshm_ipc_mailbox {
+ /* Boot status */
+ NVSHM_IPC_BOOT_COLD_BOOT_IND = 0x01,
+ NVSHM_IPC_BOOT_FW_REQ,
+ NVSHM_IPC_BOOT_RESTART_FW_REQ,
+ NVSHM_IPC_BOOT_FW_CONF,
+ NVSHM_IPC_READY,
+
+ /* Boot errors */
+ NVSHM_IPC_BOOT_ERROR_BT2_HDR = 0x1000,
+ NVSHM_IPC_BOOT_ERROR_BT2_SIGN,
+ NVSHM_IPC_BOOT_ERROR_HWID,
+ NVSHM_IPC_BOOT_ERROR_APP_HDR,
+ NVSHM_IPC_BOOT_ERROR_APP_SIGN,
+ NVSHM_IPC_BOOT_ERROR_UNLOCK_HEADER,
+ NVSHM_IPC_BOOT_ERROR_UNLOCK_SIGN,
+ NVSHM_IPC_BOOT_ERROR_UNLOCK_PCID,
+
+ NVSHM_IPC_MAX_MSG = 0xFFFF
+};
+
+/* NVSHM Config */
+
+/* Channel type */
+enum nvshm_chan_type {
+ NVSHM_CHAN_UNMAP = 0,
+ NVSHM_CHAN_TTY,
+ NVSHM_CHAN_LOG,
+ NVSHM_CHAN_NET,
+ NVSHM_CHAN_RPC,
+};
+
+/* Channel mapping structure */
+struct nvshm_chan_map {
+ /* tty/net/log */
+ int type;
+ /* Name of device - reflected in sysfs */
+ char name[NVSHM_CHAN_NAME_SIZE+1];
+};
+
+/*
+ * This structure is set by the BB after boot to give the AP its current
+ * shmem mapping.
+ * The BB initializes all descriptor content and gives an initial empty
+ * element for each queue.
+ * The BB enqueues free AP descriptor elements into the AP queue.
+ * The AP initializes its queue pointers with the empty descriptors' offsets
+ * and retrieves its descriptors from the AP queue.
+ */
+struct nvshm_config {
+ int version;
+ int shmem_size;
+ int region_ap_desc_offset;
+ int region_ap_desc_size;
+ int region_bb_desc_offset;
+ int region_bb_desc_size;
+ int region_ap_data_offset;
+ int region_ap_data_size;
+ int region_bb_data_offset;
+ int region_bb_data_size;
+ int queue_ap_offset;
+ int queue_bb_offset;
+ struct nvshm_chan_map chan_map[NVSHM_MAX_CHANNELS];
+ char serial[NVSHM_SERIAL_BYTE_SIZE];
+ int region_dxp1_stats_offset;
+ int region_dxp1_stats_size;
+ int guard;
+};
+
+/*
+ * This structure holds data fragments reference
+ * WARNING: ALL POINTERS ARE IN BASEBAND MAPPING
+ * NO POINTER SHOULD BE USED WITHOUT PROPER MACRO
+ * see nvshm_iobuf.h for reference
+ */
+struct nvshm_iobuf {
+ /* Standard iobuf part - This part is fixed and cannot be changed */
+ unsigned char *npdu_data;
+ unsigned short length;
+ unsigned short data_offset;
+ unsigned short total_length;
+ unsigned char ref;
+ unsigned char pool_id;
+ struct nvshm_iobuf *next;
+ struct nvshm_iobuf *sg_next;
+ unsigned short flags;
+ unsigned short _size;
+ void *_handle;
+ unsigned int _reserved;
+
+ /* Extended iobuf - This part is not yet fixed (under spec/review) */
+ struct nvshm_iobuf *qnext;
+ int chan;
+ int qflags;
+ int _reserved1;
+ int _reserved2;
+ int _reserved3;
+ int _reserved4;
+ int _reserved5;
+};
+
+/* channel structure */
+struct nvshm_channel {
+ int index;
+ struct nvshm_chan_map map;
+ struct nvshm_if_operations *ops;
+ void *data;
+ int rate_counter;
+ int xoff;
+ struct work_struct start_tx_work;
+};
+
+
+#endif /* _NVSHM_TYPES_H */
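
Because every pointer inside struct nvshm_iobuf is in baseband mapping,
AP-side code must convert at each hop, as nvshm_tty.c does with NVSHM_B2A().
A sketch of walking one packet's scatter-gather fragments (consume() is a
placeholder, and 'iob' is assumed already converted to AP mapping):

    while (iob) {
            void *data = NVSHM_B2A(handle, iob->npdu_data) + iob->data_offset;

            consume(data, iob->length);
            iob = iob->sg_next ? NVSHM_B2A(handle, iob->sg_next) : NULL;
    }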
diff --git a/drivers/staging/ozwpan/Kbuild b/drivers/staging/ozwpan/Kbuild
index 6cc84cb3f0a6..08bc7938692d 100644
--- a/drivers/staging/ozwpan/Kbuild
+++ b/drivers/staging/ozwpan/Kbuild
@@ -14,6 +14,6 @@ ozwpan-y := \
ozcdev.o \
ozurbparanoia.o \
oztrace.o \
- ozevent.o
+ ozkobject.o
diff --git a/drivers/staging/ozwpan/Makefile b/drivers/staging/ozwpan/Makefile
new file mode 100644
index 000000000000..d2fdf8a1b5cf
--- /dev/null
+++ b/drivers/staging/ozwpan/Makefile
@@ -0,0 +1,25 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2011 Ozmo Inc
+# Released under the GNU General Public License Version 2 (GPLv2).
+# -----------------------------------------------------------------------------
+ifneq ($(KERNELRELEASE),)
+# If invoked from kbuild.
+obj-m := ozwpan.o
+ozwpan-y := ozusbsvc.o ozusbsvc1.o ozurbparanoia.o oztrace.o ozproto.o ozpd.o ozmain.o ozkobject.o ozhcd.o ozeltbuf.o ozcdev.o
+
+else
+# If invoked directly.
+KDIR := /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+
+default:
+ $(MAKE) -C $(KDIR) M=$(PWD) modules EXTRA_CFLAGS="-I$(PWD)"
+
+debug: OZ_CFLAGS=-DWANT_TRACE_DATA_FLOW
+debug:
+ $(MAKE) -C $(KDIR) M=$(PWD) modules EXTRA_CFLAGS="$(OZ_CFLAGS) -I$(PWD)"
+less_debug:
+ $(MAKE) -C $(KDIR) M=$(PWD) modules EXTRA_CFLAGS="-DDEBUG -I$(PWD)"
+
+endif
+
diff --git a/drivers/staging/ozwpan/README b/drivers/staging/ozwpan/README
index 7c055ec99544..bb1a69b94541 100644
--- a/drivers/staging/ozwpan/README
+++ b/drivers/staging/ozwpan/README
@@ -9,7 +9,7 @@ technology.
To operate the driver must be bound to a suitable network interface. This can
be done when the module is loaded (specifying the name of the network interface
-as a parameter - e.g. 'insmod ozwpan g_net_dev=go0') or can be bound after
+as a parameter - e.g. 'insmod ozwpan g_net_dev=go0') or can be bound after
loading using an ioctl call. See the ozappif.h file and the ioctls
OZ_IOCTL_ADD_BINDING and OZ_IOCTL_REMOVE_BINDING.
diff --git a/drivers/staging/ozwpan/TODO b/drivers/staging/ozwpan/TODO
index b4febd79a68d..c2d30a7112f3 100644
--- a/drivers/staging/ozwpan/TODO
+++ b/drivers/staging/ozwpan/TODO
@@ -1,14 +1,14 @@
TODO:
- - Convert event tracing code to in-kernel tracing infrastructure
- - Check for remaining ioctl & check if that can be converted into
- sysfs entries
- - Convert debug prints to appropriate dev_debug or something better
- - Modify Kconfig to add CONFIG option for enabling/disabling event
- tracing.
+ - review user mode interface and determine if ioctls can be replaced
+ with something better. Correctly export data structures to user mode
+ if ioctls are still required and allocate ioctl numbers from
+ ioctl-number.txt.
- check USB HCD implementation is complete and correct.
+ - remove any debug and trace code.
- code review by USB developer community.
- testing with as many devices as possible.
Please send any patches for this driver to
-Rupesh Gujare <rupesh.gujare@atmel.com>
+Rupesh Gujare <rgujare@ozmodevices.com>
+Chris Kelly <ckelly@ozmodevices.com>
and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/ozwpan/ozappif.h b/drivers/staging/ozwpan/ozappif.h
index 449a6ba82337..ea1b271fdcda 100644
--- a/drivers/staging/ozwpan/ozappif.h
+++ b/drivers/staging/ozwpan/ozappif.h
@@ -6,8 +6,6 @@
#ifndef _OZAPPIF_H
#define _OZAPPIF_H
-#include "ozeventdef.h"
-
#define OZ_IOCTL_MAGIC 0xf4
struct oz_mac_addr {
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 224ccff75d4f..7e4559bfe377 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -11,18 +11,20 @@
#include <linux/etherdevice.h>
#include <linux/poll.h>
#include <linux/sched.h>
-#include "ozconfig.h"
#include "ozprotocol.h"
#include "oztrace.h"
#include "ozappif.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
-#include "ozevent.h"
#include "ozcdev.h"
+#include "ozkobject.h"
/*------------------------------------------------------------------------------
*/
#define OZ_RD_BUF_SZ 256
+#define OZ_MODE_TFTP 0x1
+#define OZ_MODE_SERIAL 0x0
+
struct oz_cdev {
dev_t devnum;
struct cdev cdev;
@@ -30,6 +32,8 @@ struct oz_cdev {
spinlock_t lock;
u8 active_addr[ETH_ALEN];
struct oz_pd *active_pd;
+ atomic_t ref_count;
+ u8 mode;
};
/* Per PD context for the serial service stored in the PD. */
@@ -37,14 +41,34 @@ struct oz_serial_ctx {
atomic_t ref_count;
u8 tx_seq_num;
u8 rx_seq_num;
+ u8 tx_done_seq_num;
+ u8 padding;
u8 rd_buf[OZ_RD_BUF_SZ];
int rd_in;
int rd_out;
+ spinlock_t rd_lock;
+ int dg_len[OZ_RD_BUF_SZ/4];
+ int dg_in;
+ int dg_out;
};
/*------------------------------------------------------------------------------
*/
static struct oz_cdev g_cdev;
-static struct class *g_oz_class;
+struct class *g_oz_class;
+struct device *g_oz_wpan_dev;
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+
+static void oz_cdev_elt_completion_callback(struct oz_pd *pd, long context)
+{
+ struct oz_serial_ctx *ctx;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+ ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
+ if (ctx)
+ ctx->tx_done_seq_num = (u8)context;
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
+}
/*------------------------------------------------------------------------------
* Context: process and softirq
*/
@@ -63,7 +87,7 @@ static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
*/
static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
{
- if (atomic_dec_and_test(&ctx->ref_count)) {
+ if (ctx && atomic_dec_and_test(&ctx->ref_count)) {
oz_trace("Dealloc serial context.\n");
kfree(ctx);
}
@@ -71,125 +95,221 @@ static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
/*------------------------------------------------------------------------------
* Context: process
*/
-static int oz_cdev_open(struct inode *inode, struct file *filp)
+int oz_cdev_open(struct inode *inode, struct file *filp)
{
struct oz_cdev *dev;
- oz_trace("oz_cdev_open()\n");
- oz_trace("major = %d minor = %d\n", imajor(inode), iminor(inode));
+
+ if (!atomic_add_unless(&g_cdev.ref_count, 1, 1)) {
+ oz_trace_msg(O, "OPEN %08X EBUSY\n",
+ (unsigned int)((uintptr_t)filp));
+ return -EBUSY;
+ }
+
dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
filp->private_data = dev;
+
+ oz_trace_msg(O, "OPEN %08X OK\n", (unsigned int)((uintptr_t)filp));
return 0;
}
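The gate above works because atomic_add_unless(v, 1, 1) increments only when the counter is not already 1 and returns non-zero iff the add happened, giving lock-free single-open semantics. A user-space model of the primitive (C11 atomics, illustration only):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Model of atomic_add_unless(v, a, u): add 'a' unless *v == 'u';
	 * returns 1 if the add happened, 0 otherwise. */
	static int add_unless(atomic_int *v, int a, int u)
	{
		int cur = atomic_load(v);

		while (cur != u)
			if (atomic_compare_exchange_weak(v, &cur, cur + a))
				return 1;
		return 0;
	}

	int main(void)
	{
		atomic_int ref = 0;

		printf("%d\n", add_unless(&ref, 1, 1)); /* 1: first open */
		printf("%d\n", add_unless(&ref, 1, 1)); /* 0: -EBUSY path */
		return 0;
	}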
/*------------------------------------------------------------------------------
* Context: process
*/
-static int oz_cdev_release(struct inode *inode, struct file *filp)
+int oz_cdev_release(struct inode *inode, struct file *filp)
{
- oz_trace("oz_cdev_release()\n");
+ atomic_dec(&g_cdev.ref_count);
+ oz_trace_msg(O, "CLOSE %08X\n", (unsigned int)((uintptr_t)filp));
return 0;
}
/*------------------------------------------------------------------------------
* Context: process
*/
-static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
+ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
loff_t *fpos)
{
int n;
int ix;
-
+ int is_tftp;
struct oz_pd *pd;
struct oz_serial_ctx *ctx;
+ oz_trace_msg(O, "READ I %X %04X\n",
+ (unsigned int)((uintptr_t)filp), (int)count);
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd)
oz_pd_get(pd);
+ is_tftp = (g_cdev.mode & OZ_MODE_TFTP) ? 1 : 0;
spin_unlock_bh(&g_cdev.lock);
- if (pd == NULL)
+ if (pd == NULL) {
+ oz_trace_msg(O, "READ O %X %04X\n",
+ (unsigned int)((uintptr_t)filp), (unsigned int)(-1));
return -1;
+ }
ctx = oz_cdev_claim_ctx(pd);
if (ctx == NULL)
goto out2;
- n = ctx->rd_in - ctx->rd_out;
- if (n < 0)
- n += OZ_RD_BUF_SZ;
- if (count > n)
+
+ spin_lock_bh(&ctx->rd_lock);
+
+ if (is_tftp) {
+ /* if n is non-zero we have a datagram */
+ n = ctx->dg_len[ctx->dg_out];
+
+ if (n == 0) {
+ count = 0;
+ spin_unlock_bh(&ctx->rd_lock);
+ goto out1;
+ }
+
+ ix = ctx->rd_out;
+ spin_unlock_bh(&ctx->rd_lock);
+
+ /* copy n bytes of the datagram to the user buffer */
+ if ((ix + n) < OZ_RD_BUF_SZ) {
+ if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
+ count = 0;
+ goto out1;
+ }
+ spin_lock_bh(&ctx->rd_lock);
+ ctx->rd_out += n;
+ } else {
+ int b = (OZ_RD_BUF_SZ - ix);
+ /* the datagram may be split between the end and start of
+ * the buffer */
+ if (copy_to_user(buf, &ctx->rd_buf[ix], b)) {
+ count = 0;
+ goto out1;
+ }
+ if (copy_to_user(&buf[b], ctx->rd_buf, n - b)) {
+ count = 0;
+ goto out1;
+ }
+ spin_lock_bh(&ctx->rd_lock);
+ ctx->rd_out = n - (OZ_RD_BUF_SZ - ix);
+ }
+
count = n;
- ix = ctx->rd_out;
- n = OZ_RD_BUF_SZ - ix;
- if (n > count)
- n = count;
- if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
- count = 0;
- goto out1;
- }
- ix += n;
- if (ix == OZ_RD_BUF_SZ)
- ix = 0;
- if (n < count) {
- if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
+
+ ctx->dg_len[ctx->dg_out] = 0;
+ ctx->dg_out++;
+ if ((OZ_RD_BUF_SZ/4) == ctx->dg_out)
+ ctx->dg_out = 0;
+ spin_unlock_bh(&ctx->rd_lock);
+ } else {
+ n = ctx->rd_in - ctx->rd_out;
+ if (n < 0)
+ n += OZ_RD_BUF_SZ;
+ if (count > n)
+ count = n;
+
+ ix = ctx->rd_out;
+ spin_unlock_bh(&ctx->rd_lock);
+ n = OZ_RD_BUF_SZ - ix;
+ if (n > count)
+ n = count;
+
+ if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
count = 0;
goto out1;
}
- ix = count-n;
+
+ ix += n;
+ if (ix == OZ_RD_BUF_SZ)
+ ix = 0;
+ if (n < count) {
+ if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
+ count = 0;
+ goto out1;
+ }
+ ix = count-n;
+ }
+
+ spin_lock_bh(&ctx->rd_lock);
+ ctx->rd_out = ix;
+ spin_unlock_bh(&ctx->rd_lock);
}
- ctx->rd_out = ix;
out1:
oz_cdev_release_ctx(ctx);
out2:
oz_pd_put(pd);
+ oz_trace_msg(O, "READ O %08X %04X\n",
+ (unsigned int)((uintptr_t)filp), (unsigned int)(count));
return count;
}
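Both read paths above use the usual circular-buffer arithmetic on rd_buf: the readable byte count is rd_in - rd_out modulo OZ_RD_BUF_SZ, and a copy that crosses the end of the array is done in two parts. A standalone illustration of the index math:

	#include <stdio.h>

	#define BUF_SZ 256	/* mirrors OZ_RD_BUF_SZ */

	/* Readable bytes when 'in' is the producer index and 'out' the
	 * consumer index. */
	static int ring_avail(int in, int out)
	{
		int n = in - out;

		return n < 0 ? n + BUF_SZ : n;
	}

	int main(void)
	{
		/* Wrapped case: 10 - 250 = -240, + BUF_SZ = 16 readable
		 * bytes, copied as 6 (indices 250..255) then 10 (0..9). */
		printf("%d\n", ring_avail(10, 250));	/* prints 16 */
		return 0;
	}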
/*------------------------------------------------------------------------------
* Context: process
*/
-static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *fpos)
+ssize_t oz_cdev_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *fpos)
{
struct oz_pd *pd;
struct oz_elt_buf *eb;
struct oz_elt_info *ei;
struct oz_elt *elt;
+ struct oz_ext_elt *ext_elt;
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
- if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
- return -EINVAL;
+ oz_trace_msg(O, "WRITE I %08X %04X\n",
+ (unsigned int)((uintptr_t)filp), (int)count);
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd)
oz_pd_get(pd);
spin_unlock_bh(&g_cdev.lock);
- if (pd == NULL)
+ if (pd == NULL) {
+ oz_trace_msg(O, "WRITE O %08X %04X\n",
+ (unsigned int)((uintptr_t)filp), (unsigned int)count);
return -1;
+ }
+ if (!(pd->state & OZ_PD_S_CONNECTED)) {
+ oz_pd_put(pd); /* drop the reference taken above */
+ return -ENXIO;
+ }
eb = &pd->elt_buff;
ei = oz_elt_info_alloc(eb);
if (ei == NULL) {
count = 0;
goto out;
}
- elt = (struct oz_elt *)ei->data;
- app_hdr = (struct oz_app_hdr *)(elt+1);
- elt->length = sizeof(struct oz_app_hdr) + count;
- elt->type = OZ_ELT_APP_DATA;
- ei->app_id = OZ_APPID_SERIAL;
- ei->length = elt->length + sizeof(struct oz_elt);
- app_hdr->app_id = OZ_APPID_SERIAL;
+ spin_lock_bh(&g_cdev.lock);
+ if (g_cdev.mode & OZ_MODE_TFTP) {
+ spin_unlock_bh(&g_cdev.lock);
+ ei->app_id = OZ_APPID_SERIAL;
+ ei->flags |= OZ_EI_F_EXT_ELM;
+ ext_elt = (struct oz_ext_elt *)ei->data;
+ app_hdr = (struct oz_app_hdr *)(ext_elt+1);
+ app_hdr->app_id = OZ_APPID_TFTP;
+ ext_elt->length = sizeof(struct oz_app_hdr) + count;
+ ext_elt->type = OZ_ELT_APP_DATA_EX;
+ ei->length = ext_elt->length + sizeof(struct oz_ext_elt);
+ ext_elt->length = cpu_to_le16(ext_elt->length);
+ } else {
+ spin_unlock_bh(&g_cdev.lock);
+ ei->app_id = OZ_APPID_SERIAL;
+ elt = (struct oz_elt *)ei->data;
+ app_hdr = (struct oz_app_hdr *)(elt+1);
+ app_hdr->app_id = OZ_APPID_SERIAL;
+ elt->length = sizeof(struct oz_app_hdr) + count;
+ elt->type = OZ_ELT_APP_DATA;
+ ei->length = elt->length + sizeof(struct oz_elt);
+ }
if (copy_from_user(app_hdr+1, buf, count))
goto out;
- spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
if (ctx) {
app_hdr->elt_seq_num = ctx->tx_seq_num++;
if (ctx->tx_seq_num == 0)
ctx->tx_seq_num = 1;
+ ei->callback = oz_cdev_elt_completion_callback;
+ ei->context = ctx->tx_seq_num;
spin_lock(&eb->lock);
if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
ei = NULL;
spin_unlock(&eb->lock);
}
- spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
out:
if (ei) {
count = 0;
@@ -198,23 +318,46 @@ out:
spin_unlock_bh(&eb->lock);
}
oz_pd_put(pd);
+ oz_trace_msg(O, "WRITE O %08X %04X\n",
+ (unsigned int)((uintptr_t)filp), (unsigned int)count);
return count;
}
/*------------------------------------------------------------------------------
* Context: process
*/
-static int oz_set_active_pd(const u8 *addr)
+int oz_set_active_pd(const u8 *addr)
{
int rc = 0;
struct oz_pd *pd;
struct oz_pd *old_pd;
+ struct oz_serial_ctx *ctx;
pd = oz_pd_find(addr);
if (pd) {
spin_lock_bh(&g_cdev.lock);
+ if (memcmp(g_cdev.active_addr, addr, ETH_ALEN) == 0) {
+ spin_unlock_bh(&g_cdev.lock);
+ oz_pd_put(pd);
+ return rc;
+ }
memcpy(g_cdev.active_addr, addr, ETH_ALEN);
old_pd = g_cdev.active_pd;
g_cdev.active_pd = pd;
+ oz_trace_msg(O, "Active PD:%08x\n",
+ (unsigned int)((uintptr_t)pd));
spin_unlock_bh(&g_cdev.lock);
+
+ /* Reset buffer pointers if a new device is selected */
+ ctx = oz_cdev_claim_ctx(pd);
+ if (ctx != NULL) {
+ spin_lock_bh(&ctx->rd_lock);
+ ctx->dg_in = 0;
+ ctx->dg_out = 0;
+ ctx->dg_len[0] = 0;
+ ctx->rd_out = 0;
+ ctx->rd_in = 0;
+ spin_unlock_bh(&ctx->rd_lock);
+ }
+
if (old_pd)
oz_pd_put(old_pd);
} else {
@@ -224,6 +367,7 @@ static int oz_set_active_pd(const u8 *addr)
g_cdev.active_pd = NULL;
memset(g_cdev.active_addr, 0,
sizeof(g_cdev.active_addr));
+ oz_trace_msg(O, "Active PD:00000000\n");
spin_unlock_bh(&g_cdev.lock);
if (pd)
oz_pd_put(pd);
@@ -236,8 +380,53 @@ static int oz_set_active_pd(const u8 *addr)
/*------------------------------------------------------------------------------
* Context: process
*/
-static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+void oz_get_active_pd(u8 *addr)
+{
+ spin_lock_bh(&g_cdev.lock);
+ memcpy(addr, g_cdev.active_addr, ETH_ALEN);
+ spin_unlock_bh(&g_cdev.lock);
+
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+u8 oz_get_serial_mode(void)
+{
+ u8 serial_mode;
+
+ spin_lock_bh(&g_cdev.lock);
+ serial_mode = g_cdev.mode;
+ spin_unlock_bh(&g_cdev.lock);
+ return serial_mode;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+void oz_set_serial_mode(u8 mode)
+{
+ u8 addr[ETH_ALEN];
+ struct oz_pd *pd;
+ struct oz_serial_ctx *ctx;
+
+ oz_get_active_pd(addr);
+ pd = oz_pd_find(addr);
+ if (!pd)
+ return;
+ ctx = oz_cdev_claim_ctx(pd);
+ if (!ctx) {
+ oz_pd_put(pd);
+ return;
+ }
+ spin_lock_bh(&g_cdev.lock);
+ g_cdev.mode = mode;
+ spin_unlock_bh(&g_cdev.lock);
+ oz_cdev_release_ctx(ctx);
+ oz_pd_put(pd);
+}
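oz_get_serial_mode()/oz_set_serial_mode() are exported so the sysfs attributes created by oz_create_sys_entry() can flip the device between serial and TFTP framing. A hypothetical store handler showing the intended call pattern (names and wiring invented for illustration; the real attribute code is in ozkobject.c):

	/* Hypothetical sysfs hook -- illustration only. */
	static ssize_t mode_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
	{
		u8 mode;

		if (kstrtou8(buf, 0, &mode))
			return -EINVAL;
		oz_set_serial_mode(mode); /* OZ_MODE_SERIAL / OZ_MODE_TFTP */
		return count;
	}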
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
+long oz_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int rc = 0;
if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
@@ -255,7 +444,6 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
switch (cmd) {
case OZ_IOCTL_GET_PD_LIST: {
struct oz_pd_list list;
- oz_trace("OZ_IOCTL_GET_PD_LIST\n");
memset(&list, 0, sizeof(list));
list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
if (copy_to_user((void __user *)arg, &list,
@@ -265,7 +453,6 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
break;
case OZ_IOCTL_SET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
- oz_trace("OZ_IOCTL_SET_ACTIVE_PD\n");
if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
return -EFAULT;
rc = oz_set_active_pd(addr);
@@ -273,7 +460,6 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
break;
case OZ_IOCTL_GET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
- oz_trace("OZ_IOCTL_GET_ACTIVE_PD\n");
spin_lock_bh(&g_cdev.lock);
memcpy(addr, g_cdev.active_addr, ETH_ALEN);
spin_unlock_bh(&g_cdev.lock);
@@ -302,28 +488,40 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
/*------------------------------------------------------------------------------
* Context: process
*/
-static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
+unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
{
unsigned int ret = 0;
struct oz_cdev *dev = filp->private_data;
- oz_trace("Poll called wait = %p\n", wait);
+
+ oz_trace_msg(O, "POLL I %08X\n", (unsigned int)((uintptr_t)filp));
+
spin_lock_bh(&dev->lock);
if (dev->active_pd) {
struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
+ spin_unlock_bh(&dev->lock);
if (ctx) {
+ spin_lock_bh(&ctx->rd_lock);
if (ctx->rd_in != ctx->rd_out)
ret |= POLLIN | POLLRDNORM;
+
+ if (ctx->tx_seq_num == ctx->tx_done_seq_num)
+ ret |= POLLOUT;
+ spin_unlock_bh(&ctx->rd_lock);
oz_cdev_release_ctx(ctx);
}
- }
- spin_unlock_bh(&dev->lock);
+ } else
+ spin_unlock_bh(&dev->lock);
+
if (wait)
poll_wait(filp, &dev->rdq, wait);
+
+ oz_trace_msg(O, "POLL O %08X %08X\n",
+ (unsigned int)((uintptr_t)filp), ret);
return ret;
}
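With the POLLOUT addition, a writer can wait until the previously queued element has completed (tx_seq_num == tx_done_seq_num) before sending more. A minimal user-space consumer, assuming the node created by device_create() appears as /dev/ozwpan:

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[256];
		struct pollfd pfd;

		pfd.fd = open("/dev/ozwpan", O_RDWR); /* assumed node name */
		if (pfd.fd < 0)
			return 1;
		/* POLLIN: rd_in != rd_out (buffered data);
		 * POLLOUT: last queued element acknowledged. */
		pfd.events = POLLIN | POLLOUT;
		if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
			ssize_t n = read(pfd.fd, buf, sizeof(buf));
			printf("read %zd bytes\n", n);
		}
		close(pfd.fd);
		return 0;
	}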
/*------------------------------------------------------------------------------
*/
-static const struct file_operations oz_fops = {
+const struct file_operations oz_fops = {
.owner = THIS_MODULE,
.open = oz_cdev_open,
.release = oz_cdev_release,
@@ -338,13 +536,10 @@ static const struct file_operations oz_fops = {
int oz_cdev_register(void)
{
int err;
- struct device *dev;
memset(&g_cdev, 0, sizeof(g_cdev));
err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
if (err < 0)
goto out3;
- oz_trace("Alloc dev number %d:%d\n", MAJOR(g_cdev.devnum),
- MINOR(g_cdev.devnum));
cdev_init(&g_cdev.cdev, &oz_fops);
g_cdev.cdev.owner = THIS_MODULE;
g_cdev.cdev.ops = &oz_fops;
@@ -358,13 +553,17 @@ int oz_cdev_register(void)
g_oz_class = class_create(THIS_MODULE, "ozmo_wpan");
if (IS_ERR(g_oz_class)) {
oz_trace("Failed to register ozmo_wpan class\n");
+ err = PTR_ERR(g_oz_class);
goto out1;
}
- dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL, "ozwpan");
- if (IS_ERR(dev)) {
+ g_oz_wpan_dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL,
+ "ozwpan");
+ if (IS_ERR(g_oz_wpan_dev)) {
oz_trace("Failed to create sysfs entry for cdev\n");
+ err = PTR_ERR(g_oz_wpan_dev);
goto out1;
}
+ oz_create_sys_entry();
return 0;
out1:
cdev_del(&g_cdev.cdev);
@@ -381,6 +580,7 @@ int oz_cdev_deregister(void)
cdev_del(&g_cdev.cdev);
unregister_chrdev_region(g_cdev.devnum, 1);
if (g_oz_class) {
+ oz_destroy_sys_entry();
device_destroy(g_oz_class, g_cdev.devnum);
class_destroy(g_oz_class);
}
@@ -391,7 +591,6 @@ int oz_cdev_deregister(void)
*/
int oz_cdev_init(void)
{
- oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, NULL, 0);
oz_app_enable(OZ_APPID_SERIAL, 1);
return 0;
}
@@ -400,7 +599,6 @@ int oz_cdev_init(void)
*/
void oz_cdev_term(void)
{
- oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, NULL, 0);
oz_app_enable(OZ_APPID_SERIAL, 0);
}
/*------------------------------------------------------------------------------
@@ -410,7 +608,6 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
{
struct oz_serial_ctx *ctx;
struct oz_serial_ctx *old_ctx;
- oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, NULL, resume);
if (resume) {
oz_trace("Serial service resumed.\n");
return 0;
@@ -420,6 +617,7 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
return -ENOMEM;
atomic_set(&ctx->ref_count, 1);
ctx->tx_seq_num = 1;
+ ctx->tx_done_seq_num = 1;
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
old_ctx = pd->app_ctx[OZ_APPID_SERIAL-1];
if (old_ctx) {
@@ -434,7 +632,6 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
(memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
oz_pd_get(pd);
g_cdev.active_pd = pd;
- oz_trace("Active PD arrived.\n");
}
spin_unlock(&g_cdev.lock);
oz_trace("Serial service started.\n");
@@ -446,7 +643,6 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
void oz_cdev_stop(struct oz_pd *pd, int pause)
{
struct oz_serial_ctx *ctx;
- oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, NULL, pause);
if (pause) {
oz_trace("Serial service paused.\n");
return;
@@ -465,7 +661,6 @@ void oz_cdev_stop(struct oz_pd *pd, int pause)
spin_unlock(&g_cdev.lock);
if (pd) {
oz_pd_put(pd);
- oz_trace("Active PD departed.\n");
}
oz_trace("Serial service stopped.\n");
}
@@ -481,6 +676,11 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
int space;
int copy_sz;
int ix;
+ int is_tftp;
+
+ spin_lock_bh(&g_cdev.lock);
+ is_tftp = (g_cdev.mode & OZ_MODE_TFTP) ? 1 : 0;
+ spin_unlock_bh(&g_cdev.lock);
ctx = oz_cdev_claim_ctx(pd);
if (ctx == NULL) {
@@ -488,7 +688,7 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
return;
}
- app_hdr = (struct oz_app_hdr *)(elt+1);
+ app_hdr = (struct oz_app_hdr *)(oz_elt_data(elt));
/* If sequence number is non-zero then check it is not a duplicate.
*/
if (app_hdr->elt_seq_num != 0) {
@@ -500,10 +700,11 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
}
}
ctx->rx_seq_num = app_hdr->elt_seq_num;
- len = elt->length - sizeof(struct oz_app_hdr);
- data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr);
+ len = oz_elt_data_len(elt) - sizeof(struct oz_app_hdr);
+ data = ((u8 *)(app_hdr + 1));
if (len <= 0)
goto out;
+ spin_lock_bh(&ctx->rd_lock);
space = ctx->rd_out - ctx->rd_in - 1;
if (space < 0)
space += OZ_RD_BUF_SZ;
@@ -511,6 +712,19 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
oz_trace("Not enough space:%d %d\n", len, space);
len = space;
}
+
+
+ if (is_tftp) {
+ if (len != 0) {
+ /* remember length of datagram */
+ ctx->dg_len[ctx->dg_in] = len;
+
+ ctx->dg_in++;
+ if ((OZ_RD_BUF_SZ/4) == ctx->dg_in)
+ ctx->dg_in = 0;
+ }
+ }
+
ix = ctx->rd_in;
copy_sz = OZ_RD_BUF_SZ - ix;
if (copy_sz > len)
@@ -525,7 +739,14 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
ix = len;
}
ctx->rd_in = ix;
+ spin_unlock_bh(&ctx->rd_lock);
wake_up(&g_cdev.rdq);
out:
oz_cdev_release_ctx(ctx);
}
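In TFTP mode the byte ring alone would lose message boundaries, so the receive path records each datagram's length in dg_len[] and the reader consumes exactly one entry per read(). A standalone sketch of that bookkeeping:

	#include <stdio.h>

	#define NDGRAMS (256 / 4)	/* mirrors dg_len[OZ_RD_BUF_SZ/4] */

	static int dg_len[NDGRAMS], dg_in, dg_out;

	/* Producer side: one entry per received datagram. */
	static void dg_push(int len)
	{
		dg_len[dg_in] = len;
		if (++dg_in == NDGRAMS)
			dg_in = 0;
	}

	/* Consumer side: returns 0 when no complete datagram is pending. */
	static int dg_pop(void)
	{
		int len = dg_len[dg_out];

		if (len) {
			dg_len[dg_out] = 0;
			if (++dg_out == NDGRAMS)
				dg_out = 0;
		}
		return len;
	}

	int main(void)
	{
		dg_push(48);
		dg_push(512);
		printf("%d %d %d\n", dg_pop(), dg_pop(), dg_pop());
		/* prints: 48 512 0 */
		return 0;
	}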
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_cdev_heartbeat(struct oz_pd *pd)
+{
+}
diff --git a/drivers/staging/ozwpan/ozcdev.h b/drivers/staging/ozwpan/ozcdev.h
index dd11935a093f..d0dc3dc5d5f1 100644
--- a/drivers/staging/ozwpan/ozcdev.h
+++ b/drivers/staging/ozwpan/ozcdev.h
@@ -6,6 +6,7 @@
#ifndef _OZCDEV_H
#define _OZCDEV_H
+extern struct device *g_oz_wpan_dev;
int oz_cdev_register(void);
int oz_cdev_deregister(void);
int oz_cdev_init(void);
@@ -13,5 +14,8 @@ void oz_cdev_term(void);
int oz_cdev_start(struct oz_pd *pd, int resume);
void oz_cdev_stop(struct oz_pd *pd, int pause);
void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt);
+void oz_cdev_heartbeat(struct oz_pd *pd);
+int oz_set_active_pd(const u8 *addr);
+void oz_get_active_pd(u8 *addr);
#endif /* _OZCDEV_H */
diff --git a/drivers/staging/ozwpan/ozconfig.h b/drivers/staging/ozwpan/ozconfig.h
deleted file mode 100644
index 43e6373a009c..000000000000
--- a/drivers/staging/ozwpan/ozconfig.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * ---------------------------------------------------------------------------*/
-#ifndef _OZCONFIG_H
-#define _OZCONFIG_H
-
-/* #define WANT_TRACE */
-#ifdef WANT_TRACE
-#define WANT_VERBOSE_TRACE
-#endif /* #ifdef WANT_TRACE */
-/* #define WANT_URB_PARANOIA */
-
-/* #define WANT_PRE_2_6_39 */
-#define WANT_EVENT_TRACE
-
-/* These defines determine what verbose trace is displayed. */
-#ifdef WANT_VERBOSE_TRACE
-/* #define WANT_TRACE_STREAM */
-/* #define WANT_TRACE_URB */
-/* #define WANT_TRACE_CTRL_DETAIL */
-#define WANT_TRACE_HUB
-/* #define WANT_TRACE_RX_FRAMES */
-/* #define WANT_TRACE_TX_FRAMES */
-#endif /* WANT_VERBOSE_TRACE */
-
-#endif /* _OZCONFIG_H */
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
index ac90fc7f5441..a52fecef85fb 100644
--- a/drivers/staging/ozwpan/ozeltbuf.c
+++ b/drivers/staging/ozwpan/ozeltbuf.c
@@ -6,7 +6,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
@@ -132,8 +131,6 @@ int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
{
struct oz_elt_stream *st;
- oz_trace("oz_elt_stream_create(0x%x)\n", id);
-
st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC | __GFP_ZERO);
if (st == NULL)
return -ENOMEM;
@@ -152,7 +149,7 @@ int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
{
struct list_head *e;
struct oz_elt_stream *st = NULL;
- oz_trace("oz_elt_stream_delete(0x%x)\n", id);
+
spin_lock_bh(&buf->lock);
e = buf->stream_list.next;
while (e != &buf->stream_list) {
@@ -175,9 +172,6 @@ int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
list_del_init(&ei->link);
list_del_init(&ei->link_order);
st->buf_count -= ei->length;
- oz_trace2(OZ_TRACE_STREAM, "Stream down: %d %d %d\n",
- st->buf_count,
- ei->length, atomic_read(&st->ref_count));
oz_elt_stream_put(st);
oz_elt_info_free(buf, ei);
}
@@ -242,8 +236,6 @@ int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
st->buf_count += ei->length;
/* Add to list in stream. */
list_add_tail(&ei->link, &st->elt_list);
- oz_trace2(OZ_TRACE_STREAM, "Stream up: %d %d\n",
- st->buf_count, ei->length);
/* Check if we have too much buffered for this stream. If so
* start dropping elements until we are back in bounds.
*/
@@ -283,8 +275,12 @@ int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
ei = container_of(e, struct oz_elt_info, link_order);
e = e->next;
if ((*len + ei->length) <= max_len) {
- app_hdr = (struct oz_app_hdr *)
- &ei->data[sizeof(struct oz_elt)];
+ if (ei->flags & OZ_EI_F_EXT_ELM)
+ app_hdr = (struct oz_app_hdr *)
+ &ei->data[sizeof(struct oz_ext_elt)];
+ else
+ app_hdr = (struct oz_app_hdr *)
+ &ei->data[sizeof(struct oz_elt)];
app_hdr->elt_seq_num = buf->tx_seq_num[ei->app_id]++;
if (buf->tx_seq_num[ei->app_id] == 0)
buf->tx_seq_num[ei->app_id] = 1;
@@ -293,9 +289,6 @@ int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
list_del(&ei->link_order);
if (ei->stream) {
ei->stream->buf_count -= ei->length;
- oz_trace2(OZ_TRACE_STREAM,
- "Stream down: %d %d\n",
- ei->stream->buf_count, ei->length);
oz_elt_stream_put(ei->stream);
ei->stream = NULL;
}
diff --git a/drivers/staging/ozwpan/ozeltbuf.h b/drivers/staging/ozwpan/ozeltbuf.h
index 03c12f57b9bb..2c184be4adab 100644
--- a/drivers/staging/ozwpan/ozeltbuf.h
+++ b/drivers/staging/ozwpan/ozeltbuf.h
@@ -23,7 +23,7 @@ struct oz_elt_stream {
u8 id;
};
-#define OZ_MAX_ELT_PAYLOAD 255
+#define OZ_MAX_ELT_PAYLOAD 1024
struct oz_elt_info {
struct list_head link;
struct list_head link_order;
@@ -32,12 +32,13 @@ struct oz_elt_info {
oz_elt_callback_t callback;
long context;
struct oz_elt_stream *stream;
- u8 data[sizeof(struct oz_elt) + OZ_MAX_ELT_PAYLOAD];
+ u8 data[sizeof(struct oz_ext_elt) + OZ_MAX_ELT_PAYLOAD];
int length;
unsigned magic;
};
/* Flags values */
#define OZ_EI_F_MARKED 0x1
+#define OZ_EI_F_EXT_ELM 0x2
struct oz_elt_buf {
spinlock_t lock;
diff --git a/drivers/staging/ozwpan/ozevent.c b/drivers/staging/ozwpan/ozevent.c
deleted file mode 100644
index 77e86753610d..000000000000
--- a/drivers/staging/ozwpan/ozevent.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include "ozconfig.h"
-#ifdef WANT_EVENT_TRACE
-#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-#include "oztrace.h"
-#include "ozevent.h"
-#include "ozappif.h"
-/*------------------------------------------------------------------------------
- * Although the event mask is logically part of the oz_evtdev structure, it is
- * needed outside of this file so define it separately to avoid the need to
- * export definition of struct oz_evtdev.
- */
-u32 g_evt_mask;
-/*------------------------------------------------------------------------------
- */
-#define OZ_MAX_EVTS 2048 /* Must be power of 2 */
-struct oz_evtdev {
- struct dentry *root_dir;
- int evt_in;
- int evt_out;
- int missed_events;
- int present;
- atomic_t users;
- spinlock_t lock;
- struct oz_event evts[OZ_MAX_EVTS];
-};
-
-static struct oz_evtdev g_evtdev;
-
-/*------------------------------------------------------------------------------
- * Context: process
- */
-void oz_event_init(void)
-{
- /* Because g_evtdev is static external all fields initially zero so no
- * need to reinitialized those.
- */
- oz_trace("Event tracing initialized\n");
- spin_lock_init(&g_evtdev.lock);
- atomic_set(&g_evtdev.users, 0);
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-void oz_event_term(void)
-{
- oz_trace("Event tracing terminated\n");
-}
-/*------------------------------------------------------------------------------
- * Context: any
- */
-void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4)
-{
- unsigned long irqstate;
- int ix;
- spin_lock_irqsave(&g_evtdev.lock, irqstate);
- ix = (g_evtdev.evt_in + 1) & (OZ_MAX_EVTS - 1);
- if (ix != g_evtdev.evt_out) {
- struct oz_event *e = &g_evtdev.evts[g_evtdev.evt_in];
- e->jiffies = jiffies;
- e->evt = evt;
- e->ctx1 = ctx1;
- e->ctx2 = ctx2;
- e->ctx3 = (__u32)(unsigned long)ctx3;
- e->ctx4 = ctx4;
- g_evtdev.evt_in = ix;
- } else {
- g_evtdev.missed_events++;
- }
- spin_unlock_irqrestore(&g_evtdev.lock, irqstate);
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-#ifdef CONFIG_DEBUG_FS
-static void oz_events_clear(struct oz_evtdev *dev)
-{
- unsigned long irqstate;
- oz_trace("Clearing events\n");
- spin_lock_irqsave(&dev->lock, irqstate);
- dev->evt_in = dev->evt_out = 0;
- dev->missed_events = 0;
- spin_unlock_irqrestore(&dev->lock, irqstate);
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-static int oz_events_open(struct inode *inode, struct file *filp)
-{
- oz_trace("oz_evt_open()\n");
- oz_trace("Open flags: 0x%x\n", filp->f_flags);
- if (atomic_add_return(1, &g_evtdev.users) == 1) {
- oz_events_clear(&g_evtdev);
- return nonseekable_open(inode, filp);
- } else {
- atomic_dec(&g_evtdev.users);
- return -EBUSY;
- }
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-static int oz_events_release(struct inode *inode, struct file *filp)
-{
- oz_events_clear(&g_evtdev);
- atomic_dec(&g_evtdev.users);
- g_evt_mask = 0;
- oz_trace("oz_evt_release()\n");
- return 0;
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-static ssize_t oz_events_read(struct file *filp, char __user *buf, size_t count,
- loff_t *fpos)
-{
- struct oz_evtdev *dev = &g_evtdev;
- int rc = 0;
- int nb_evts = count / sizeof(struct oz_event);
- int n;
- int sz;
-
- n = dev->evt_in - dev->evt_out;
- if (n < 0)
- n += OZ_MAX_EVTS;
- if (nb_evts > n)
- nb_evts = n;
- if (nb_evts == 0)
- goto out;
- n = OZ_MAX_EVTS - dev->evt_out;
- if (n > nb_evts)
- n = nb_evts;
- sz = n * sizeof(struct oz_event);
- if (copy_to_user(buf, &dev->evts[dev->evt_out], sz)) {
- rc = -EFAULT;
- goto out;
- }
- if (n == nb_evts)
- goto out2;
- n = nb_evts - n;
- if (copy_to_user(buf + sz, dev->evts, n * sizeof(struct oz_event))) {
- rc = -EFAULT;
- goto out;
- }
-out2:
- dev->evt_out = (dev->evt_out + nb_evts) & (OZ_MAX_EVTS - 1);
- rc = nb_evts * sizeof(struct oz_event);
-out:
- return rc;
-}
-/*------------------------------------------------------------------------------
- */
-static const struct file_operations oz_events_fops = {
- .owner = THIS_MODULE,
- .open = oz_events_open,
- .release = oz_events_release,
- .read = oz_events_read,
-};
-/*------------------------------------------------------------------------------
- * Context: process
- */
-void oz_debugfs_init(void)
-{
- struct dentry *parent;
-
- parent = debugfs_create_dir("ozwpan", NULL);
- if (parent == NULL) {
- oz_trace("Failed to create debugfs directory ozmo\n");
- return;
- } else {
- g_evtdev.root_dir = parent;
- if (debugfs_create_file("events", S_IRUSR, parent, NULL,
- &oz_events_fops) == NULL)
- oz_trace("Failed to create file ozmo/events\n");
- if (debugfs_create_x32("event_mask", S_IRUSR | S_IWUSR, parent,
- &g_evt_mask) == NULL)
- oz_trace("Failed to create file ozmo/event_mask\n");
- }
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-void oz_debugfs_remove(void)
-{
- debugfs_remove_recursive(g_evtdev.root_dir);
-}
-#endif /* CONFIG_DEBUG_FS */
-#endif /* WANT_EVENT_TRACE */
diff --git a/drivers/staging/ozwpan/ozevent.h b/drivers/staging/ozwpan/ozevent.h
deleted file mode 100644
index 32f6f9859c41..000000000000
--- a/drivers/staging/ozwpan/ozevent.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZEVENT_H
-#define _OZEVENT_H
-#include "ozconfig.h"
-#include "ozeventdef.h"
-
-#ifdef WANT_EVENT_TRACE
-extern u32 g_evt_mask;
-void oz_event_init(void);
-void oz_event_term(void);
-void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4);
-void oz_debugfs_init(void);
-void oz_debugfs_remove(void);
-#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4) \
- do { \
- if ((1<<(__evt)) & g_evt_mask) \
- oz_event_log2(__evt, __ctx1, __ctx2, __ctx3, __ctx4); \
- } while (0)
-
-#else
-#define oz_event_init()
-#define oz_event_term()
-#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4)
-#define oz_debugfs_init()
-#define oz_debugfs_remove()
-#endif /* WANT_EVENT_TRACE */
-
-#endif /* _OZEVENT_H */
diff --git a/drivers/staging/ozwpan/ozeventdef.h b/drivers/staging/ozwpan/ozeventdef.h
deleted file mode 100644
index 4b938981671a..000000000000
--- a/drivers/staging/ozwpan/ozeventdef.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZEVENTDEF_H
-#define _OZEVENTDEF_H
-
-#define OZ_EVT_RX_FRAME 0
-#define OZ_EVT_RX_PROCESS 1
-#define OZ_EVT_TX_FRAME 2
-#define OZ_EVT_TX_ISOC 3
-#define OZ_EVT_URB_SUBMIT 4
-#define OZ_EVT_URB_DONE 5
-#define OZ_EVT_URB_CANCEL 6
-#define OZ_EVT_CTRL_REQ 7
-#define OZ_EVT_CTRL_CNF 8
-#define OZ_EVT_CTRL_LOCAL 9
-#define OZ_EVT_CONNECT_REQ 10
-#define OZ_EVT_CONNECT_RSP 11
-#define OZ_EVT_EP_CREDIT 12
-#define OZ_EVT_EP_BUFFERING 13
-#define OZ_EVT_TX_ISOC_DONE 14
-#define OZ_EVT_TX_ISOC_DROP 15
-#define OZ_EVT_TIMER_CTRL 16
-#define OZ_EVT_TIMER 17
-#define OZ_EVT_PD_STATE 18
-#define OZ_EVT_SERVICE 19
-#define OZ_EVT_DEBUG 20
-
-struct oz_event {
- __u32 jiffies;
- __u8 evt;
- __u8 ctx1;
- __u16 ctx2;
- __u32 ctx3;
- __u32 ctx4;
-};
-
-#endif /* _OZEVENTDEF_H */
diff --git a/drivers/staging/ozwpan/ozeventtrace.h b/drivers/staging/ozwpan/ozeventtrace.h
new file mode 100644
index 000000000000..def0878b3fc1
--- /dev/null
+++ b/drivers/staging/ozwpan/ozeventtrace.h
@@ -0,0 +1,219 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ozwpan
+
+#if !defined(_OZEVENTTRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _OZEVENTTRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/usb.h>
+
+#define MAX_URB_LEN 16
+#define MAX_FRAME_LEN 32
+#define MAX_MSG_LEN 128
+
+TRACE_EVENT(urb_in,
+
+ TP_PROTO(struct urb *oz_urb),
+
+ TP_ARGS(oz_urb),
+
+ TP_STRUCT__entry(
+ __field(uintptr_t, urb)
+ __field(u32, endpoint)
+ __field(u32, buffer_length)
+ __field(u32, inc_length)
+ __array(u8, buffer, MAX_URB_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->urb = (uintptr_t)oz_urb;
+ __entry->endpoint = usb_pipeendpoint(oz_urb->pipe);
+ if (usb_pipein(oz_urb->pipe))
+ __entry->endpoint |= 0x80;
+ __entry->buffer_length = oz_urb->transfer_buffer_length;
+ __entry->inc_length = oz_urb->transfer_buffer_length
+ <= MAX_URB_LEN ? oz_urb->transfer_buffer_length : MAX_URB_LEN;
+ if ((__entry->endpoint == 0x00) ||
+ (__entry->endpoint == 0x80)) {
+ __entry->buffer_length = 8;
+ __entry->inc_length = 8;
+ memcpy(__entry->buffer, oz_urb->setup_packet, 8);
+ } else {
+ memcpy(__entry->buffer, oz_urb->transfer_buffer,
+ __entry->inc_length);
+ }
+ ),
+
+ TP_printk("%08x,%02x,%03x,%03x,"
+ "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
+ (u32)__entry->urb, __entry->endpoint, __entry->buffer_length,
+ __entry->inc_length, __entry->buffer[0], __entry->buffer[1],
+ __entry->buffer[2], __entry->buffer[3], __entry->buffer[4],
+ __entry->buffer[5], __entry->buffer[6], __entry->buffer[7],
+ __entry->buffer[8], __entry->buffer[9], __entry->buffer[10],
+ __entry->buffer[11], __entry->buffer[12], __entry->buffer[13],
+ __entry->buffer[14], __entry->buffer[15])
+);
+
+TRACE_EVENT(urb_out,
+
+ TP_PROTO(struct urb *oz_urb, int status),
+
+ TP_ARGS(oz_urb, status),
+
+ TP_STRUCT__entry(
+ __field(uintptr_t, urb)
+ __field(u32, endpoint)
+ __field(u32, status)
+ __field(u32, actual_length)
+ __field(u32, inc_length)
+ __array(u8, buffer, MAX_URB_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->urb = (uintptr_t)oz_urb;
+ __entry->endpoint = usb_pipeendpoint(oz_urb->pipe);
+ __entry->status = status;
+ if (usb_pipein(oz_urb->pipe))
+ __entry->endpoint |= 0x80;
+ __entry->actual_length = oz_urb->actual_length;
+ __entry->inc_length = oz_urb->actual_length
+ <= MAX_URB_LEN ? oz_urb->actual_length : MAX_URB_LEN;
+ if (usb_pipein(oz_urb->pipe))
+ memcpy(__entry->buffer, oz_urb->transfer_buffer,
+ __entry->inc_length);
+ ),
+
+ TP_printk("%08x,%08x,%02x,%03x,%03x,"
+ "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
+ (u32)__entry->urb, __entry->status, __entry->endpoint,
+ __entry->actual_length, __entry->inc_length, __entry->buffer[0],
+ __entry->buffer[1], __entry->buffer[2], __entry->buffer[3],
+ __entry->buffer[4], __entry->buffer[5], __entry->buffer[6],
+ __entry->buffer[7], __entry->buffer[8], __entry->buffer[9],
+ __entry->buffer[10], __entry->buffer[11], __entry->buffer[12],
+ __entry->buffer[13], __entry->buffer[14], __entry->buffer[15])
+);
+
+TRACE_EVENT(rx_frame,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(u32, inc_len)
+ __field(u32, orig_len)
+ __array(u8, data, MAX_FRAME_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->orig_len = skb->len;
+ __entry->inc_len = skb->len < MAX_FRAME_LEN ?
+ skb->len : MAX_FRAME_LEN;
+ memcpy(__entry->data, (u8 *)skb_network_header(skb),
+ __entry->inc_len);
+ ),
+
+ TP_printk("%03x,%03x,%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x", __entry->orig_len,
+ __entry->inc_len, __entry->data[0], __entry->data[1],
+ __entry->data[2], __entry->data[3], __entry->data[4],
+ __entry->data[5], __entry->data[6], __entry->data[7],
+ __entry->data[8], __entry->data[9], __entry->data[10],
+ __entry->data[11], __entry->data[12], __entry->data[13],
+ __entry->data[14], __entry->data[15], __entry->data[16],
+ __entry->data[17], __entry->data[18], __entry->data[19],
+ __entry->data[20], __entry->data[21], __entry->data[22],
+ __entry->data[23], __entry->data[24], __entry->data[25],
+ __entry->data[26], __entry->data[27], __entry->data[28],
+ __entry->data[29], __entry->data[30], __entry->data[31])
+);
+
+TRACE_EVENT(tx_frame,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(u32, inc_len)
+ __field(u32, orig_len)
+ __array(u8, data, MAX_FRAME_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->orig_len = skb->len - 14;
+ __entry->inc_len = __entry->orig_len
+ < MAX_FRAME_LEN ?
+ __entry->orig_len
+ : MAX_FRAME_LEN;
+ memcpy(__entry->data, (u8 *)skb_network_header(skb),
+ __entry->inc_len);
+ ),
+
+ TP_printk("%03x,%03x,%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x", __entry->orig_len,
+ __entry->inc_len, __entry->data[0], __entry->data[1],
+ __entry->data[2], __entry->data[3], __entry->data[4],
+ __entry->data[5], __entry->data[6], __entry->data[7],
+ __entry->data[8], __entry->data[9], __entry->data[10],
+ __entry->data[11], __entry->data[12], __entry->data[13],
+ __entry->data[14], __entry->data[15], __entry->data[16],
+ __entry->data[17], __entry->data[18], __entry->data[19],
+ __entry->data[20], __entry->data[21], __entry->data[22],
+ __entry->data[23], __entry->data[24], __entry->data[25],
+ __entry->data[26], __entry->data[27], __entry->data[28],
+ __entry->data[29], __entry->data[30], __entry->data[31])
+);
+
+DECLARE_EVENT_CLASS(debug_msg,
+
+ TP_PROTO(char *fmt, va_list arg),
+
+ TP_ARGS(fmt, arg),
+
+ TP_STRUCT__entry(
+ __array(char, msg, MAX_MSG_LEN)
+ ),
+
+ TP_fast_assign(
+ vsnprintf(__entry->msg, MAX_MSG_LEN, fmt, arg);
+ ),
+
+ TP_printk("%s", __entry->msg)
+);
+
+DEFINE_EVENT(debug_msg, hcd_msg_evt,
+ TP_PROTO(char *fmt, va_list arg),
+ TP_ARGS(fmt, arg)
+);
+
+DEFINE_EVENT(debug_msg, isoc_msg_evt,
+ TP_PROTO(char *fmt, va_list arg),
+ TP_ARGS(fmt, arg)
+);
+
+DEFINE_EVENT(debug_msg, info_msg_evt,
+ TP_PROTO(char *fmt, va_list arg),
+ TP_ARGS(fmt, arg)
+);
+
+
+#endif /*_OZEVENTTRACE_H*/
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE ozeventtrace
+#include <trace/define_trace.h>
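These TRACE_EVENT definitions follow the standard in-kernel tracepoint pattern: exactly one compilation unit defines CREATE_TRACE_POINTS before including the header (which instantiates the tracepoints), and every other user just includes the header and calls the generated trace_<name>() functions. Roughly:

	/* In exactly one .c file of the driver: */
	#define CREATE_TRACE_POINTS
	#include "ozeventtrace.h"

	/* In any other file, after a plain #include "ozeventtrace.h"
	 * (struct sk_buff comes from <linux/skbuff.h>): */
	static void example_rx_path(struct sk_buff *skb)
	{
		trace_rx_frame(skb); /* logs up to MAX_FRAME_LEN bytes */
	}

Once enabled, the events appear under /sys/kernel/debug/tracing/events/ozwpan/.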
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index 8ac26f584fd4..42c8a0880740 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -26,22 +26,17 @@
*/
#include <linux/platform_device.h>
#include <linux/usb.h>
-#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "linux/usb/hcd.h"
#include <asm/unaligned.h>
-#include "ozconfig.h"
#include "ozusbif.h"
#include "oztrace.h"
#include "ozurbparanoia.h"
-#include "ozevent.h"
#include "ozhcd.h"
-/*------------------------------------------------------------------------------
- * Number of units of buffering to capture for an isochronous IN endpoint before
- * allowing data to be indicated up.
- */
-#define OZ_IN_BUFFERING_UNITS 50
+
+#define OZ_HUB_DEBOUNCE_TIMEOUT 1500
+
/* Name of our platform device.
*/
#define OZ_PLAT_DEV_NAME "ozwpan"
@@ -51,6 +46,9 @@
/* Get endpoint object from the containing link.
*/
#define ep_from_link(__e) container_of((__e), struct oz_endpoint, link)
+/* EP0 timeout before the ep0 request is added back to the TX queue.
+ * (13 heartbeats * 8 ms = 104 ms)
+ */
+#define EP0_TIMEOUT_COUNTER 13
/*------------------------------------------------------------------------------
* Used to link urbs together and also store some status information for each
* urb.
@@ -62,7 +60,7 @@ struct oz_urb_link {
struct oz_port *port;
u8 req_id;
u8 ep_num;
- unsigned long submit_jiffies;
+ unsigned submit_counter;
};
/* Holds state information about a USB endpoint.
@@ -71,7 +69,8 @@ struct oz_endpoint {
struct list_head urb_list; /* List of oz_urb_link items. */
struct list_head link; /* For isoc ep, links in to isoc
lists of oz_port. */
- unsigned long last_jiffies;
+ struct timespec timestamp;
+ int credit2;
int credit;
int credit_ceiling;
u8 ep_num;
@@ -81,6 +80,7 @@ struct oz_endpoint {
int in_ix;
int out_ix;
int buffered_units;
+ u8 max_buffer_units;
unsigned flags;
int start_frame;
};
@@ -88,6 +88,13 @@ struct oz_endpoint {
#define OZ_F_EP_BUFFERING 0x1
#define OZ_F_EP_HAVE_STREAM 0x2
+
+/* Buffer size.
+ * Total size (in bytes) of the endpoint buffer used for isochronous
+ * data, and for stashing BULK or INT data when no URB is available.
+ */
+#define OZ_EP_BUFFER_SIZE_ISOC (1024*24)
+#define OZ_EP_BUFFER_SIZE_INT (512)
/* Holds state information about a USB interface.
*/
struct oz_interface {
@@ -189,6 +196,7 @@ static DEFINE_SPINLOCK(g_tasklet_lock);
static struct tasklet_struct g_urb_process_tasklet;
static struct tasklet_struct g_urb_cancel_tasklet;
static atomic_t g_pending_urbs = ATOMIC_INIT(0);
+static atomic_t g_usb_frame_number = ATOMIC_INIT(0);
static const struct hc_driver g_oz_hc_drv = {
.description = g_hcd_name,
.product_desc = "Ozmo Devices WPAN",
@@ -237,9 +245,14 @@ static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
{
int i;
- for (i = 0; i < OZ_NB_PORTS; i++) {
- if (ozhcd->ports[i].bus_addr == bus_addr)
- return i;
+
+ if (0 != bus_addr) {
+ for (i = 0; i < OZ_NB_PORTS; i++) {
+ if (ozhcd->ports[i].bus_addr == bus_addr)
+ return i;
+ }
+
+ return -1;
}
return ozhcd->conn_port;
}
@@ -279,7 +292,8 @@ static void oz_free_urb_link(struct oz_urb_link *urbl)
g_link_pool_size++;
}
spin_unlock_irqrestore(&g_link_lock, irq_state);
- kfree(urbl);
+ if (urbl)
+ kfree(urbl);
}
}
/*------------------------------------------------------------------------------
@@ -327,7 +341,7 @@ static struct oz_endpoint *oz_ep_alloc(gfp_t mem_flags, int buffer_size)
* disabled.
* Context: softirq or process
*/
-static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
+struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb)
{
struct oz_urb_link *urbl;
struct list_head *e;
@@ -346,12 +360,27 @@ static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb
* Context: softirq or process
*/
static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
- int status, unsigned long submit_jiffies)
+ int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned long irq_state;
struct oz_urb_link *cancel_urbl = NULL;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
+
+ if (usb_pipeisoc(urb->pipe)) {
+ if (status < 0) {
+ int i;
+ urb->transfer_buffer_length = 0;
+ for (i = 0; i < urb->number_of_packets; i++) {
+ urb->iso_frame_desc[i].actual_length = 0;
+ urb->iso_frame_desc[i].status = status;
+ }
+ } else {
+ /* ISOC checks transfer_buffer_length */
+ urb->transfer_buffer_length = urb->actual_length;
+ }
+ }
+ oz_trace_urb_out(urb, status);
usb_hcd_unlink_urb_from_ep(hcd, urb);
/* Clear hcpriv which will prevent it being put in the cancel list
* in the event that an attempt is made to cancel it.
@@ -374,14 +403,7 @@ static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
if (oz_forget_urb(urb)) {
oz_trace("OZWPAN: ERROR Unknown URB %p\n", urb);
} else {
- static unsigned long last_time;
atomic_dec(&g_pending_urbs);
- oz_trace2(OZ_TRACE_URB,
- "%lu: giveback_urb(%p,%x) %lu %lu pending:%d\n",
- jiffies, urb, status, jiffies-submit_jiffies,
- jiffies-last_time, atomic_read(&g_pending_urbs));
- last_time = jiffies;
- oz_event_log(OZ_EVT_URB_DONE, 0, 0, urb, status);
usb_hcd_giveback_urb(hcd, urb, status);
}
spin_lock(&g_tasklet_lock);
@@ -396,7 +418,6 @@ static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
*/
static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
{
- oz_trace("oz_ep_free()\n");
if (port) {
struct list_head list;
struct oz_hcd *ozhcd = port->ozhcd;
@@ -411,19 +432,17 @@ static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
list_splice_tail(&list, &ozhcd->orphanage);
spin_unlock_bh(&ozhcd->hcd_lock);
}
- oz_trace("Freeing endpoint memory\n");
kfree(ep);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
-static void oz_complete_buffered_urb(struct oz_port *port,
- struct oz_endpoint *ep,
+void oz_complete_buffered_urb(struct oz_port *port, struct oz_endpoint *ep,
struct urb *urb)
{
- u8 data_len, available_space, copy_len;
+ int data_len, available_space, copy_len;
- memcpy(&data_len, &ep->buffer[ep->out_ix], sizeof(u8));
+ data_len = ep->buffer[ep->out_ix];
if (data_len <= urb->transfer_buffer_length)
available_space = data_len;
else
@@ -448,9 +467,7 @@ static void oz_complete_buffered_urb(struct oz_port *port,
ep->out_ix = 0;
ep->buffered_units--;
- oz_trace("Trying to give back buffered frame of size=%d\n",
- available_space);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/*------------------------------------------------------------------------------
@@ -469,7 +486,7 @@ static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
urbl = oz_alloc_urb_link();
if (!urbl)
return -ENOMEM;
- urbl->submit_jiffies = jiffies;
+ urbl->submit_counter = 0;
urbl->urb = urb;
urbl->req_id = req_id;
urbl->ep_num = ep_addr;
@@ -483,14 +500,25 @@ static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
if (urb->unlinked) {
spin_unlock_bh(&port->ozhcd->hcd_lock);
oz_trace("urb %p unlinked so complete immediately\n", urb);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
oz_free_urb_link(urbl);
return 0;
}
- if (in_dir)
- ep = port->in_ep[ep_addr];
- else
- ep = port->out_ep[ep_addr];
+ if (in_dir) {
+ if (port->in_ep[ep_addr]) {
+ ep = port->in_ep[ep_addr];
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (port->out_ep[ep_addr]) {
+ ep = port->out_ep[ep_addr];
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+ }
/* For interrupt endpoints, check for buffered data
* & complete the urb
@@ -506,14 +534,13 @@ static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
if (ep && port->hpd) {
list_add_tail(&urbl->link, &ep->urb_list);
if (!in_dir && ep_addr && (ep->credit < 0)) {
- ep->last_jiffies = jiffies;
+ getrawmonotonic(&ep->timestamp);
ep->credit = 0;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num,
- 0, NULL, ep->credit);
}
} else {
err = -EPIPE;
}
+out:
spin_unlock_bh(&port->ozhcd->hcd_lock);
if (err)
oz_free_urb_link(urbl);
@@ -583,6 +610,35 @@ static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
return urb;
}
/*------------------------------------------------------------------------------
+ * Finds the urb link for a given request id, then sets its submit_counter
+ * to 1; the heartbeat then counts this value up to EP0_TIMEOUT_COUNTER.
+ */
+void oz_hcd_mark_urb_submitted(void *hport, int ep_ix, u8 req_id)
+{
+ struct oz_port *port = (struct oz_port *)hport;
+ struct oz_hcd *ozhcd = port->ozhcd;
+ struct oz_urb_link *urbl = NULL;
+ struct oz_endpoint *ep;
+ unsigned long irq_state;
+ unsigned found = 0;
+ spin_lock_bh(&ozhcd->hcd_lock);
+ spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ ep = port->out_ep[ep_ix];
+ if (ep) {
+ struct list_head *e;
+ list_for_each(e, &ep->urb_list) {
+ urbl = container_of(e, struct oz_urb_link, link);
+ if (urbl->req_id == req_id) {
+ urbl->submit_counter = 1;
+ found = 1;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
+ spin_unlock_bh(&ozhcd->hcd_lock);
+}
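The submit_counter set here is, per the comment above, aged by the heartbeat until it reaches EP0_TIMEOUT_COUNTER, at which point the EP0 request is put back on the TX queue. The aging side is outside this hunk; a hypothetical sketch of the pattern the comment describes:

	/* Hypothetical heartbeat-side aging -- illustration only; the
	 * real re-queue logic is not part of this hunk. */
	list_for_each_entry(urbl, &ep->urb_list, link) {
		if (urbl->submit_counter &&
		    ++urbl->submit_counter > EP0_TIMEOUT_COUNTER) {
			urbl->submit_counter = 0;
			/* put the request back on the TX queue */
		}
	}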
+/*------------------------------------------------------------------------------
* Pre-condition: Port lock must be held.
* Context: softirq
*/
@@ -635,7 +691,8 @@ void *oz_hcd_pd_arrived(void *hpd)
void *hport = NULL;
struct oz_hcd *ozhcd = NULL;
struct oz_endpoint *ep;
- oz_trace("oz_hcd_pd_arrived()\n");
+ static int n = OZ_NB_PORTS;
+ int j;
ozhcd = oz_hcd_claim();
if (ozhcd == NULL)
return NULL;
@@ -646,32 +703,38 @@ void *oz_hcd_pd_arrived(void *hpd)
spin_lock_bh(&ozhcd->hcd_lock);
if (ozhcd->conn_port >= 0) {
spin_unlock_bh(&ozhcd->hcd_lock);
- oz_trace("conn_port >= 0\n");
goto out;
}
+ j = n;
for (i = 0; i < OZ_NB_PORTS; i++) {
- struct oz_port *port = &ozhcd->ports[i];
+ struct oz_port *port;
+ if (++j >= OZ_NB_PORTS)
+ j = 0;
+ port = &ozhcd->ports[j];
spin_lock(&port->port_lock);
if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
oz_acquire_port(port, hpd);
+ port->bus_addr = 0;
+ port->config_num = 0;
spin_unlock(&port->port_lock);
break;
}
spin_unlock(&port->port_lock);
}
if (i < OZ_NB_PORTS) {
- oz_trace("Setting conn_port = %d\n", i);
- ozhcd->conn_port = i;
+ ozhcd->conn_port = j;
/* Attach out endpoint 0.
*/
- ozhcd->ports[i].out_ep[0] = ep;
+ ozhcd->ports[j].out_ep[0] = ep;
ep = NULL;
- hport = &ozhcd->ports[i];
+ hport = &ozhcd->ports[j];
spin_unlock_bh(&ozhcd->hcd_lock);
if (ozhcd->flags & OZ_HDC_F_SUSPENDED) {
- oz_trace("Resuming root hub\n");
+ oz_trace_msg(H, "usb_hcd_resume_root_hub()\n");
usb_hcd_resume_root_hub(ozhcd->hcd);
}
+ n = j;
+ oz_trace_msg(H, "usb_hcd_poll_rh()\n");
usb_hcd_poll_rh_status(ozhcd->hcd);
} else {
spin_unlock_bh(&ozhcd->hcd_lock);
@@ -695,9 +758,9 @@ void oz_hcd_pd_departed(void *hport)
void *hpd;
struct oz_endpoint *ep = NULL;
- oz_trace("oz_hcd_pd_departed()\n");
+ oz_trace("%s:\n", __func__);
if (port == NULL) {
- oz_trace("oz_hcd_pd_departed() port = 0\n");
+ oz_trace("%s: port = 0\n", __func__);
return;
}
ozhcd = port->ozhcd;
@@ -708,7 +771,6 @@ void oz_hcd_pd_departed(void *hport)
spin_lock_bh(&ozhcd->hcd_lock);
if ((ozhcd->conn_port >= 0) &&
(port == &ozhcd->ports[ozhcd->conn_port])) {
- oz_trace("Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_lock(&port->port_lock);
@@ -721,9 +783,10 @@ void oz_hcd_pd_departed(void *hport)
hpd = port->hpd;
port->hpd = NULL;
port->bus_addr = 0xff;
+ port->config_num = 0;
port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
port->flags |= OZ_PORT_F_CHANGED;
- port->status &= ~USB_PORT_STAT_CONNECTION;
+ port->status &= ~(USB_PORT_STAT_CONNECTION|USB_PORT_STAT_ENABLE);
port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
/* If there is an endpoint 0 then clear the pointer while we hold
* the spinlock but deallocate it after releasing the lock.
@@ -735,6 +798,7 @@ void oz_hcd_pd_departed(void *hport)
spin_unlock_bh(&port->port_lock);
if (ep)
oz_ep_free(port, ep);
+ oz_trace_msg(H, "usb_hcd_poll_rh_status()\n");
usb_hcd_poll_rh_status(ozhcd->hcd);
oz_usb_put(hpd);
}
@@ -766,9 +830,6 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
struct urb *urb;
int err = 0;
- oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, NULL, status);
- oz_trace("oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
- length, offset, total_size);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb)
return;
@@ -800,56 +861,11 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
}
}
urb->actual_length = total_size;
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
-#ifdef WANT_TRACE
-static void oz_display_conf_type(u8 t)
-{
- switch (t) {
- case USB_REQ_GET_STATUS:
- oz_trace("USB_REQ_GET_STATUS - cnf\n");
- break;
- case USB_REQ_CLEAR_FEATURE:
- oz_trace("USB_REQ_CLEAR_FEATURE - cnf\n");
- break;
- case USB_REQ_SET_FEATURE:
- oz_trace("USB_REQ_SET_FEATURE - cnf\n");
- break;
- case USB_REQ_SET_ADDRESS:
- oz_trace("USB_REQ_SET_ADDRESS - cnf\n");
- break;
- case USB_REQ_GET_DESCRIPTOR:
- oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
- break;
- case USB_REQ_SET_DESCRIPTOR:
- oz_trace("USB_REQ_SET_DESCRIPTOR - cnf\n");
- break;
- case USB_REQ_GET_CONFIGURATION:
- oz_trace("USB_REQ_GET_CONFIGURATION - cnf\n");
- break;
- case USB_REQ_SET_CONFIGURATION:
- oz_trace("USB_REQ_SET_CONFIGURATION - cnf\n");
- break;
- case USB_REQ_GET_INTERFACE:
- oz_trace("USB_REQ_GET_INTERFACE - cnf\n");
- break;
- case USB_REQ_SET_INTERFACE:
- oz_trace("USB_REQ_SET_INTERFACE - cnf\n");
- break;
- case USB_REQ_SYNCH_FRAME:
- oz_trace("USB_REQ_SYNCH_FRAME - cnf\n");
- break;
- }
-}
-#else
-#define oz_display_conf_type(__x)
-#endif /* WANT_TRACE */
-/*------------------------------------------------------------------------------
- * Context: softirq
- */
static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
u8 rcode, u8 config_num)
{
@@ -865,7 +881,7 @@ static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
} else {
rc = -ENOMEM;
}
- oz_complete_urb(hcd, urb, rc, 0);
+ oz_complete_urb(hcd, urb, rc);
}
/*------------------------------------------------------------------------------
* Context: softirq
@@ -876,21 +892,24 @@ static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
struct usb_hcd *hcd = port->ozhcd->hcd;
int rc = 0;
if (rcode == 0) {
- struct usb_host_config *config;
- struct usb_host_interface *intf;
- oz_trace("Set interface %d alt %d\n", if_num, alt);
- oz_clean_endpoints_for_interface(hcd, port, if_num);
- config = &urb->dev->config[port->config_num-1];
- intf = &config->intf_cache[if_num]->altsetting[alt];
- if (oz_build_endpoints_for_interface(hcd, port, intf,
- GFP_ATOMIC))
+ if (port->config_num > 0) {
+ struct usb_host_config *config;
+ struct usb_host_interface *intf;
+
+ oz_clean_endpoints_for_interface(hcd, port, if_num);
+ config = &urb->dev->config[port->config_num-1];
+ intf = &config->intf_cache[if_num]->altsetting[alt];
+ if (oz_build_endpoints_for_interface(hcd, port, intf,
+ GFP_ATOMIC))
+ rc = -ENOMEM;
+ else
+ port->iface[if_num].alt = alt;
+ } else
rc = -ENOMEM;
- else
- port->iface[if_num].alt = alt;
} else {
rc = -ENOMEM;
}
- oz_complete_urb(hcd, urb, rc, 0);
+ oz_complete_urb(hcd, urb, rc);
}
/*------------------------------------------------------------------------------
* Context: softirq
@@ -905,11 +924,9 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
unsigned windex;
unsigned wvalue;
- oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, NULL, rcode);
- oz_trace("oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb) {
- oz_trace("URB not found\n");
+ oz_trace("URB not found: %p\n", urb);
return;
}
setup = (struct usb_ctrlrequest *)urb->setup_packet;
@@ -917,7 +934,6 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
wvalue = le16_to_cpu(setup->wValue);
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
/* Standard requests */
- oz_display_conf_type(setup->bRequest);
switch (setup->bRequest) {
case USB_REQ_SET_CONFIGURATION:
oz_hcd_complete_set_config(port, urb, rcode,
@@ -928,12 +944,11 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
(u8)windex, (u8)wvalue);
break;
default:
- oz_complete_urb(hcd, urb, 0, 0);
+ oz_complete_urb(hcd, urb, 0);
}
} else {
int copy_len;
- oz_trace("VENDOR-CLASS - cnf\n");
if (data_len) {
if (data_len <= urb->transfer_buffer_length)
copy_len = data_len;
@@ -942,14 +957,14 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
}
- oz_complete_urb(hcd, urb, 0, 0);
+ oz_complete_urb(hcd, urb, 0);
}
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
- int data_len)
+ int data_len)
{
int space;
int copy_len;
@@ -959,7 +974,10 @@ static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
if (space < 0)
space += ep->buffer_size;
if (space < (data_len+1)) {
- oz_trace("Buffer full\n");
+ oz_trace_msg(I, "EP:%02X u:%d FULL len:%d spc:%d\n",
+ ep->ep_num | USB_DIR_IN,
+ ep->buffered_units,
+ data_len, space);
return -1;
}
ep->buffer[ep->in_ix] = (u8)data_len;
@@ -1012,15 +1030,35 @@ void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
copy_len = urb->transfer_buffer_length;
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
return;
} else {
- oz_trace("buffering frame as URB is not available\n");
oz_hcd_buffer_data(ep, data, data_len);
}
break;
case USB_ENDPOINT_XFER_ISOC:
- oz_hcd_buffer_data(ep, data, data_len);
+ if (oz_hcd_buffer_data(ep, data, data_len) != 0) {
+ int len;
+ int copy_len;
+ while (ep->buffered_units > ep->max_buffer_units) {
+ len = ep->buffer[ep->out_ix];
+ if (++ep->out_ix == ep->buffer_size)
+ ep->out_ix = 0;
+ copy_len = ep->buffer_size - ep->out_ix;
+ if (copy_len > len)
+ copy_len = len;
+ if (copy_len < len)
+ ep->out_ix = len - copy_len;
+ else
+ ep->out_ix += copy_len;
+
+ if (ep->out_ix == ep->buffer_size)
+ ep->out_ix = 0;
+
+ ep->buffered_units--;
+ }
+ ep->flags |= OZ_F_EP_BUFFERING;
+ }
break;
}
done:
@@ -1031,7 +1069,7 @@ done:
*/
static inline int oz_usb_get_frame_number(void)
{
- return jiffies_to_msecs(get_jiffies_64());
+ return atomic_inc_return(&g_usb_frame_number);
}
/*------------------------------------------------------------------------------
* Context: softirq
@@ -1047,7 +1085,8 @@ int oz_hcd_heartbeat(void *hport)
struct list_head *n;
struct urb *urb;
struct oz_endpoint *ep;
- unsigned long now = jiffies;
+ struct timespec ts, delta;
+ getrawmonotonic(&ts);
INIT_LIST_HEAD(&xfr_list);
/* Check the OUT isoc endpoints to see if any URB data can be sent.
*/
@@ -1056,12 +1095,11 @@ int oz_hcd_heartbeat(void *hport)
ep = ep_from_link(e);
if (ep->credit < 0)
continue;
- ep->credit += jiffies_to_msecs(now - ep->last_jiffies);
+ delta = timespec_sub(ts, ep->timestamp);
+ ep->credit += div64_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
if (ep->credit > ep->credit_ceiling)
ep->credit = ep->credit_ceiling;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, NULL,
- ep->credit);
- ep->last_jiffies = now;
+ ep->timestamp = ts;
while (ep->credit && !list_empty(&ep->urb_list)) {
urbl = list_first_entry(&ep->urb_list,
struct oz_urb_link, link);
@@ -1069,8 +1107,8 @@ int oz_hcd_heartbeat(void *hport)
if ((ep->credit + 1) < urb->number_of_packets)
break;
ep->credit -= urb->number_of_packets;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, NULL,
- ep->credit);
+ if (ep->credit < 0)
+ ep->credit = 0;
list_move_tail(&urbl->link, &xfr_list);
}
}
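The hunk above moves the OUT-isoc credit bookkeeping from jiffies onto a raw monotonic timestamp kept per endpoint. A minimal sketch of the accrual, assuming only the oz_endpoint fields this patch touches (timestamp, credit, credit_ceiling): elapsed nanoseconds since the last heartbeat become whole milliseconds of send credit, clamped to the ceiling.

	/* Sketch only; mirrors the credit accrual in the hunk above. */
	static void oz_credit_tick(struct oz_endpoint *ep, struct timespec now)
	{
		struct timespec delta = timespec_sub(now, ep->timestamp);

		/* whole milliseconds elapsed since the last tick */
		ep->credit += div64_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
		if (ep->credit > ep->credit_ceiling)
			ep->credit = ep->credit_ceiling;
		ep->timestamp = now;
	}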
@@ -1078,40 +1116,34 @@ int oz_hcd_heartbeat(void *hport)
/* Send to PD and complete URBs.
*/
list_for_each_safe(e, n, &xfr_list) {
- unsigned long t;
urbl = container_of(e, struct oz_urb_link, link);
urb = urbl->urb;
- t = urbl->submit_jiffies;
list_del_init(e);
urb->error_count = 0;
urb->start_frame = oz_usb_get_frame_number();
oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
oz_free_urb_link(urbl);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, t);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/* Check the IN isoc endpoints to see if any URBs can be completed.
*/
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each(e, &port->isoc_in_ep) {
struct oz_endpoint *ep = ep_from_link(e);
+
if (ep->flags & OZ_F_EP_BUFFERING) {
- if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
+ if (ep->buffered_units >= ep->max_buffer_units) {
ep->flags &= ~OZ_F_EP_BUFFERING;
ep->credit = 0;
- oz_event_log(OZ_EVT_EP_CREDIT,
- ep->ep_num | USB_DIR_IN,
- 0, NULL, ep->credit);
- ep->last_jiffies = now;
+ ep->credit2 = 0;
+ ep->timestamp = ts;
ep->start_frame = 0;
- oz_event_log(OZ_EVT_EP_BUFFERING,
- ep->ep_num | USB_DIR_IN, 0, NULL, 0);
}
continue;
}
- ep->credit += jiffies_to_msecs(now - ep->last_jiffies);
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
- 0, NULL, ep->credit);
- ep->last_jiffies = now;
+ delta = timespec_sub(ts, ep->timestamp);
+ ep->credit += div64_u64(timespec_to_ns(&delta)+5000, NSEC_PER_MSEC);
+ ep->timestamp = ts;
while (!list_empty(&ep->urb_list)) {
struct oz_urb_link *urbl =
list_first_entry(&ep->urb_list,
@@ -1120,8 +1152,6 @@ int oz_hcd_heartbeat(void *hport)
int len = 0;
int copy_len;
int i;
- if ((ep->credit + 1) < urb->number_of_packets)
- break;
if (ep->buffered_units < urb->number_of_packets)
break;
urb->actual_length = 0;
@@ -1154,8 +1184,29 @@ int oz_hcd_heartbeat(void *hport)
ep->start_frame += urb->number_of_packets;
list_move_tail(&urbl->link, &xfr_list);
ep->credit -= urb->number_of_packets;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
- 0, NULL, ep->credit);
+ ep->credit2 += urb->number_of_packets;
+ }
+ if (ep->buffered_units == 0) {
+ oz_trace_msg(I, "EP:%02X Buffer under run\n",
+ ep->ep_num | USB_DIR_IN);
+ ep->flags |= OZ_F_EP_BUFFERING;
+ continue;
+ }
+ if (ep->credit2 >= 1000)
+ {
+ static int buffered_units=-1;
+ static int max_buffer_units=-1;
+ {
+ int diff = ep->buffered_units - buffered_units;
+ oz_trace_msg(I, "u:%d o:%04d b:%d\n",
+ ep->credit2,
+ ep->credit2 + diff,
+ ep->buffered_units);
+
+ buffered_units = ep->buffered_units;
+ max_buffer_units = ep->max_buffer_units;
+ }
+ ep->credit2 = 0;
}
}
if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
@@ -1168,7 +1219,7 @@ int oz_hcd_heartbeat(void *hport)
urb = urbl->urb;
list_del_init(e);
oz_free_urb_link(urbl);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/* Check if there are any ep0 requests that have timed out.
	 * If so, resend them to the PD.
@@ -1180,11 +1231,14 @@ int oz_hcd_heartbeat(void *hport)
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each_safe(e, n, &ep->urb_list) {
urbl = container_of(e, struct oz_urb_link, link);
- if (time_after(now, urbl->submit_jiffies+HZ/2)) {
- oz_trace("%ld: Request 0x%p timeout\n",
- now, urbl->urb);
- urbl->submit_jiffies = now;
+ if (urbl->submit_counter > EP0_TIMEOUT_COUNTER) {
+ oz_trace_msg(M, "URB:%08X timeout %02X\n",
+ (unsigned int)((uintptr_t)urbl->urb),
+ urbl->req_id);
list_move_tail(e, &xfr_list);
+ urbl->submit_counter = 0;
+ } else if (urbl->submit_counter) {
+ urbl->submit_counter++;
}
}
if (!list_empty(&ep->urb_list))
@@ -1194,7 +1248,7 @@ int oz_hcd_heartbeat(void *hport)
while (e != &xfr_list) {
urbl = container_of(e, struct oz_urb_link, link);
e = e->next;
- oz_trace("Resending request to PD.\n");
+ oz_trace_msg(M, "Resending request to PD.\n");
oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
oz_free_urb_link(urbl);
}
@@ -1210,9 +1264,17 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
{
struct oz_hcd *ozhcd = port->ozhcd;
int i;
- int if_ix = intf->desc.bInterfaceNumber;
+ int if_ix;
int request_heartbeat = 0;
+
+ if (intf == NULL)
+ return -ENOMEM;
+
+ if_ix = intf->desc.bInterfaceNumber;
oz_trace("interface[%d] = %p\n", if_ix, intf);
+ if (if_ix >= port->num_iface || port->iface == NULL)
+ return -ENOMEM;
+
for (i = 0; i < intf->desc.bNumEndpoints; i++) {
struct usb_host_endpoint *hep = &intf->endpoint[i];
u8 ep_addr = hep->desc.bEndpointAddress;
@@ -1220,15 +1282,14 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_endpoint *ep;
int buffer_size = 0;
- oz_trace("%d bEndpointAddress = %x\n", i, ep_addr);
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
switch (hep->desc.bmAttributes &
USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_ISOC:
- buffer_size = 24*1024;
+ buffer_size = OZ_EP_BUFFER_SIZE_ISOC;
break;
case USB_ENDPOINT_XFER_INT:
- buffer_size = 128;
+ buffer_size = OZ_EP_BUFFER_SIZE_INT;
break;
}
}
@@ -1242,13 +1303,11 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
ep->ep_num = ep_num;
if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_ISOC) {
- oz_trace("wMaxPacketSize = %d\n",
- hep->desc.wMaxPacketSize);
ep->credit_ceiling = 200;
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
+ ep->max_buffer_units =
+ oz_get_up_max_buffer_units(port->hpd);
ep->flags |= OZ_F_EP_BUFFERING;
- oz_event_log(OZ_EVT_EP_BUFFERING,
- ep->ep_num | USB_DIR_IN, 1, NULL, 0);
} else {
ep->flags |= OZ_F_EP_HAVE_STREAM;
if (oz_usb_stream_create(port->hpd, ep_num))
@@ -1372,7 +1431,6 @@ static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
oz_clean_endpoints_for_interface(hcd, port, i);
spin_lock_bh(&ozhcd->hcd_lock);
if (port->iface) {
- oz_trace("Freeing interfaces object.\n");
kfree(port->iface);
port->iface = NULL;
}
@@ -1411,8 +1469,7 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
int port_ix = -1;
struct oz_port *port = NULL;
- oz_trace2(OZ_TRACE_URB, "%lu: oz_process_ep0_urb(%p)\n", jiffies, urb);
- port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
+ port_ix = oz_get_port_from_addr(ozhcd, (usb_pipedevice(urb->pipe)));
if (port_ix < 0) {
rc = -EPIPE;
goto out;
@@ -1432,17 +1489,10 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
windex = le16_to_cpu(setup->wIndex);
wvalue = le16_to_cpu(setup->wValue);
wlength = le16_to_cpu(setup->wLength);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequestType = %x\n",
- setup->bRequestType);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "wValue = %x\n", wvalue);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "wIndex = %x\n", windex);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "wLength = %x\n", wlength);
req_id = port->next_req_id++;
hpd = oz_claim_hpd(port);
if (hpd == NULL) {
- oz_trace("Cannot claim port\n");
rc = -EPIPE;
goto out;
}
@@ -1452,34 +1502,23 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
*/
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
- oz_trace("USB_REQ_GET_DESCRIPTOR - req\n");
break;
case USB_REQ_SET_ADDRESS:
- oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest,
- 0, NULL, setup->bRequestType);
- oz_trace("USB_REQ_SET_ADDRESS - req\n");
- oz_trace("Port %d address is 0x%x\n", ozhcd->conn_port,
- (u8)le16_to_cpu(setup->wValue));
spin_lock_bh(&ozhcd->hcd_lock);
if (ozhcd->conn_port >= 0) {
ozhcd->ports[ozhcd->conn_port].bus_addr =
(u8)le16_to_cpu(setup->wValue);
- oz_trace("Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_unlock_bh(&ozhcd->hcd_lock);
complete = 1;
break;
case USB_REQ_SET_CONFIGURATION:
- oz_trace("USB_REQ_SET_CONFIGURATION - req\n");
break;
case USB_REQ_GET_CONFIGURATION:
/* We short circuit this case and reply directly since
* we have the selected configuration number cached.
*/
- oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0,
- NULL, setup->bRequestType);
- oz_trace("USB_REQ_GET_CONFIGURATION - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
@@ -1493,22 +1532,16 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
/* We short circuit this case and reply directly since
* we have the selected interface alternative cached.
*/
- oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0,
- NULL, setup->bRequestType);
- oz_trace("USB_REQ_GET_INTERFACE - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
port->iface[(u8)windex].alt;
- oz_trace("interface = %d alt = %d\n",
- windex, port->iface[(u8)windex].alt);
complete = 1;
} else {
rc = -EPIPE;
}
break;
case USB_REQ_SET_INTERFACE:
- oz_trace("USB_REQ_SET_INTERFACE - req\n");
break;
}
}
@@ -1539,8 +1572,7 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
oz_usb_put(hpd);
out:
if (rc || complete) {
- oz_trace("Completing request locally\n");
- oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ oz_complete_urb(ozhcd->hcd, urb, rc);
} else {
oz_usb_request_heartbeat(port->hpd);
}
@@ -1565,14 +1597,14 @@ static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
/* Check if there is a device at the port - refuse if not.
*/
if ((port->flags & OZ_PORT_F_PRESENT) == 0)
- return -EPIPE;
+ return -ENODEV;
ep_addr = usb_pipeendpoint(urb->pipe);
if (ep_addr) {
/* If the request is not for EP0 then queue it.
*/
if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe),
urb, 0))
- rc = -EPIPE;
+ rc = -ENOENT;
} else {
oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC);
}
@@ -1604,7 +1636,7 @@ static void oz_urb_process_tasklet(unsigned long unused)
oz_free_urb_link(urbl);
rc = oz_urb_process(ozhcd, urb);
if (rc)
- oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ oz_complete_urb(ozhcd->hcd, urb, rc);
spin_lock_irqsave(&g_tasklet_lock, irq_state);
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
@@ -1627,12 +1659,12 @@ static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
unsigned long irq_state;
u8 ix;
if (port == NULL) {
- oz_trace("ERRORERROR: oz_urb_cancel(%p) port is null\n", urb);
+ oz_trace("%s: urb=%p port is null\n", __func__, urb);
return;
}
ozhcd = port->ozhcd;
if (ozhcd == NULL) {
- oz_trace("ERRORERROR: oz_urb_cancel(%p) ozhcd is null\n", urb);
+ oz_trace("%s: urb=%p ozhcd is null\n", __func__, urb);
return;
}
@@ -1657,7 +1689,7 @@ static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
urbl = container_of(e, struct oz_urb_link, link);
if (urbl->urb == urb) {
list_del(e);
- oz_trace("Found urb in orphanage\n");
+ oz_trace("Found urb in orphanage urb=%p\n", urb);
goto out;
}
}
@@ -1673,7 +1705,7 @@ out2:
if (urbl) {
urb->actual_length = 0;
oz_free_urb_link(urbl);
- oz_complete_urb(ozhcd->hcd, urb, -EPIPE, 0);
+ oz_complete_urb(ozhcd->hcd, urb, -ENOENT);
}
}
/*------------------------------------------------------------------------------
@@ -1713,7 +1745,7 @@ static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
urbl = list_first_entry(&ozhcd->orphanage,
struct oz_urb_link, link);
list_del(&urbl->link);
- oz_complete_urb(ozhcd->hcd, urbl->urb, status, 0);
+ oz_complete_urb(ozhcd->hcd, urbl->urb, status);
oz_free_urb_link(urbl);
}
}
@@ -1723,7 +1755,6 @@ static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
*/
static int oz_hcd_start(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_start()\n");
hcd->power_budget = 200;
hcd->state = HC_STATE_RUNNING;
hcd->uses_new_polling = 1;
@@ -1734,29 +1765,15 @@ static int oz_hcd_start(struct usb_hcd *hcd)
*/
static void oz_hcd_stop(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_stop()\n");
+ oz_trace("%s:\n", __func__);
}
/*------------------------------------------------------------------------------
* Context: unknown
*/
static void oz_hcd_shutdown(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_shutdown()\n");
-}
-/*------------------------------------------------------------------------------
- * Context: any
- */
-#ifdef WANT_EVENT_TRACE
-static u8 oz_get_irq_ctx(void)
-{
- u8 irq_info = 0;
- if (in_interrupt())
- irq_info |= 1;
- if (in_irq())
- irq_info |= 2;
- return irq_info;
+ oz_trace("%s:\n", __func__);
}
-#endif /* WANT_EVENT_TRACE */
/*------------------------------------------------------------------------------
* Called to queue an urb for the device.
* This function should return a non-zero error code if it fails the urb but
@@ -1772,30 +1789,24 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
struct oz_port *port;
unsigned long irq_state;
struct oz_urb_link *urbl;
- oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_enqueue(%p)\n",
- jiffies, urb);
- oz_event_log(OZ_EVT_URB_SUBMIT, oz_get_irq_ctx(),
- (u16)urb->number_of_packets, urb, urb->pipe);
if (unlikely(ozhcd == NULL)) {
- oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not ozhcd.\n",
- jiffies, urb);
- return -EPIPE;
+ oz_trace("Refused urb(%p) not ozhcd.\n", urb);
+ return -ENODEV;
}
if (unlikely(hcd->state != HC_STATE_RUNNING)) {
- oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not running.\n",
- jiffies, urb);
- return -EPIPE;
+ oz_trace("Refused urb(%p) not running.\n", urb);
+ return -ENODEV;
}
- port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
+ port_ix = oz_get_port_from_addr(ozhcd, (usb_pipedevice(urb->pipe)));
if (port_ix < 0)
- return -EPIPE;
+ return -ENODEV;
port = &ozhcd->ports[port_ix];
if (port == NULL)
- return -EPIPE;
+ return -ENODEV;
if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
oz_trace("Refusing URB port_ix = %d devnum = %d\n",
port_ix, urb->dev->devnum);
- return -EPIPE;
+ return -ENODEV;
}
urb->hcpriv = port;
/* Put request in queue for processing by tasklet.
@@ -1805,6 +1816,7 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return -ENOMEM;
urbl->urb = urb;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
+ oz_trace_urb_in(urb);
rc = usb_hcd_link_urb_to_ep(hcd, urb);
if (unlikely(rc)) {
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
@@ -1826,24 +1838,21 @@ static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct oz_urb_link *urbl = NULL;
struct list_head *e;
if (unlikely(ep == NULL))
 		return NULL;
list_for_each(e, &ep->urb_list) {
urbl = container_of(e, struct oz_urb_link, link);
if (urbl->urb == urb) {
list_del_init(e);
if (usb_pipeisoc(urb->pipe)) {
+ oz_trace_msg(M, "oz_remove_urb:%p\n", urb);
ep->credit -= urb->number_of_packets;
if (ep->credit < 0)
ep->credit = 0;
- oz_event_log(OZ_EVT_EP_CREDIT,
- usb_pipein(urb->pipe) ?
- (ep->ep_num | USB_DIR_IN) : ep->ep_num,
- 0, NULL, ep->credit);
}
return urbl;
}
}
 	return NULL;
}
/*------------------------------------------------------------------------------
* Called to dequeue a previously submitted urb for the device.
@@ -1855,7 +1864,7 @@ static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct oz_urb_link *urbl = NULL;
int rc;
unsigned long irq_state;
- oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_dequeue(%p)\n", jiffies, urb);
+
urbl = oz_alloc_urb_link();
if (unlikely(urbl == NULL))
return -ENOMEM;
@@ -1891,7 +1900,7 @@ static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
- oz_trace("oz_hcd_endpoint_disable\n");
+ oz_trace("%s:\n", __func__);
}
/*------------------------------------------------------------------------------
* Context: unknown
@@ -1899,14 +1908,13 @@ static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
- oz_trace("oz_hcd_endpoint_reset\n");
+ oz_trace("%s:\n", __func__);
}
/*------------------------------------------------------------------------------
* Context: unknown
*/
static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_get_frame_number\n");
return oz_usb_get_frame_number();
}
/*------------------------------------------------------------------------------
@@ -1919,19 +1927,24 @@ static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
int i;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_status_data()\n");
buf[0] = 0;
-
+ buf[1] = 0;
spin_lock_bh(&ozhcd->hcd_lock);
for (i = 0; i < OZ_NB_PORTS; i++) {
if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
- oz_trace2(OZ_TRACE_HUB, "Port %d changed\n", i);
- ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
- buf[0] |= 1<<(i+1);
+ if (i < 7)
+ buf[0] |= 1<<(i+1);
+ else
+ buf[1] |= 1<<(i-7);
}
}
+ oz_trace_msg(H, "HUBSTS : %02X%02X\n",
+ (unsigned char)(buf[1]), (unsigned char)(buf[0]));
spin_unlock_bh(&ozhcd->hcd_lock);
- return buf[0] ? 1 : 0;
+	if (buf[1] != 0 || buf[0] != 0)
+		return 2;
+	return 0;
}
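For reference, hub_status_data reports a change bitmap: bit 0 of byte 0 is the hub itself, bit (n+1) marks port n (0-based), and ports beyond the seventh spill into byte 1, which is why the function now fills and reports two bytes. A hypothetical helper showing the same packing (oz_hcd_hub_status_data() open-codes it against the OZ_PORT_F_CHANGED flags):

	static int oz_fill_change_bitmap(u8 *buf, const unsigned long *changed,
					 int nb_ports)
	{
		int i;

		buf[0] = 0;
		buf[1] = 0;
		for (i = 0; i < nb_ports; i++) {
			if (!test_bit(i, changed))
				continue;
			if (i < 7)
				buf[0] |= 1 << (i + 1);	/* ports 1..7 */
			else
				buf[1] |= 1 << (i - 7);	/* ports 8..14 */
		}
		return (buf[0] || buf[1]) ? 2 : 0;	/* bytes of status */
	}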
/*------------------------------------------------------------------------------
* Context: process
@@ -1939,7 +1952,6 @@ static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
static void oz_get_hub_descriptor(struct usb_hcd *hcd,
struct usb_hub_descriptor *desc)
{
- oz_trace2(OZ_TRACE_HUB, "GetHubDescriptor\n");
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
@@ -1958,59 +1970,65 @@ static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned set_bits = 0;
unsigned clear_bits = 0;
- oz_trace2(OZ_TRACE_HUB, "SetPortFeature\n");
+ void *hpd;
+
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
- return -EPIPE;
+ return -ENODEV;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ oz_trace("USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ oz_trace("USB_PORT_FEAT_ENABLE\n");
break;
case USB_PORT_FEAT_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ oz_trace("USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ oz_trace("USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ oz_trace("USB_PORT_FEAT_RESET\n");
set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
clear_bits = USB_PORT_STAT_RESET;
ozhcd->ports[port_id-1].bus_addr = 0;
+ hpd = oz_claim_hpd(&ozhcd->ports[port_id-1]);
+ if (hpd != NULL) {
+ oz_usb_reset_device(hpd);
+ oz_usb_put(hpd);
+ }
break;
case USB_PORT_FEAT_POWER:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ oz_trace("USB_PORT_FEAT_POWER\n");
set_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ oz_trace("USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ oz_trace("USB_PORT_FEAT_C_CONNECTION\n");
break;
case USB_PORT_FEAT_C_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ oz_trace("USB_PORT_FEAT_C_ENABLE\n");
break;
case USB_PORT_FEAT_C_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ oz_trace("USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ oz_trace("USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+ oz_trace("USB_PORT_FEAT_C_RESET\n");
break;
case USB_PORT_FEAT_TEST:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ oz_trace("USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ oz_trace("USB_PORT_FEAT_INDICATOR\n");
break;
default:
- oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ oz_trace("Other %d\n", wvalue);
break;
}
if (set_bits || clear_bits) {
@@ -2019,8 +2037,9 @@ static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
port->status |= set_bits;
spin_unlock_bh(&port->port_lock);
}
- oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
- port->status);
+
+ oz_trace_msg(H, "Port[%d]: %08X\n", port_id,
+ ozhcd->ports[port_id-1].status);
return err;
}
/*------------------------------------------------------------------------------
@@ -2033,60 +2052,60 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
u8 port_id = (u8)windex;
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned clear_bits = 0;
- oz_trace2(OZ_TRACE_HUB, "ClearPortFeature\n");
+
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
- return -EPIPE;
+ return -ENODEV;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ oz_trace("USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ oz_trace("USB_PORT_FEAT_ENABLE\n");
clear_bits = USB_PORT_STAT_ENABLE;
break;
case USB_PORT_FEAT_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ oz_trace("USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ oz_trace("USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ oz_trace("USB_PORT_FEAT_RESET\n");
break;
case USB_PORT_FEAT_POWER:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ oz_trace("USB_PORT_FEAT_POWER\n");
clear_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ oz_trace("USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ oz_trace("USB_PORT_FEAT_C_CONNECTION\n");
clear_bits = (USB_PORT_STAT_C_CONNECTION << 16);
break;
case USB_PORT_FEAT_C_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ oz_trace("USB_PORT_FEAT_C_ENABLE\n");
clear_bits = (USB_PORT_STAT_C_ENABLE << 16);
break;
case USB_PORT_FEAT_C_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ oz_trace("USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ oz_trace("USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+ oz_trace("USB_PORT_FEAT_C_RESET\n");
 		clear_bits = (USB_PORT_STAT_C_RESET << 16);
break;
case USB_PORT_FEAT_TEST:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ oz_trace("USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ oz_trace("USB_PORT_FEAT_INDICATOR\n");
break;
default:
- oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ oz_trace("Other %d\n", wvalue);
break;
}
if (clear_bits) {
@@ -2094,7 +2113,8 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
port->status &= ~clear_bits;
spin_unlock_bh(&port->port_lock);
}
- oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
+
+ oz_trace_msg(H, "Port[%d]: %08X\n", port_id,
ozhcd->ports[port_id-1].status);
return err;
}
@@ -2106,12 +2126,12 @@ static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
struct oz_hcd *ozhcd;
u32 status = 0;
if ((windex < 1) || (windex > OZ_NB_PORTS))
- return -EPIPE;
+ return -ENODEV;
ozhcd = oz_hcd_private(hcd);
- oz_trace2(OZ_TRACE_HUB, "GetPortStatus windex = %d\n", windex);
+ ozhcd->ports[windex-1].flags &= ~OZ_PORT_F_CHANGED;
status = ozhcd->ports[windex-1].status;
put_unaligned(cpu_to_le32(status), (__le32 *)buf);
- oz_trace2(OZ_TRACE_HUB, "Port[%d] status = %x\n", windex, status);
+ oz_trace_msg(H, "Port[%d]: %08X\n", windex, status);
return 0;
}
/*------------------------------------------------------------------------------
@@ -2121,10 +2141,13 @@ static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
u16 windex, char *buf, u16 wlength)
{
int err = 0;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_control()\n");
+
+ oz_trace_msg(H, "HUBCTL: %04X %04X %04X %04X\n",
+ req_type, wvalue, windex, wlength);
+
switch (req_type) {
case ClearHubFeature:
- oz_trace2(OZ_TRACE_HUB, "ClearHubFeature: %d\n", req_type);
+ oz_trace("ClearHubFeature: %d\n", req_type);
break;
case ClearPortFeature:
err = oz_clear_port_feature(hcd, wvalue, windex);
@@ -2133,7 +2156,7 @@ static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
break;
case GetHubStatus:
- oz_trace2(OZ_TRACE_HUB, "GetHubStatus: req_type = 0x%x\n",
+ oz_trace("GetHubStatus: req_type = 0x%x\n",
req_type);
put_unaligned(__constant_cpu_to_le32(0), (__le32 *)buf);
break;
@@ -2141,13 +2164,13 @@ static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
err = oz_get_port_status(hcd, windex, buf);
break;
case SetHubFeature:
- oz_trace2(OZ_TRACE_HUB, "SetHubFeature: %d\n", req_type);
+ oz_trace("SetHubFeature: %d\n", req_type);
break;
case SetPortFeature:
err = oz_set_port_feature(hcd, wvalue, windex);
break;
default:
- oz_trace2(OZ_TRACE_HUB, "Other: %d\n", req_type);
+ oz_trace("Other: %d\n", req_type);
break;
}
return err;
@@ -2158,7 +2181,7 @@ static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_suspend()\n");
+
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
hcd->state = HC_STATE_SUSPENDED;
@@ -2172,7 +2195,7 @@ static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
static int oz_hcd_bus_resume(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_resume()\n");
+
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
@@ -2184,7 +2207,7 @@ static int oz_hcd_bus_resume(struct usb_hcd *hcd)
*/
static void oz_plat_shutdown(struct platform_device *dev)
{
- oz_trace("oz_plat_shutdown()\n");
+ oz_trace("%s:\n", __func__);
}
/*------------------------------------------------------------------------------
* Context: process
@@ -2195,10 +2218,10 @@ static int oz_plat_probe(struct platform_device *dev)
int err;
struct usb_hcd *hcd;
struct oz_hcd *ozhcd;
- oz_trace("oz_plat_probe()\n");
+
hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
if (hcd == NULL) {
- oz_trace("Failed to created hcd object OK\n");
+ oz_trace("Failed to created hcd object\n");
return -ENOMEM;
}
ozhcd = oz_hcd_private(hcd);
@@ -2217,9 +2240,9 @@ static int oz_plat_probe(struct platform_device *dev)
port->bus_addr = 0xff;
spin_lock_init(&port->port_lock);
}
- err = usb_add_hcd(hcd, 0, 0);
+ err = usb_add_hcd(hcd, -1, 0);
if (err) {
- oz_trace("Failed to add hcd object OK\n");
+ oz_trace("Failed to add hcd object\n");
usb_put_hcd(hcd);
return -1;
}
@@ -2235,7 +2258,7 @@ static int oz_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct oz_hcd *ozhcd;
- oz_trace("oz_plat_remove()\n");
+
if (hcd == NULL)
return -1;
ozhcd = oz_hcd_private(hcd);
@@ -2243,9 +2266,7 @@ static int oz_plat_remove(struct platform_device *dev)
if (ozhcd == g_ozhcd)
g_ozhcd = NULL;
spin_unlock_bh(&g_hcdlock);
- oz_trace("Clearing orphanage\n");
- oz_hcd_clear_orphanage(ozhcd, -EPIPE);
- oz_trace("Removing hcd\n");
+ oz_hcd_clear_orphanage(ozhcd, -ENODEV);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
oz_empty_link_pool();
@@ -2256,7 +2277,7 @@ static int oz_plat_remove(struct platform_device *dev)
*/
static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
{
- oz_trace("oz_plat_suspend()\n");
+ oz_trace("%s:\n", __func__);
return 0;
}
/*------------------------------------------------------------------------------
@@ -2264,7 +2285,7 @@ static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
*/
static int oz_plat_resume(struct platform_device *dev)
{
- oz_trace("oz_plat_resume()\n");
+ oz_trace("%s:\n", __func__);
return 0;
}
/*------------------------------------------------------------------------------
@@ -2275,10 +2296,10 @@ int oz_hcd_init(void)
int err;
if (usb_disabled())
return -ENODEV;
+
tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
err = platform_driver_register(&g_oz_plat_drv);
- oz_trace("platform_driver_register() returned %d\n", err);
if (err)
goto error;
g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
@@ -2286,11 +2307,9 @@ int oz_hcd_init(void)
err = -ENOMEM;
goto error1;
}
- oz_trace("platform_device_alloc() succeeded\n");
err = platform_device_add(g_plat_dev);
if (err)
goto error2;
- oz_trace("platform_device_add() succeeded\n");
return 0;
error2:
platform_device_put(g_plat_dev);
@@ -2299,7 +2318,7 @@ error1:
error:
tasklet_disable(&g_urb_process_tasklet);
tasklet_disable(&g_urb_cancel_tasklet);
- oz_trace("oz_hcd_init() failed %d\n", err);
+ oz_trace("HCD Init failed: %d\n", err);
return err;
}
/*------------------------------------------------------------------------------
@@ -2307,6 +2326,7 @@ error:
*/
void oz_hcd_term(void)
{
+ msleep(OZ_HUB_DEBOUNCE_TIMEOUT);
tasklet_kill(&g_urb_process_tasklet);
tasklet_kill(&g_urb_cancel_tasklet);
platform_device_unregister(g_plat_dev);
diff --git a/drivers/staging/ozwpan/ozkobject.c b/drivers/staging/ozwpan/ozkobject.c
new file mode 100644
index 000000000000..9b85ef55da15
--- /dev/null
+++ b/drivers/staging/ozwpan/ozkobject.c
@@ -0,0 +1,304 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/sysfs.h>
+#include <linux/version.h>
+#include "ozpd.h"
+#include "ozcdev.h"
+#include "ozproto.h"
+#include "oztrace.h"
+#include "ozkobject.h"
+#include "ozappif.h"
+
+static ssize_t devices_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int i, count, s;
+ unsigned state;
+ int ret = 0;
+ u8 devices[(ETH_ALEN + sizeof(unsigned)) * OZ_MAX_PDS];
+
+ count = oz_get_pd_status_list(devices, OZ_MAX_PDS);
+ s = sprintf(buf, "Total: %d\n", count);
+ buf += s;
+ ret += s;
+ for (i = 0; i < count; i++) {
+ ret += sprintf(buf, "%pm", (void *)&devices[i * (ETH_ALEN +
+ sizeof(unsigned))]);
+ buf += (ETH_ALEN * 2);
+ ret += sprintf(buf++, "\t");
+ memcpy(&state, &devices[(i * (ETH_ALEN + sizeof(unsigned))) +
+ ETH_ALEN], sizeof(unsigned));
+ switch (state) {
+ case OZ_PD_S_IDLE:
+ s = sprintf(buf, "IDLE\n");
+ buf += s;
+ ret += s;
+ break;
+ case OZ_PD_S_CONNECTED:
+ s = sprintf(buf, "CONNECTED\n");
+ buf += s;
+ ret += s;
+ break;
+ case OZ_PD_S_STOPPED:
+ s = sprintf(buf, "STOPPED\n");
+ buf += s;
+ ret += s;
+ break;
+ case OZ_PD_S_SLEEP:
+ s = sprintf(buf, "SLEEP\n");
+ buf += s;
+ ret += s;
+ break;
+ }
+	}
+ return ret;
+}
+
+u8 oz_str_to_hex(const char *st)
+{
+ u8 t1 = 0;
+ char arr[3];
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
+ char **pt = NULL;
+#endif
+
+ memcpy(arr, st, 2);
+ arr[2] = '\0';
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
+ t1 = (u8) simple_strtoul(arr, pt, 16);
+#else
+ if (kstrtou8(arr, 16, &t1))
+ oz_trace("Invalid string received\n");
+#endif
+ return t1;
+}
+
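The sysfs stores below use oz_str_to_hex() to parse a MAC address written as twelve hex digits. A usage sketch, assuming the caller has already length-checked the input as stop_store() and select_store() do:

	/* Usage sketch only: parse "a1b2c3d4e5f6" into a 6-byte address. */
	static void oz_parse_mac(const char *buf, u8 mac_addr[ETH_ALEN])
	{
		int i;

		for (i = 0; i < ETH_ALEN; i++) {
			mac_addr[i] = oz_str_to_hex(buf);
			buf += 2;	/* two hex digits per byte */
		}
	}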
+static ssize_t stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int i;
+ u8 mac_addr[6];
+ struct oz_pd *pd;
+
+ if (count >= 12) {
+ for (i = 0; i < 6; i++) {
+ mac_addr[i] = oz_str_to_hex(buf);
+ buf += 2;
+ }
+
+		pd = oz_pd_find(mac_addr);
+		if (pd) {
+			if (!(pd->state & OZ_PD_S_CONNECTED))
+				oz_pd_stop(pd);
+			oz_pd_put(pd);
+		}
+ }
+
+ return count;
+}
+
+static ssize_t select_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int i;
+ int ret = 0;
+ u8 mac_addr[6];
+
+ oz_get_active_pd(mac_addr);
+
+ for (i = 0; i < 6; i++) {
+ ret += sprintf(buf, "%02x", mac_addr[i]);
+ buf += 2;
+ }
+ ret += sprintf(buf, "\n");
+ return ret;
+}
+
+static ssize_t select_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int i;
+ u8 mac_addr[6];
+
+ if (count >= 12) {
+ for (i = 0; i < 6; i++) {
+ mac_addr[i] = oz_str_to_hex(buf);
+ buf += 2;
+ }
+
+ oz_set_active_pd(mac_addr);
+ }
+ return count;
+}
+
+static ssize_t bind_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char nw_list[OZ_MAX_NW_IF * OZ_MAX_BINDING_LEN] = {0};
+ int count, i, s;
+ int ret = 0;
+
+ count = oz_get_binding_list(nw_list, OZ_MAX_NW_IF);
+ for (i = 0; i < count; i++) {
+ s = sprintf(buf, "%s\n", nw_list + (i * OZ_MAX_BINDING_LEN));
+ ret += s;
+ buf += s;
+ }
+ return ret;
+}
+
+static ssize_t bind_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ char name[OZ_MAX_BINDING_LEN];
+ char *p = NULL;
+
+	if (count <= 2 || count - 2 >= sizeof(name))
+		return -EINVAL;
+	memcpy(name, buf + 2, count - 2);
+	name[count - 2] = '\0';
+ p = strstr(name, "\n");
+ if (p)
+ *p = '\0';
+
+ switch (*buf) {
+ case 'a':
+ oz_binding_add(name);
+ break;
+ case 'r':
+ oz_binding_remove(name);
+ break;
+ }
+ return count;
+}
+
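Judging from the parsing above, the bind attribute expects a single-letter opcode with the interface name starting at offset 2: for example, writing "a wlan0" adds a binding and "r wlan0" removes it. The attribute lives in the sysfs group that oz_create_sys_entry() below attaches to g_oz_wpan_dev, so the exact path depends on where that device is registered.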
+static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ u8 mode;
+ int ret;
+
+ mode = oz_get_serial_mode();
+ ret = sprintf(buf, "0x%02x\n", mode);
+ return ret;
+}
+
+static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ u8 new_mode;
+ if (count >= 4) {
+ new_mode = oz_str_to_hex(buf + 2);
+ oz_set_serial_mode(new_mode);
+ } else {
+ printk(KERN_ERR "Invalid mode\n");
+ }
+ return count;
+}
+
+static ssize_t debug_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int ret = 0;
+ u32 debug = g_debug;
+ int i;
+
+ for (i = 0; i < 'Z'-'A'+1; i++) {
+ if (debug & (1<<i))
+ *(buf + ret++) = 'A' + i;
+ }
+
+ return ret;
+}
+
+static ssize_t debug_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 new_debug = 0;
+ const char *t = buf;
+
+ if (count > 1 && count < 33) {
+ while (*t) {
+ char symbol = *t;
+ if ('A' <= symbol && symbol <= 'Z')
+ new_debug |= 1<<(symbol - 'A');
+ t++;
+ }
+
+		if (new_debug != 0)
+			g_debug = new_debug;
+		else
+			printk(KERN_ERR "Invalid debug\n");
+	} else {
+		if (count == 1 && *t == '\0')
+ g_debug = 0;
+ }
+
+ return count;
+}
+
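The debug attribute encodes trace classes as capital letters: 'A' is bit 0 of g_debug, 'B' is bit 1, and so on through 'Z'. A sketch of the mapping that debug_show() and debug_store() above implement in both directions:

	/* Sketch: letter<->bit mapping shared by the two handlers above. */
	static u32 oz_debug_mask_from_letters(const char *s)
	{
		u32 mask = 0;

		for (; *s; s++)
			if (*s >= 'A' && *s <= 'Z')
				mask |= 1u << (*s - 'A');
		return mask;	/* e.g. "MH" sets the 'M' and 'H' bits */
	}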
+static ssize_t fptr_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int ret;
+
+ ret = sprintf(buf, "p->oz_protocol_init = 0x%p\n", oz_protocol_init);
+	return ret;
+}
+
+static struct kobj_attribute devices_attribute =
+ __ATTR(devices, 0400, devices_show, NULL);
+
+static struct kobj_attribute stop_attribute =
+ __ATTR(stop, 0200, NULL, stop_store);
+
+static struct kobj_attribute select_attribute =
+ __ATTR(select, 0600, select_show, select_store);
+
+static struct kobj_attribute bind_attribute =
+ __ATTR(bind, 0600, bind_show, bind_store);
+
+static struct kobj_attribute mode_attribute =
+ __ATTR(mode, 0600, mode_show, mode_store);
+
+static struct kobj_attribute debug_attribute =
+ __ATTR(debug, 0600, debug_show, debug_store);
+
+static struct kobj_attribute fptr_attribute =
+ __ATTR(fptr, 0400, fptr_show, NULL);
+
+static struct attribute *attrs[] = {
+ &devices_attribute.attr,
+ &stop_attribute.attr,
+ &select_attribute.attr,
+ &bind_attribute.attr,
+ &mode_attribute.attr,
+ &debug_attribute.attr,
+ &fptr_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+void oz_create_sys_entry(void)
+{
+ int retval;
+
+ retval = sysfs_create_group(&g_oz_wpan_dev->kobj, &attr_group);
+ if (retval)
+ oz_trace("Can not create attribute group\n");
+
+}
+
+void oz_destroy_sys_entry(void)
+{
+ sysfs_remove_group(&g_oz_wpan_dev->kobj, &attr_group);
+}
diff --git a/drivers/staging/ozwpan/ozkobject.h b/drivers/staging/ozwpan/ozkobject.h
new file mode 100644
index 000000000000..0557228001d4
--- /dev/null
+++ b/drivers/staging/ozwpan/ozkobject.h
@@ -0,0 +1,17 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * -----------------------------------------------------------------------------
+ */
+
+#ifndef _OZKOBJECT_H
+#define _OZKOBJECT_H
+
+#define OZ_MAX_NW_IF 6
+
+void oz_create_sys_entry(void);
+void oz_destroy_sys_entry(void);
+void oz_set_serial_mode(u8 mode);
+u8 oz_get_serial_mode(void);
+
+#endif /* _OZKOBJECT_H */
diff --git a/drivers/staging/ozwpan/ozmain.c b/drivers/staging/ozwpan/ozmain.c
index 57a0cbd58551..4287f1067208 100644
--- a/drivers/staging/ozwpan/ozmain.c
+++ b/drivers/staging/ozwpan/ozmain.c
@@ -10,32 +10,28 @@
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
-#include "ozconfig.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "oztrace.h"
-#include "ozevent.h"
/*------------------------------------------------------------------------------
* The name of the 802.11 mac device. Empty string is the default value but a
* value can be supplied as a parameter to the module. An empty string means
* bind to nothing. '*' means bind to all netcards - this includes non-802.11
* netcards. Bindings can be added later using an IOCTL.
*/
-static char *g_net_dev = "";
+char *g_net_dev = "";
/*------------------------------------------------------------------------------
* Context: process
*/
static int __init ozwpan_init(void)
{
- oz_event_init();
+ if (oz_protocol_init(g_net_dev))
+ return -1;
oz_cdev_register();
- oz_protocol_init(g_net_dev);
oz_app_enable(OZ_APPID_USB, 1);
oz_apps_init();
-#ifdef CONFIG_DEBUG_FS
- oz_debugfs_init();
-#endif
+ printk(KERN_DEBUG "p->oz_protocol_init = 0x%p\n", oz_protocol_init);
return 0;
}
/*------------------------------------------------------------------------------
@@ -46,10 +42,6 @@ static void __exit ozwpan_exit(void)
oz_protocol_term();
oz_apps_term();
oz_cdev_deregister();
- oz_event_term();
-#ifdef CONFIG_DEBUG_FS
- oz_debugfs_remove();
-#endif
}
/*------------------------------------------------------------------------------
*/
@@ -59,6 +51,6 @@ module_exit(ozwpan_exit);
MODULE_AUTHOR("Chris Kelly");
MODULE_DESCRIPTION("Ozmo Devices USB over WiFi hcd driver");
-MODULE_VERSION("1.0.13");
+MODULE_VERSION("10.00.01.02.05");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index f8b9da080c4b..ad4acb59e8d9 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -9,13 +9,11 @@
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
-#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "oztrace.h"
-#include "ozevent.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
@@ -24,6 +22,7 @@
/*------------------------------------------------------------------------------
*/
#define OZ_MAX_TX_POOL_SIZE 6
+#define AC_VO 0x106
/*------------------------------------------------------------------------------
*/
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
@@ -82,6 +81,33 @@ static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
NULL,
NULL,
OZ_APPID_SERIAL},
+
+ {oz_def_app_init,
+ oz_def_app_term,
+ oz_def_app_start,
+ oz_def_app_stop,
+ oz_def_app_rx,
+ NULL,
+ NULL,
+ OZ_APPID_UNUSED3},
+
+ {oz_def_app_init,
+ oz_def_app_term,
+ oz_def_app_start,
+ oz_def_app_stop,
+ oz_def_app_rx,
+ NULL,
+ NULL,
+ OZ_APPID_UNUSED4},
+
+ {NULL,
+ NULL,
+ NULL,
+ NULL,
+ oz_cdev_rx,
+ NULL,
+ NULL,
+ OZ_APPID_TFTP},
};
/*------------------------------------------------------------------------------
* Context: process
@@ -121,23 +147,6 @@ static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
pd->state = state;
- oz_event_log(OZ_EVT_PD_STATE, 0, 0, NULL, state);
-#ifdef WANT_TRACE
- switch (state) {
- case OZ_PD_S_IDLE:
- oz_trace("PD State: OZ_PD_S_IDLE\n");
- break;
- case OZ_PD_S_CONNECTED:
- oz_trace("PD State: OZ_PD_S_CONNECTED\n");
- break;
- case OZ_PD_S_STOPPED:
- oz_trace("PD State: OZ_PD_S_STOPPED\n");
- break;
- case OZ_PD_S_SLEEP:
- oz_trace("PD State: OZ_PD_S_SLEEP\n");
- break;
- }
-#endif /* WANT_TRACE */
}
/*------------------------------------------------------------------------------
* Context: softirq or process
@@ -162,6 +171,7 @@ struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
if (pd) {
int i;
+
atomic_set(&pd->ref_count, 2);
for (i = 0; i < OZ_APPID_MAX; i++)
spin_lock_init(&pd->app_lock[i]);
@@ -179,19 +189,28 @@ struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
pd->last_sent_frame = &pd->tx_queue;
spin_lock_init(&pd->stream_lock);
INIT_LIST_HEAD(&pd->stream_list);
+ tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
+ (unsigned long)pd);
+ tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
+ (unsigned long)pd);
+ hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pd->heartbeat.function = oz_pd_heartbeat_event;
+ pd->timeout.function = oz_pd_timeout_event;
}
return pd;
}
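oz_pd_alloc() now arms per-PD hrtimers whose handler bodies are not part of this diff. A plausible shape for such a callback, under the usual constraint that hrtimer handlers run in hard-irq context and so only schedule the matching tasklet and rearm; the names and body here are illustrative, not the driver's actual implementation:

	/* Illustrative sketch only; the real oz_pd_heartbeat_event() body
	 * is not shown in this patch. */
	static enum hrtimer_restart example_heartbeat_event(struct hrtimer *t)
	{
		struct oz_pd *pd = container_of(t, struct oz_pd, heartbeat);

		tasklet_schedule(&pd->heartbeat_tasklet);
		hrtimer_forward_now(t, pd->pulse_period);	/* ktime_t */
		return HRTIMER_RESTART;
	}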
-/*------------------------------------------------------------------------------
- * Context: softirq or process
- */
-void oz_pd_destroy(struct oz_pd *pd)
+static void oz_pd_free(struct work_struct *work)
{
+ struct oz_pd *pd;
struct list_head *e;
struct oz_tx_frame *f;
struct oz_isoc_stream *st;
struct oz_farewell *fwell;
- oz_trace("Destroying PD\n");
+ pd = container_of(work, struct oz_pd, workitem);
+ oz_trace_msg(M, "Destroying PD:%p\n", pd);
+ tasklet_kill(&pd->heartbeat_tasklet);
+ tasklet_kill(&pd->timeout_tasklet);
/* Delete any streams.
*/
e = pd->stream_list.next;
@@ -226,10 +245,34 @@ void oz_pd_destroy(struct oz_pd *pd)
pd->tx_pool = e->next;
kfree(container_of(e, struct oz_tx_frame, link));
}
- if (pd->net_dev)
+ if (pd->net_dev) {
+ oz_trace_msg(M, "dev_put(%p)\n", pd->net_dev);
dev_put(pd->net_dev);
+ }
kfree(pd);
}
+
+
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+void oz_pd_destroy(struct oz_pd *pd)
+{
+ int ret;
+
+ if (hrtimer_active(&pd->timeout))
+ hrtimer_cancel(&pd->timeout);
+ if (hrtimer_active(&pd->heartbeat))
+ hrtimer_cancel(&pd->heartbeat);
+
+ memset(&pd->workitem, 0, sizeof(pd->workitem));
+ INIT_WORK(&pd->workitem, oz_pd_free);
+ ret = schedule_work(&pd->workitem);
+
+ if (ret)
+ oz_trace("failed to schedule workitem\n");
+}
+
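The destroy path can be entered from softirq context, where the heavy teardown (including dev_put() of the bound net device in oz_pd_free()) is better deferred, so the free is handed to a work item that runs in process context. The generic shape of the pattern, as a sketch:

	/* Generic sketch of the defer-to-workqueue pattern used above. */
	struct example_obj {
		struct hrtimer timer;
		struct work_struct workitem;
	};

	static void example_free(struct work_struct *work)
	{
		struct example_obj *o =
			container_of(work, struct example_obj, workitem);

		kfree(o);	/* heavy teardown in process context */
	}

	static void example_destroy(struct example_obj *o)
	{
		hrtimer_cancel(&o->timer);
		INIT_WORK(&o->workitem, example_free);
		schedule_work(&o->workitem);
	}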
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
@@ -238,12 +281,12 @@ int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
const struct oz_app_if *ai;
int rc = 0;
oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
+ if (apps & (1<<OZ_APPID_TFTP))
+ apps |= 1<<OZ_APPID_SERIAL;
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
- if (ai->start(pd, resume)) {
+ if (ai->start && ai->start(pd, resume)) {
rc = -1;
- oz_trace("Unabled to start service %d\n",
- ai->app_id);
break;
}
oz_polling_lock_bh();
@@ -262,6 +305,8 @@ void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
const struct oz_app_if *ai;
oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
+ if (apps & (1<<OZ_APPID_TFTP))
+ apps |= 1<<OZ_APPID_SERIAL;
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
oz_polling_lock_bh();
@@ -272,7 +317,8 @@ void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
pd->paused_apps &= ~(1<<ai->app_id);
}
oz_polling_unlock_bh();
- ai->stop(pd, pause);
+ if (ai->stop)
+ ai->stop(pd, pause);
}
}
}
@@ -289,8 +335,8 @@ void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
more = 1;
}
}
- if (more)
- oz_pd_request_heartbeat(pd);
+ if ((!more) && (hrtimer_active(&pd->heartbeat)))
+ hrtimer_cancel(&pd->heartbeat);
if (pd->mode & OZ_F_ISOC_ANYTIME) {
int count = 8;
while (count-- && (oz_send_isoc_frame(pd) >= 0))
@@ -303,9 +349,9 @@ void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
void oz_pd_stop(struct oz_pd *pd)
{
u16 stop_apps = 0;
- oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
- oz_pd_indicate_farewells(pd);
+ oz_trace_msg(M, "oz_pd_stop() State = 0x%x\n", pd->state);
oz_polling_lock_bh();
+ oz_pd_indicate_farewells(pd);
stop_apps = pd->total_apps;
pd->total_apps = 0;
pd->paused_apps = 0;
@@ -315,9 +361,10 @@ void oz_pd_stop(struct oz_pd *pd)
oz_pd_set_state(pd, OZ_PD_S_STOPPED);
/* Remove from PD list.*/
list_del(&pd->link);
oz_polling_unlock_bh();
- oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
- oz_timer_delete(pd, 0);
+ oz_trace_msg(M, "pd ref count = %d\n", atomic_read(&pd->ref_count));
oz_pd_put(pd);
}
/*------------------------------------------------------------------------------
@@ -327,16 +374,22 @@ int oz_pd_sleep(struct oz_pd *pd)
{
int do_stop = 0;
u16 stop_apps = 0;
+ char mac_buf[20];
+ char *envp[2];
+
+ snprintf(mac_buf, sizeof(mac_buf), "ID_MAC=%pm", pd->mac_addr);
+ envp[0] = mac_buf;
+ envp[1] = NULL;
oz_polling_lock_bh();
if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
oz_polling_unlock_bh();
return 0;
}
- if (pd->keep_alive_j && pd->session_id) {
+ if (pd->keep_alive && pd->session_id) {
+ if (pd->keep_alive >= OZ_KALIVE_INFINITE)
+ oz_pd_indicate_farewells(pd);
oz_pd_set_state(pd, OZ_PD_S_SLEEP);
- pd->pulse_time_j = jiffies + pd->keep_alive_j;
- oz_trace("Sleep Now %lu until %lu\n",
- jiffies, pd->pulse_time_j);
+ kobject_uevent_env(&g_oz_wpan_dev->kobj, KOBJ_CHANGE, envp);
} else {
do_stop = 1;
}
@@ -346,7 +399,7 @@ int oz_pd_sleep(struct oz_pd *pd)
oz_pd_stop(pd);
} else {
oz_services_stop(pd, stop_apps, 1);
- oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
+ oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
}
return do_stop;
}
@@ -386,8 +439,6 @@ static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
} else {
kfree(f);
}
- oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
- pd->nb_queued_isoc_frames);
}
/*------------------------------------------------------------------------------
* Context: softirq or process
@@ -402,12 +453,13 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
f = NULL;
}
spin_unlock_bh(&pd->tx_frame_lock);
- kfree(f);
+ if (f)
+ kfree(f);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-static void oz_set_more_bit(struct sk_buff *skb)
+void oz_set_more_bit(struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
oz_hdr->control |= OZ_F_MORE_DATA;
@@ -415,7 +467,7 @@ static void oz_set_more_bit(struct sk_buff *skb)
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
+void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
@@ -472,6 +524,7 @@ static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
skb_reset_network_header(skb);
skb->dev = dev;
skb->protocol = htons(OZ_ETHERTYPE);
+ skb->priority = AC_VO;
if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
dev->dev_addr, skb->len) < 0)
goto fail;
@@ -541,21 +594,14 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
oz_set_last_pkt_nb(pd, skb);
if ((int)atomic_read(&g_submitted_isoc) <
OZ_MAX_SUBMITTED_ISOC) {
+ oz_trace_skb(skb, 'T');
if (dev_queue_xmit(skb) < 0) {
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Dropping ISOC Frame\n");
- oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
return -1;
}
atomic_inc(&g_submitted_isoc);
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Sending ISOC Frame, nb_isoc= %d\n",
- pd->nb_queued_isoc_frames);
return 0;
} else {
kfree_skb(skb);
- oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
- oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
return -1;
}
}
@@ -563,18 +609,14 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
pd->last_sent_frame = e;
skb = oz_build_frame(pd, f);
spin_unlock(&pd->tx_frame_lock);
+ if (skb == 0)
+ return -1;
+
if (more_data)
oz_set_more_bit(skb);
- oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
- if (skb) {
- oz_event_log(OZ_EVT_TX_FRAME,
- 0,
- (((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
- NULL, f->hdr.pkt_num);
- if (dev_queue_xmit(skb) < 0)
- return -1;
-
- }
+ oz_trace_skb(skb, 'T');
+ if (dev_queue_xmit(skb) < 0)
+ return -1;
return 0;
}
/*------------------------------------------------------------------------------
@@ -635,7 +677,6 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
return 0;
skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL) {
- oz_trace("Cannot alloc skb\n");
oz_elt_info_free_chain(&pd->elt_buff, &list);
return -1;
}
@@ -659,7 +700,7 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
memcpy(elt, ei->data, ei->length);
elt = oz_next_elt(elt);
}
- oz_event_log(OZ_EVT_TX_ISOC, 0, 0, NULL, 0);
+ oz_trace_skb(skb, 'T');
dev_queue_xmit(skb);
oz_elt_info_free_chain(&pd->elt_buff, &list);
return 0;
@@ -684,8 +725,6 @@ void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
break;
- oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
- pkt_num, pd->nb_queued_frames);
if (first == NULL)
first = e;
last = e;
@@ -736,7 +775,8 @@ int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
st = NULL;
}
spin_unlock_bh(&pd->stream_lock);
- kfree(st);
+ if (st)
+ kfree(st);
return 0;
}
/*------------------------------------------------------------------------------
@@ -744,7 +784,8 @@ int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
*/
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
- kfree_skb(st->skb);
+ if (st->skb)
+ kfree_skb(st->skb);
kfree(st);
}
/*------------------------------------------------------------------------------
@@ -768,8 +809,6 @@ int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
static void oz_isoc_destructor(struct sk_buff *skb)
{
atomic_dec(&g_submitted_isoc);
- oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
- 0, skb, 0);
}
/*------------------------------------------------------------------------------
* Context: softirq
@@ -807,13 +846,14 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
skb->dev = dev;
skb->protocol = htons(OZ_ETHERTYPE);
/* For audio packet set priority to AC_VO */
- skb->priority = 0x7;
+ skb->priority = AC_VO;
size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
oz_hdr = (struct oz_hdr *)skb_put(skb, size);
}
memcpy(skb_put(skb, len), data, len);
size += len;
- if (++nb_units < pd->ms_per_isoc) {
+ if ((++nb_units < pd->ms_per_isoc)
+ && ((pd->max_tx_size - size) > len)) {
spin_lock_bh(&pd->stream_lock);
st->skb = skb;
st->nb_units = nb_units;
@@ -845,11 +885,19 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
struct oz_tx_frame *isoc_unit = NULL;
int nb = pd->nb_queued_isoc_frames;
+ struct list_head *e;
+ struct oz_tx_frame *f;
if (nb >= pd->isoc_latency) {
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Dropping ISOC Unit nb= %d\n",
- nb);
- goto out;
+ spin_lock(&pd->tx_frame_lock);
+ list_for_each(e, &pd->tx_queue) {
+ f = container_of(e, struct oz_tx_frame,
+ link);
+ if (f->skb != NULL) {
+ oz_tx_isoc_free(pd, f);
+ break;
+ }
+ }
+ spin_unlock(&pd->tx_frame_lock);
}
isoc_unit = oz_tx_frame_alloc(pd);
if (isoc_unit == NULL)
@@ -860,28 +908,19 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
list_add_tail(&isoc_unit->link, &pd->tx_queue);
pd->nb_queued_isoc_frames++;
spin_unlock_bh(&pd->tx_frame_lock);
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
- pd->nb_queued_isoc_frames, pd->nb_queued_frames);
- oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
- skb, atomic_read(&g_submitted_isoc));
return 0;
}
-
/*In ANYTIME mode Xmit unit immediately*/
if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
atomic_inc(&g_submitted_isoc);
- oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
- skb, atomic_read(&g_submitted_isoc));
+ oz_trace_skb(skb, 'T');
if (dev_queue_xmit(skb) < 0) {
- oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
return -1;
} else
return 0;
}
-out: oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
- kfree_skb(skb);
+out: kfree_skb(skb);
return -1;
}
@@ -926,18 +965,8 @@ void oz_pd_indicate_farewells(struct oz_pd *pd)
{
struct oz_farewell *f;
const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
- while (1) {
- oz_polling_lock_bh();
- if (list_empty(&pd->farewell_list)) {
- oz_polling_unlock_bh();
- break;
- }
- f = list_first_entry(&pd->farewell_list,
- struct oz_farewell, link);
- list_del(&f->link);
- oz_polling_unlock_bh();
+ list_for_each_entry(f, &pd->farewell_list, link) {
if (ai->farewell)
ai->farewell(pd, f->ep_num, f->report, f->len);
- kfree(f);
}
}
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
index fbf47cbab8a9..a4270fe69ee5 100644
--- a/drivers/staging/ozwpan/ozpd.h
+++ b/drivers/staging/ozwpan/ozpd.h
@@ -6,6 +6,7 @@
#ifndef _OZPD_H_
#define _OZPD_H_
+#include <linux/interrupt.h>
#include "ozeltbuf.h"
/* PD state
@@ -21,6 +22,14 @@
#define OZ_TIMER_HEARTBEAT 2
#define OZ_TIMER_STOP 3
+
+/* Tasklet scheduling flag bits.
+ */
+#define OZ_TASKLET_SCHED_TIMEOUT 0
+#define OZ_TASKLET_SCHED_HEARTBEAT 1
+
+
+#define ETH_STRING_LEN 17
/* Data structure that holds information on a frame for transmission. This is
* built when the frame is first transmitted and is used to rebuild the frame
* if a re-transmission is required.
@@ -47,7 +56,7 @@ struct oz_farewell {
struct list_head link;
u8 ep_num;
u8 index;
- u8 report[1];
+ u8 report[32];
u8 len;
};
@@ -68,18 +77,16 @@ struct oz_pd {
u8 isoc_sent;
u32 last_rx_pkt_num;
u32 last_tx_pkt_num;
+ struct timespec last_rx_timestamp;
u32 trigger_pkt_num;
- unsigned long pulse_time_j;
- unsigned long timeout_time_j;
- unsigned long pulse_period_j;
- unsigned long presleep_j;
- unsigned long keep_alive_j;
- unsigned long last_rx_time_j;
+ unsigned long pulse_time;
+ ktime_t pulse_period;
+ unsigned long presleep;
+ unsigned long keep_alive;
struct oz_elt_buf elt_buff;
void *app_ctx[OZ_APPID_MAX];
spinlock_t app_lock[OZ_APPID_MAX];
int max_tx_size;
- u8 heartbeat_requested;
u8 mode;
u8 ms_per_isoc;
unsigned isoc_latency;
@@ -95,6 +102,14 @@ struct oz_pd {
spinlock_t stream_lock;
struct list_head stream_list;
struct net_device *net_dev;
+ struct hrtimer heartbeat;
+ struct hrtimer timeout;
+ u8 timeout_type;
+ struct tasklet_struct heartbeat_tasklet;
+ struct tasklet_struct timeout_tasklet;
+ unsigned long tasklet_sched;
+ struct work_struct workitem;
+ u8 up_audio_buf;
};
#define OZ_MAX_QUEUED_FRAMES 4
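
The per-PD hrtimer and tasklet fields added above replace the old driver-global timer machinery. A hedged sketch of how they would be wired up when a PD is allocated (the actual hook-up lives in ozpd.c, which this diff does not show):

	hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pd->heartbeat.function = oz_pd_heartbeat_event;
	pd->timeout.function = oz_pd_timeout_event;
	tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
		     (unsigned long)pd);
	tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
		     (unsigned long)pd);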
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index 3badf1537adb..1066c4e49ad7 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -10,7 +10,6 @@
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
-#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
@@ -18,7 +17,6 @@
#include "ozusbsvc.h"
#include "oztrace.h"
#include "ozappif.h"
-#include "ozevent.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
@@ -30,48 +28,32 @@
#define OZ_DO_STOP 1
#define OZ_DO_SLEEP 2
-/* States of the timer.
- */
-#define OZ_TIMER_IDLE 0
-#define OZ_TIMER_SET 1
-#define OZ_TIMER_IN_HANDLER 2
-
#define OZ_MAX_TIMER_POOL_SIZE 16
+/*------------------------------------------------------------------------------
+ * Number of units of buffering to capture for an isochronous IN endpoint before
+ * allowing data to be indicated up.
+ */
+#define OZ_IN_BUFFERING_UNITS 100
/*------------------------------------------------------------------------------
*/
struct oz_binding {
struct packet_type ptype;
char name[OZ_MAX_BINDING_LEN];
- struct oz_binding *next;
-};
-
-struct oz_timer {
struct list_head link;
- struct oz_pd *pd;
- unsigned long due_time;
- int type;
};
+
/*------------------------------------------------------------------------------
* Static external variables.
*/
static DEFINE_SPINLOCK(g_polling_lock);
static LIST_HEAD(g_pd_list);
-static struct oz_binding *g_binding ;
+static LIST_HEAD(g_binding);
static DEFINE_SPINLOCK(g_binding_lock);
static struct sk_buff_head g_rx_queue;
static u8 g_session_id;
static u16 g_apps = 0x1;
static int g_processing_rx;
-static struct timer_list g_timer;
-static struct oz_timer *g_cur_timer;
-static struct list_head *g_timer_pool;
-static int g_timer_pool_count;
-static int g_timer_state = OZ_TIMER_IDLE;
-static LIST_HEAD(g_timer_list);
-/*------------------------------------------------------------------------------
- */
-static void oz_protocol_timer_start(void);
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
@@ -114,9 +96,8 @@ static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
return;
}
oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
- oz_hdr->last_pkt_num = 0;
+ oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
put_unaligned(0, &oz_hdr->pkt_num);
- oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, NULL, 0);
elt->type = OZ_ELT_CONNECT_RSP;
elt->length = sizeof(struct oz_elt_connect_rsp);
memset(body, 0, sizeof(struct oz_elt_connect_rsp));
@@ -126,7 +107,7 @@ static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
body->session_id = pd->session_id;
put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
}
- oz_trace("TX: OZ_ELT_CONNECT_RSP %d", status);
+ oz_trace_skb(skb, 'T');
dev_queue_xmit(skb);
return;
}
@@ -139,39 +120,41 @@ static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
switch (kalive & OZ_KALIVE_TYPE_MASK) {
case OZ_KALIVE_SPECIAL:
- pd->keep_alive_j =
- oz_ms_to_jiffies(keep_alive * 1000*60*60*24*20);
+ pd->keep_alive = (keep_alive * OZ_KALIVE_INFINITE);
break;
case OZ_KALIVE_SECS:
- pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000);
+ pd->keep_alive = (keep_alive*1000);
break;
case OZ_KALIVE_MINS:
- pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60);
+ pd->keep_alive = (keep_alive*1000*60);
break;
case OZ_KALIVE_HOURS:
- pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60*60);
+ pd->keep_alive = (keep_alive*1000*60*60);
break;
default:
- pd->keep_alive_j = 0;
+ pd->keep_alive = 0;
}
- oz_trace("Keepalive = %lu jiffies\n", pd->keep_alive_j);
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
+static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer)
{
if (presleep)
- pd->presleep_j = oz_ms_to_jiffies(presleep*100);
+ pd->presleep = presleep*100;
else
- pd->presleep_j = OZ_PRESLEEP_TOUT_J;
- oz_trace("Presleep time = %lu jiffies\n", pd->presleep_j);
+ pd->presleep = OZ_PRESLEEP_TOUT;
+ if (start_timer) {
+ spin_unlock(&g_polling_lock);
+ oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
+ spin_lock(&g_polling_lock);
+ }
}
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
- const u8 *pd_addr, struct net_device *net_dev)
+ const u8 *pd_addr, struct net_device *net_dev, u32 pkt_num)
{
struct oz_pd *pd;
struct oz_elt_connect_req *body =
@@ -190,7 +173,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
pd = oz_pd_alloc(pd_addr);
if (pd == NULL)
return NULL;
- pd->last_rx_time_j = jiffies;
+ getnstimeofday(&pd->last_rx_timestamp);
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd2 = container_of(e, struct oz_pd, link);
@@ -209,13 +192,15 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
}
if (pd->net_dev != net_dev) {
old_net_dev = pd->net_dev;
+ oz_trace_msg(M, "dev_hold(%p)\n", net_dev);
dev_hold(net_dev);
pd->net_dev = net_dev;
}
- oz_trace("Host vendor: %d\n", body->host_vendor);
pd->max_tx_size = OZ_MAX_TX_SIZE;
pd->mode = body->mode;
pd->pd_info = body->pd_info;
+ pd->up_audio_buf = body->up_audio_buf > 0 ? body->up_audio_buf :
+ OZ_IN_BUFFERING_UNITS;
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
pd->ms_per_isoc = body->ms_per_isoc;
if (!pd->ms_per_isoc)
@@ -236,12 +221,10 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
}
if (body->max_len_div16)
pd->max_tx_size = ((u16)body->max_len_div16)<<4;
- oz_trace("Max frame:%u Ms per isoc:%u\n",
- pd->max_tx_size, pd->ms_per_isoc);
pd->max_stream_buffering = 3*1024;
- pd->timeout_time_j = jiffies + OZ_CONNECTION_TOUT_J;
- pd->pulse_period_j = OZ_QUANTUM_J;
- pd_set_presleep(pd, body->presleep);
+ pd->pulse_period = ktime_set(OZ_QUANTUM / MSEC_PER_SEC, (OZ_QUANTUM %
+ MSEC_PER_SEC) * NSEC_PER_MSEC);
+ pd_set_presleep(pd, body->presleep, 0);
pd_set_keepalive(pd, body->keep_alive);
new_apps &= le16_to_cpu(get_unaligned(&body->apps));
@@ -273,9 +256,6 @@ done:
u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
spin_unlock_bh(&g_polling_lock);
oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
- oz_timer_delete(pd, OZ_TIMER_STOP);
- oz_trace("new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
- new_apps, pd->total_apps, pd->paused_apps);
if (start_apps) {
if (oz_services_start(pd, start_apps, 0))
rsp_status = OZ_STATUS_TOO_MANY_PDS;
@@ -289,6 +269,11 @@ done:
} else {
spin_unlock_bh(&g_polling_lock);
}
+
+ /* CONNECT_REQ was sent without AR bit,
+ but firmware does check LPN field to identify correcponding
+ CONNECT_RSP field. */
+ pd->trigger_pkt_num = pkt_num;
oz_send_conn_rsp(pd, rsp_status);
if (rsp_status != OZ_STATUS_SUCCESS) {
if (stop_needed)
@@ -296,8 +281,10 @@ done:
oz_pd_put(pd);
pd = NULL;
}
- if (old_net_dev)
+ if (old_net_dev) {
+ oz_trace_msg(M, "dev_put(%p)", old_net_dev);
dev_put(old_net_dev);
+ }
if (free_pd)
oz_pd_destroy(free_pd);
return pd;
@@ -316,6 +303,7 @@ static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
return;
f->ep_num = ep_num;
f->index = index;
+ f->len = len;
memcpy(f->report, report, len);
oz_trace("RX: Adding farewell report\n");
spin_lock(&g_polling_lock);
@@ -342,15 +330,11 @@ static void oz_rx_frame(struct sk_buff *skb)
int length;
struct oz_pd *pd = NULL;
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
+ struct timespec current_time;
int dup = 0;
u32 pkt_num;
- oz_event_log(OZ_EVT_RX_PROCESS, 0,
- (((u16)oz_hdr->control)<<8)|oz_hdr->last_pkt_num,
- NULL, oz_hdr->pkt_num);
- oz_trace2(OZ_TRACE_RX_FRAMES,
- "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
- oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
+ oz_trace_skb(skb, 'R');
mac_hdr = skb_mac_header(skb);
src_addr = &mac_hdr[ETH_ALEN] ;
length = skb->len;
@@ -362,23 +346,36 @@ static void oz_rx_frame(struct sk_buff *skb)
goto done;
}
+
pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
pd = oz_pd_find(src_addr);
if (pd) {
- pd->last_rx_time_j = jiffies;
- oz_timer_add(pd, OZ_TIMER_TOUT,
- pd->last_rx_time_j + pd->presleep_j, 1);
+ if (!(pd->state & OZ_PD_S_CONNECTED)) {
+ char mac_buf[20];
+ char *envp[2];
+ snprintf(mac_buf, sizeof(mac_buf), "ID_MAC=%pm",
+ pd->mac_addr);
+ envp[0] = mac_buf;
+ envp[1] = NULL;
+ oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
+ kobject_uevent_env(&g_oz_wpan_dev->kobj, KOBJ_CHANGE,
+ envp);
+ }
+ getnstimeofday(&current_time);
+ if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) ||
+ (pd->presleep < MSEC_PER_SEC)) {
+ oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
+ pd->last_rx_timestamp = current_time;
+ }
if (pkt_num != pd->last_rx_pkt_num) {
pd->last_rx_pkt_num = pkt_num;
} else {
dup = 1;
- oz_trace("Duplicate frame\n");
}
}
if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
- oz_trace2(OZ_TRACE_RX_FRAMES, "Received TRIGGER Frame\n");
pd->last_sent_frame = &pd->tx_queue;
if (oz_hdr->control & OZ_F_ACK) {
/* Retire completed frames */
@@ -396,29 +393,26 @@ static void oz_rx_frame(struct sk_buff *skb)
length -= sizeof(struct oz_hdr);
elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
- while (length >= sizeof(struct oz_elt)) {
- length -= sizeof(struct oz_elt) + elt->length;
+ while (length >= oz_elt_hdr_len(elt)) {
+ length -= oz_elt_len(elt);
if (length < 0)
break;
switch (elt->type) {
case OZ_ELT_CONNECT_REQ:
- oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, NULL, 0);
- oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
- pd = oz_connect_req(pd, elt, src_addr, skb->dev);
+ pd = oz_connect_req(pd, elt, src_addr, skb->dev,
+ pkt_num);
break;
case OZ_ELT_DISCONNECT:
- oz_trace("RX: OZ_ELT_DISCONNECT\n");
if (pd)
oz_pd_sleep(pd);
break;
case OZ_ELT_UPDATE_PARAM_REQ: {
struct oz_elt_update_param *body =
(struct oz_elt_update_param *)(elt + 1);
- oz_trace("RX: OZ_ELT_UPDATE_PARAM_REQ\n");
if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
spin_lock(&g_polling_lock);
pd_set_keepalive(pd, body->keepalive);
- pd_set_presleep(pd, body->presleep);
+ pd_set_presleep(pd, body->presleep, 1);
spin_unlock(&g_polling_lock);
}
}
@@ -426,16 +420,16 @@ static void oz_rx_frame(struct sk_buff *skb)
case OZ_ELT_FAREWELL_REQ: {
struct oz_elt_farewell *body =
(struct oz_elt_farewell *)(elt + 1);
- oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
oz_add_farewell(pd, body->ep_num,
body->index, body->report,
elt->length + 1 - sizeof(*body));
}
break;
case OZ_ELT_APP_DATA:
+ case OZ_ELT_APP_DATA_EX:
if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
struct oz_app_hdr *app_hdr =
- (struct oz_app_hdr *)(elt+1);
+ (struct oz_app_hdr *)(oz_elt_data(elt));
if (dup)
break;
oz_handle_app_elt(pd, app_hdr->app_id, elt);
@@ -451,23 +445,44 @@ done:
oz_pd_put(pd);
consume_skb(skb);
}
+
+static int oz_net_notifier(struct notifier_block *nb, unsigned long event,
+ void *ndev)
+{
+ struct net_device *dev = ndev;
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ case NETDEV_DOWN:
+ oz_trace_msg(M, "%s: event %s\n", __func__,
+ (event == NETDEV_UNREGISTER) ?
+ "NETDEV_UNREGISTER" : "NETDEV_DOWN");
+ oz_binding_remove(dev->name);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nb_oz_net_notifier = {
+ .notifier_call = oz_net_notifier
+};
/*------------------------------------------------------------------------------
* Context: process
*/
void oz_protocol_term(void)
{
- struct list_head *chain;
- del_timer_sync(&g_timer);
+ struct oz_binding *b, *t;
+
/* Walk the list of bindings and remove each one.
*/
spin_lock_bh(&g_binding_lock);
- while (g_binding) {
- struct oz_binding *b = g_binding;
- g_binding = b->next;
+ list_for_each_entry_safe(b, t, &g_binding, link) {
+ list_del(&b->link);
spin_unlock_bh(&g_binding_lock);
dev_remove_pack(&b->ptype);
- if (b->ptype.dev)
+ if (b->ptype.dev) {
+ oz_trace_msg(M, "dev_put(%p)\n", b->ptype.dev);
dev_put(b->ptype.dev);
+ }
kfree(b);
spin_lock_bh(&g_binding_lock);
}
@@ -486,250 +501,128 @@ void oz_protocol_term(void)
oz_pd_put(pd);
spin_lock_bh(&g_polling_lock);
}
- chain = g_timer_pool;
- g_timer_pool = NULL;
spin_unlock_bh(&g_polling_lock);
- while (chain) {
- struct oz_timer *t = container_of(chain, struct oz_timer, link);
- chain = chain->next;
- kfree(t);
- }
+ unregister_netdevice_notifier(&nb_oz_net_notifier);
oz_trace("Protocol stopped\n");
}
/*------------------------------------------------------------------------------
* Context: softirq
*/
-static void oz_pd_handle_timer(struct oz_pd *pd, int type)
+void oz_pd_heartbeat_handler(unsigned long data)
{
+ struct oz_pd *pd = (struct oz_pd *)data;
+ u16 apps = 0;
+ spin_lock_bh(&g_polling_lock);
+ if (pd->state & OZ_PD_S_CONNECTED)
+ apps = pd->total_apps;
+ spin_unlock_bh(&g_polling_lock);
+ if (apps)
+ oz_pd_heartbeat(pd, apps);
+ clear_bit(OZ_TASKLET_SCHED_HEARTBEAT, &pd->tasklet_sched);
+ oz_pd_put(pd);
+}
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+void oz_pd_timeout_handler(unsigned long data)
+{
+ int type;
+ struct oz_pd *pd = (struct oz_pd *)data;
+
+ spin_lock_bh(&g_polling_lock);
+ type = pd->timeout_type;
+ spin_unlock_bh(&g_polling_lock);
switch (type) {
case OZ_TIMER_TOUT:
+ oz_trace_msg(M, "OZ_TIMER_TOUT:\n");
oz_pd_sleep(pd);
break;
case OZ_TIMER_STOP:
+ oz_trace_msg(M, "OZ_TIMER_STOP:\n");
oz_pd_stop(pd);
break;
- case OZ_TIMER_HEARTBEAT: {
- u16 apps = 0;
- spin_lock_bh(&g_polling_lock);
- pd->heartbeat_requested = 0;
- if (pd->state & OZ_PD_S_CONNECTED)
- apps = pd->total_apps;
- spin_unlock_bh(&g_polling_lock);
- if (apps)
- oz_pd_heartbeat(pd, apps);
- }
- break;
}
+ clear_bit(OZ_TASKLET_SCHED_TIMEOUT, &pd->tasklet_sched);
+ oz_pd_put(pd);
}
/*------------------------------------------------------------------------------
- * Context: softirq
+ * Context: Interrupt
*/
-static void oz_protocol_timer(unsigned long arg)
+enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer)
{
- struct oz_timer *t;
- struct oz_timer *t2;
struct oz_pd *pd;
- spin_lock_bh(&g_polling_lock);
- if (!g_cur_timer) {
- /* This happens if we remove the current timer but can't stop
- * the timer from firing. In this case just get out.
+
+ pd = container_of(timer, struct oz_pd, heartbeat);
+ hrtimer_forward(timer,
+ hrtimer_get_expires(timer), pd->pulse_period);
+ oz_pd_get(pd);
+ if (!test_and_set_bit(OZ_TASKLET_SCHED_HEARTBEAT, &pd->tasklet_sched)) {
+ /* schedule tasklet! */
+ tasklet_schedule(&pd->heartbeat_tasklet);
+ } else {
+ /* oz_pd_heartbeat_handler is already scheduled or running.
+ * Decrement the PD reference count.
*/
- oz_event_log(OZ_EVT_TIMER, 0, 0, NULL, 0);
- spin_unlock_bh(&g_polling_lock);
- return;
- }
- g_timer_state = OZ_TIMER_IN_HANDLER;
- t = g_cur_timer;
- g_cur_timer = NULL;
- list_del(&t->link);
- spin_unlock_bh(&g_polling_lock);
- do {
- pd = t->pd;
- oz_event_log(OZ_EVT_TIMER, 0, t->type, NULL, 0);
- oz_pd_handle_timer(pd, t->type);
- spin_lock_bh(&g_polling_lock);
- if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
- t->link.next = g_timer_pool;
- g_timer_pool = &t->link;
- g_timer_pool_count++;
- t = NULL;
- }
- if (!list_empty(&g_timer_list)) {
- t2 = container_of(g_timer_list.next,
- struct oz_timer, link);
- if (time_before_eq(t2->due_time, jiffies))
- list_del(&t2->link);
- else
- t2 = NULL;
- } else {
- t2 = NULL;
- }
- spin_unlock_bh(&g_polling_lock);
oz_pd_put(pd);
- kfree(t);
- t = t2;
- } while (t);
- g_timer_state = OZ_TIMER_IDLE;
- oz_protocol_timer_start();
+ }
+ return HRTIMER_RESTART;
}
/*------------------------------------------------------------------------------
- * Context: softirq
+ * Context: Interrupt
*/
-static void oz_protocol_timer_start(void)
+enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer)
{
- spin_lock_bh(&g_polling_lock);
- if (!list_empty(&g_timer_list)) {
- g_cur_timer =
- container_of(g_timer_list.next, struct oz_timer, link);
- if (g_timer_state == OZ_TIMER_SET) {
- oz_event_log(OZ_EVT_TIMER_CTRL, 3,
- (u16)g_cur_timer->type, NULL,
- (unsigned)g_cur_timer->due_time);
- mod_timer(&g_timer, g_cur_timer->due_time);
- } else {
- oz_event_log(OZ_EVT_TIMER_CTRL, 4,
- (u16)g_cur_timer->type, NULL,
- (unsigned)g_cur_timer->due_time);
- g_timer.expires = g_cur_timer->due_time;
- g_timer.function = oz_protocol_timer;
- g_timer.data = 0;
- add_timer(&g_timer);
- }
- g_timer_state = OZ_TIMER_SET;
+ struct oz_pd *pd;
+
+ pd = container_of(timer, struct oz_pd, timeout);
+ oz_pd_get(pd);
+ if (!test_and_set_bit(OZ_TASKLET_SCHED_TIMEOUT, &pd->tasklet_sched)) {
+ /* Schedule tasklet! */
+ tasklet_schedule(&pd->timeout_tasklet);
} else {
- oz_trace("No queued timers\n");
+ /* oz_pd_timeout_handler is already scheduled or running.
+ * Decrement the PD reference count.
+ */
+ oz_pd_put(pd);
}
- spin_unlock_bh(&g_polling_lock);
+ return HRTIMER_NORESTART;
}
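
Both hrtimer callbacks above follow the same hand-off: take a PD reference, then schedule the tasklet only if its OZ_TASKLET_SCHED_* bit was previously clear, so a slow handler can never be queued twice. In outline (SCHED_BIT stands in for either flag):

	oz_pd_get(pd);				/* reference travels with the tasklet */
	if (!test_and_set_bit(SCHED_BIT, &pd->tasklet_sched))
		tasklet_schedule(&pd->tasklet);	/* handler clears the bit, puts pd */
	else
		oz_pd_put(pd);			/* already queued: drop the extra ref */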
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
- int remove)
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time)
{
- struct list_head *e;
- struct oz_timer *t = NULL;
- int restart_needed = 0;
- oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, NULL, (unsigned)due_time);
- spin_lock(&g_polling_lock);
- if (remove) {
- list_for_each(e, &g_timer_list) {
- t = container_of(e, struct oz_timer, link);
- if ((t->pd == pd) && (t->type == type)) {
- if (g_cur_timer == t) {
- restart_needed = 1;
- g_cur_timer = NULL;
- }
- list_del(e);
- break;
- }
- t = NULL;
- }
- }
- if (!t) {
- if (g_timer_pool) {
- t = container_of(g_timer_pool, struct oz_timer, link);
- g_timer_pool = g_timer_pool->next;
- g_timer_pool_count--;
+ spin_lock_bh(&g_polling_lock);
+ switch (type) {
+ case OZ_TIMER_TOUT:
+ case OZ_TIMER_STOP:
+ if (hrtimer_active(&pd->timeout)) {
+ hrtimer_set_expires(&pd->timeout, ktime_set(due_time /
+ MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
+ NSEC_PER_MSEC));
+ hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL);
} else {
- t = kmalloc(sizeof(struct oz_timer), GFP_ATOMIC);
- }
- if (t) {
- t->pd = pd;
- t->type = type;
- oz_pd_get(pd);
- }
- }
- if (t) {
- struct oz_timer *t2;
- t->due_time = due_time;
- list_for_each(e, &g_timer_list) {
- t2 = container_of(e, struct oz_timer, link);
- if (time_before(due_time, t2->due_time)) {
- if (t2 == g_cur_timer) {
- g_cur_timer = NULL;
- restart_needed = 1;
- }
- break;
- }
- }
- list_add_tail(&t->link, e);
- }
- if (g_timer_state == OZ_TIMER_IDLE)
- restart_needed = 1;
- else if (g_timer_state == OZ_TIMER_IN_HANDLER)
- restart_needed = 0;
- spin_unlock(&g_polling_lock);
- if (restart_needed)
- oz_protocol_timer_start();
-}
-/*------------------------------------------------------------------------------
- * Context: softirq or process
- */
-void oz_timer_delete(struct oz_pd *pd, int type)
-{
- struct list_head *chain = NULL;
- struct oz_timer *t;
- struct oz_timer *n;
- int restart_needed = 0;
- int release = 0;
- oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, NULL, 0);
- spin_lock(&g_polling_lock);
- list_for_each_entry_safe(t, n, &g_timer_list, link) {
- if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
- if (g_cur_timer == t) {
- restart_needed = 1;
- g_cur_timer = NULL;
- del_timer(&g_timer);
- }
- list_del(&t->link);
- release++;
- if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
- t->link.next = g_timer_pool;
- g_timer_pool = &t->link;
- g_timer_pool_count++;
- } else {
- t->link.next = chain;
- chain = &t->link;
- }
- if (type)
- break;
+ hrtimer_start(&pd->timeout, ktime_set(due_time /
+ MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
+ NSEC_PER_MSEC), HRTIMER_MODE_REL);
}
+ pd->timeout_type = type;
+ break;
+ case OZ_TIMER_HEARTBEAT:
+ if (!hrtimer_active(&pd->heartbeat))
+ hrtimer_start(&pd->heartbeat, ktime_set(due_time /
+ MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
+ NSEC_PER_MSEC), HRTIMER_MODE_REL);
+ break;
}
- if (g_timer_state == OZ_TIMER_IN_HANDLER)
- restart_needed = 0;
- else if (restart_needed)
- g_timer_state = OZ_TIMER_IDLE;
- spin_unlock(&g_polling_lock);
- if (restart_needed)
- oz_protocol_timer_start();
- while (release--)
- oz_pd_put(pd);
- while (chain) {
- t = container_of(chain, struct oz_timer, link);
- chain = chain->next;
- kfree(t);
- }
+ spin_unlock_bh(&g_polling_lock);
}
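
The repeated ktime_set(t / MSEC_PER_SEC, (t % MSEC_PER_SEC) * NSEC_PER_MSEC) expression is the standard millisecond-to-ktime split. On kernels that export ms_to_ktime(), the same thing can be written more compactly (a possible simplification, not part of this patch):

	hrtimer_start(&pd->timeout, ms_to_ktime(due_time), HRTIMER_MODE_REL);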
/*------------------------------------------------------------------------------
* Context: softirq or process
*/
void oz_pd_request_heartbeat(struct oz_pd *pd)
{
- unsigned long now = jiffies;
- unsigned long t;
- spin_lock(&g_polling_lock);
- if (pd->heartbeat_requested) {
- spin_unlock(&g_polling_lock);
- return;
- }
- if (pd->pulse_period_j)
- t = ((now / pd->pulse_period_j) + 1) * pd->pulse_period_j;
- else
- t = now + 1;
- pd->heartbeat_requested = 1;
- spin_unlock(&g_polling_lock);
- oz_timer_add(pd, OZ_TIMER_HEARTBEAT, t, 0);
+ oz_timer_add(pd, OZ_TIMER_HEARTBEAT, OZ_QUANTUM);
}
/*------------------------------------------------------------------------------
* Context: softirq or process
@@ -748,7 +641,7 @@ struct oz_pd *oz_pd_find(const u8 *mac_addr)
}
}
spin_unlock_bh(&g_polling_lock);
- return NULL;
+ return 0;
}
/*------------------------------------------------------------------------------
* Context: process
@@ -770,7 +663,6 @@ void oz_app_enable(int app_id, int enable)
static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- oz_event_log(OZ_EVT_RX_FRAME, 0, 0, NULL, 0);
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb == NULL)
return 0;
@@ -802,7 +694,7 @@ static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
/*------------------------------------------------------------------------------
* Context: process
*/
-void oz_binding_add(char *net_dev)
+void oz_binding_add(const char *net_dev)
{
struct oz_binding *binding;
@@ -812,23 +704,23 @@ void oz_binding_add(char *net_dev)
binding->ptype.func = oz_pkt_recv;
memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
if (net_dev && *net_dev) {
- oz_trace("Adding binding: %s\n", net_dev);
+ oz_trace_msg(M, "Adding binding: '%s'\n", net_dev);
binding->ptype.dev =
dev_get_by_name(&init_net, net_dev);
if (binding->ptype.dev == NULL) {
- oz_trace("Netdev %s not found\n", net_dev);
+ oz_trace_msg(M, "Netdev '%s' not found\n",
+ net_dev);
kfree(binding);
binding = NULL;
}
} else {
- oz_trace("Binding to all netcards\n");
+ oz_trace_msg(M, "Binding to all netcards\n");
binding->ptype.dev = NULL;
}
if (binding) {
dev_add_pack(&binding->ptype);
spin_lock_bh(&g_binding_lock);
- binding->next = g_binding;
- g_binding = binding;
+ list_add_tail(&binding->link, &g_binding);
spin_unlock_bh(&g_binding_lock);
}
}
@@ -836,7 +728,7 @@ void oz_binding_add(char *net_dev)
/*------------------------------------------------------------------------------
* Context: process
*/
-static int compare_binding_name(char *s1, char *s2)
+static int compare_binding_name(const char *s1, const char *s2)
{
int i;
for (i = 0; i < OZ_MAX_BINDING_LEN; i++) {
@@ -874,28 +766,26 @@ static void pd_stop_all_for_device(struct net_device *net_dev)
/*------------------------------------------------------------------------------
* Context: process
*/
-void oz_binding_remove(char *net_dev)
+void oz_binding_remove(const char *net_dev)
{
- struct oz_binding *binding;
- struct oz_binding **link;
- oz_trace("Removing binding: %s\n", net_dev);
+ struct oz_binding *binding, *tmp;
+ int found = 0;
+
+ oz_trace_msg(M, "Removing binding: '%s'\n", net_dev);
spin_lock_bh(&g_binding_lock);
- binding = g_binding;
- link = &g_binding;
- while (binding) {
+ list_for_each_entry_safe(binding, tmp, &g_binding, link) {
if (compare_binding_name(binding->name, net_dev)) {
- oz_trace("Binding '%s' found\n", net_dev);
- *link = binding->next;
+ oz_trace_msg(M, "Binding '%s' found\n", net_dev);
+ list_del(&binding->link);
+ found = 1;
break;
- } else {
- link = &binding;
- binding = binding->next;
}
}
spin_unlock_bh(&g_binding_lock);
- if (binding) {
+ if (found) {
dev_remove_pack(&binding->ptype);
if (binding->ptype.dev) {
+ oz_trace_msg(M, "dev_put(%s)\n", binding->name);
dev_put(binding->ptype.dev);
pd_stop_all_for_device(binding->ptype.dev);
}
@@ -905,6 +795,24 @@ void oz_binding_remove(char *net_dev)
/*------------------------------------------------------------------------------
* Context: process
*/
+int oz_get_binding_list(char *buf, int max_if)
+{
+ struct oz_binding *binding = NULL;
+ int count = 0;
+
+ spin_lock_bh(&g_binding_lock);
+ list_for_each_entry(binding, &g_binding, link) {
+ if (count >= max_if)
+ break;
+ memcpy(buf, binding->name, OZ_MAX_BINDING_LEN);
+ buf += OZ_MAX_BINDING_LEN;
+ count++;
+ }
+ spin_unlock_bh(&g_binding_lock);
+ return count;
+}
+/*------------------------------------------------------------------------------
+ * Context: process
+ */
static char *oz_get_next_device_name(char *s, char *dname, int max_size)
{
while (*s == ',')
@@ -923,16 +831,21 @@ int oz_protocol_init(char *devs)
{
skb_queue_head_init(&g_rx_queue);
if (devs && (devs[0] == '*')) {
- oz_binding_add(NULL);
+ return -1;
} else {
char d[32];
+ int err = 0;
+ err = register_netdevice_notifier(&nb_oz_net_notifier);
+ if (err) {
+ oz_trace("notifier registration failed. err %d\n", err);
+ return -1;
+ }
while (*devs) {
devs = oz_get_next_device_name(devs, d, sizeof(d));
if (d[0])
oz_binding_add(d);
}
}
- init_timer(&g_timer);
return 0;
}
/*------------------------------------------------------------------------------
@@ -954,6 +867,32 @@ int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
return count;
}
/*------------------------------------------------------------------------------
+ * Context: process
+ */
+int oz_get_pd_status_list(u8 *pd_list, int max_count)
+{
+ struct oz_pd *pd;
+ struct list_head *e;
+ int count = 0;
+
+ spin_lock_bh(&g_polling_lock);
+ list_for_each(e, &g_pd_list) {
+ if (count >= max_count)
+ break;
+ pd = container_of(e, struct oz_pd, link);
+ if (pd_list) {
+ memcpy(&pd_list[count * (ETH_ALEN + sizeof(pd->state))],
+ pd->mac_addr, ETH_ALEN);
+ memcpy(&pd_list[(count * (ETH_ALEN + sizeof(pd->state)))
+ + ETH_ALEN],
+ &pd->state, sizeof(pd->state));
+ count++;
+ }
+ }
+ spin_unlock_bh(&g_polling_lock);
+ return count;
+}
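
Each entry that oz_get_pd_status_list() writes is a 6-byte MAC address immediately followed by the raw state word. A hypothetical reader-side view of that packed layout (assuming pd->state is an unsigned int, which the memcpy of sizeof(pd->state) does not pin down):

struct oz_pd_status_entry {
	u8 mac[ETH_ALEN];
	unsigned int state;	/* assumption: matches sizeof(pd->state) */
} __packed;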
+/*------------------------------------------------------------------------------
*/
void oz_polling_lock_bh(void)
{
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
index 93bb4c0172e0..64ac30c7fb54 100644
--- a/drivers/staging/ozwpan/ozproto.h
+++ b/drivers/staging/ozwpan/ozproto.h
@@ -7,28 +7,19 @@
#define _OZPROTO_H
#include <asm/byteorder.h>
-#include "ozconfig.h"
#include "ozappif.h"
#define OZ_ALLOCATED_SPACE(__x) (LL_RESERVED_SPACE(__x)+(__x)->needed_tailroom)
-/* Converts millisecs to jiffies.
- */
-#define oz_ms_to_jiffies(__x) msecs_to_jiffies(__x)
-/* Quantum milliseconds.
- */
-#define OZ_QUANTUM_MS 8
-/* Quantum jiffies
- */
-#define OZ_QUANTUM_J (oz_ms_to_jiffies(OZ_QUANTUM_MS))
+/* Quantum in MS */
+#define OZ_QUANTUM 8
/* Default timeouts.
*/
-#define OZ_CONNECTION_TOUT_J (2*HZ)
-#define OZ_PRESLEEP_TOUT_J (11*HZ)
+#define OZ_PRESLEEP_TOUT 11
/* Maximum size of tx frames. */
-#define OZ_MAX_TX_SIZE 1514
+#define OZ_MAX_TX_SIZE 760
/* Maximum number of uncompleted isoc frames that can be pending in network. */
#define OZ_MAX_SUBMITTED_ISOC 16
@@ -63,13 +54,17 @@ void oz_protocol_term(void);
int oz_get_pd_list(struct oz_mac_addr *addr, int max_count);
void oz_app_enable(int app_id, int enable);
struct oz_pd *oz_pd_find(const u8 *mac_addr);
-void oz_binding_add(char *net_dev);
-void oz_binding_remove(char *net_dev);
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
- int remove);
+void oz_binding_add(const char *net_dev);
+void oz_binding_remove(const char *net_dev);
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time);
void oz_timer_delete(struct oz_pd *pd, int type);
void oz_pd_request_heartbeat(struct oz_pd *pd);
void oz_polling_lock_bh(void);
void oz_polling_unlock_bh(void);
-
+void oz_pd_heartbeat_handler(unsigned long data);
+void oz_pd_timeout_handler(unsigned long data);
+enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer);
+enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer);
+int oz_get_pd_status_list(u8 *pd_list, int max_count);
+int oz_get_binding_list(char *buf, int max_if);
#endif /* _OZPROTO_H */
diff --git a/drivers/staging/ozwpan/ozprotocol.h b/drivers/staging/ozwpan/ozprotocol.h
index 17b09b9a5b08..5cb989c90a58 100644
--- a/drivers/staging/ozwpan/ozprotocol.h
+++ b/drivers/staging/ozwpan/ozprotocol.h
@@ -27,17 +27,51 @@ struct oz_elt {
u8 length;
} PACKED;
-#define oz_next_elt(__elt) \
- (struct oz_elt *)((u8 *)((__elt) + 1) + (__elt)->length)
+/* This is an extended element header.
+ */
+struct oz_ext_elt {
+ u8 type;
+ u16 length;
+} PACKED;
+
+
+
+#define oz_is_ext_elt(__elt) \
+ (((struct oz_elt *)(__elt))->type >= OZ_ELT_EXTENDED)
+
+#define oz_elt_hdr_len(__elt) \
+ (int)(oz_is_ext_elt(__elt) ? \
+ (sizeof(struct oz_ext_elt)) \
+ : (sizeof(struct oz_elt)))
+
+#define oz_elt_data_len(__elt) \
+ (int)(oz_is_ext_elt(__elt) ? \
+ (le16_to_cpu((((struct oz_ext_elt *)(__elt))->length))) \
+ : (__elt)->length)
+
+#define oz_elt_len(__elt) \
+ (oz_elt_hdr_len(__elt) + oz_elt_data_len(__elt))
+
+#define oz_elt_data(__elt) \
+ ((u8 *)(((u8 *)(__elt)) + oz_elt_hdr_len(__elt)))
+
+
+#define oz_next_elt(__elt) \
+ (struct oz_elt *)((u8 *)(__elt) + oz_elt_len(__elt))
+
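
Together these macros let element walkers handle classic and extended headers uniformly; the receive loop earlier in this patch uses them exactly this way (process() is a hypothetical stand-in for the per-type switch):

	while (length >= oz_elt_hdr_len(elt)) {
		length -= oz_elt_len(elt);
		if (length < 0)
			break;
		process(oz_elt_data(elt), oz_elt_data_len(elt));
		elt = oz_next_elt(elt);
	}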
/* Protocol element IDs.
*/
+#define OZ_ELT_EXTENDED 0xC0
+#define OZ_ELT_ID_MASK 0x3F
#define OZ_ELT_CONNECT_REQ 0x06
#define OZ_ELT_CONNECT_RSP 0x07
#define OZ_ELT_DISCONNECT 0x08
#define OZ_ELT_UPDATE_PARAM_REQ 0x11
#define OZ_ELT_FAREWELL_REQ 0x12
#define OZ_ELT_APP_DATA 0x31
+#define OZ_ELT_APP_DATA_EX (OZ_ELT_EXTENDED|OZ_ELT_APP_DATA)
+
/* This is the Ozmo header which is the first Ozmo specific part
* of a frame and comes after the MAC header.
@@ -83,7 +117,8 @@ struct oz_elt_connect_req {
u16 apps;
u8 max_len_div16;
u8 ms_per_isoc;
- u8 resv3[2];
+ u8 up_audio_buf;
+ u8 ms_per_elt;
} PACKED;
/* mode field bits.
@@ -104,6 +139,8 @@ struct oz_elt_connect_req {
#define OZ_KALIVE_MINS 0x80
#define OZ_KALIVE_HOURS 0xc0
+#define OZ_KALIVE_INFINITE (1000*60*60*24*20)
+
/* Connect response data structure.
*/
struct oz_elt_connect_rsp {
@@ -142,7 +179,10 @@ struct oz_app_hdr {
#define OZ_APPID_UNUSED1 0x2
#define OZ_APPID_UNUSED2 0x3
#define OZ_APPID_SERIAL 0x4
-#define OZ_APPID_MAX OZ_APPID_SERIAL
+#define OZ_APPID_UNUSED3 0x5
+#define OZ_APPID_UNUSED4 0x6
+#define OZ_APPID_TFTP 0x7
+#define OZ_APPID_MAX OZ_APPID_TFTP
#define OZ_NB_APPS (OZ_APPID_MAX+1)
/* USB header common to all elements for the USB application.
diff --git a/drivers/staging/ozwpan/oztrace.c b/drivers/staging/ozwpan/oztrace.c
index 353ead24fd7d..4503c5dfa091 100644
--- a/drivers/staging/ozwpan/oztrace.c
+++ b/drivers/staging/ozwpan/oztrace.c
@@ -3,34 +3,157 @@
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
-#include "ozconfig.h"
#include "oztrace.h"
+#define CREATE_TRACE_POINTS
+#include "ozeventtrace.h"
-#ifdef WANT_VERBOSE_TRACE
-unsigned long trace_flags =
- 0
-#ifdef WANT_TRACE_STREAM
- | OZ_TRACE_STREAM
-#endif /* WANT_TRACE_STREAM */
-#ifdef WANT_TRACE_URB
- | OZ_TRACE_URB
-#endif /* WANT_TRACE_URB */
-
-#ifdef WANT_TRACE_CTRL_DETAIL
- | OZ_TRACE_CTRL_DETAIL
-#endif /* WANT_TRACE_CTRL_DETAIL */
-
-#ifdef WANT_TRACE_HUB
- | OZ_TRACE_HUB
-#endif /* WANT_TRACE_HUB */
-
-#ifdef WANT_TRACE_RX_FRAMES
- | OZ_TRACE_RX_FRAMES
-#endif /* WANT_TRACE_RX_FRAMES */
-
-#ifdef WANT_TRACE_TX_FRAMES
- | OZ_TRACE_TX_FRAMES
-#endif /* WANT_TRACE_TX_FRAMES */
- ;
-#endif /* WANT_VERBOSE_TRACE */
+#define OZ_TRACE_DUMP_SKB_LEN_MAX 32
+#define OZ_TRACE_DUMP_URB_LEN_MAX 16
+u32 g_debug =
+#ifdef WANT_TRACE_DATA_FLOW
+ TRC_FLG(M)|TRC_FLG(R)|TRC_FLG(T)|
+ TRC_FLG(S)|TRC_FLG(E)|TRC_FLG(C);
+#else
+ 0;
+#endif
+
+void (*func[]) (char *fmt, va_list arg) = {
+ trace_hcd_msg_evt,
+ trace_isoc_msg_evt,
+ trace_info_msg_evt
+};
+
+void oz_dump_data(char *buf, unsigned char *data, int len, int lmt)
+{
+ int i = 0;
+ if (len > lmt)
+ len = lmt;
+ while (len--) {
+ *buf = (*data>>4) + '0';
+ if (*data > (0xA0-1))
+ *buf += 'A' - '9' - 1;
+ *++buf = (*data++&0xF) + '0';
+ if (*buf > '9')
+ *buf += 'A' - '9' - 1;
+ buf++;
+ if (!(++i % 4))
+ *buf++ = ' ';
+ }
+ *buf++ = '\n';
+ *buf = 0;
+}
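
oz_dump_data() emits two upper-case hex digits per byte and a space after every fourth byte. A worked illustration (hand-computed, not captured output):

	char buf[32];
	u8 data[] = { 0x12, 0xAB, 0xCD, 0xEF, 0x01 };

	oz_dump_data(buf, data, sizeof(data), 8);
	/* buf now holds "12ABCDEF 01\n" */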
+
+void oz_trace_f_urb_in(struct urb *urb)
+{
+ int i = 0;
+ char buf[128*2];
+ int endpoint = usb_pipeendpoint(urb->pipe);
+
+ if (usb_pipein(urb->pipe))
+ endpoint |= 0x80;
+
+ if (endpoint == 0x00 || endpoint == 0x80) {
+ i += sprintf(&buf[i], "OZ S %08X %02X %02X ",
+ (unsigned int)((uintptr_t)urb), endpoint,
+ urb->transfer_buffer_length);
+
+ oz_dump_data(&buf[i], urb->setup_packet, 8, 8);
+
+ } else {
+ i += sprintf(&buf[i], "OZ S %08X %02X %02X ",
+ (unsigned int)((uintptr_t)urb), endpoint,
+ urb->transfer_buffer_length);
+ if (!usb_pipein(urb->pipe)) {
+ oz_dump_data(&buf[i], (u8 *)(urb->transfer_buffer),
+ urb->transfer_buffer_length,
+ OZ_TRACE_DUMP_URB_LEN_MAX);
+
+ } else {
+ oz_dump_data(&buf[i], NULL, 0, 0);
+ }
+
+ }
+ printk("%s", buf);
+}
+
+void oz_trace_f_urb_out(struct urb *urb, int status)
+{
+ int i = 0;
+ char buf[128*2];
+ int endpoint = usb_pipeendpoint(urb->pipe);
+ int length = urb->actual_length;
+
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+
+ if (usb_pipein(urb->pipe))
+ endpoint |= 0x80;
+
+ if (status != 0) {
+ printk("OZ E %08X %08X\n",
+ (unsigned int)((uintptr_t)urb), status);
+ } else {
+ i += sprintf(&buf[i], "OZ C %08X %02X %02X ",
+ (unsigned int)((uintptr_t)urb),
+ endpoint, urb->actual_length);
+
+ if (usb_pipein(urb->pipe)) {
+ oz_dump_data(&buf[i],
+ (u8 *)(urb->transfer_buffer),
+ urb->actual_length,
+ OZ_TRACE_DUMP_URB_LEN_MAX);
+ } else {
+ oz_dump_data(&buf[i], NULL, 0, 0);
+ }
+ printk("%s", buf);
+ }
+}
+
+void oz_trace_f_skb(struct sk_buff *skb, char dir)
+{
+ int i = 0;
+ char buf[128*2];
+ int len = skb->len;
+
+ if (dir == 'T')
+ len -= 14;
+
+ i += sprintf(&buf[i], "OZ %c %04X ", dir, len);
+ oz_dump_data(&buf[i], (u8 *)skb_network_header(skb),
+ len, OZ_TRACE_DUMP_SKB_LEN_MAX);
+ printk("%s", buf);
+}
+
+void oz_trace_f_dbg(void)
+{
+}
+
+void trace_dbg_msg(int c, char *fmt, ...)
+{
+ va_list arg;
+
+ va_start(arg, fmt);
+ func[c](fmt, arg);
+ va_end(arg);
+}
+
+void trace_debug_log(char log_type, ...)
+{
+ va_list arg;
+ char *fmt;
+
+ va_start(arg, log_type);
+ fmt = va_arg(arg, char *);
+ switch (log_type) {
+ case 'H':
+ trace_hcd_msg_evt(fmt, arg);
+ break;
+ case 'I':
+ trace_isoc_msg_evt(fmt, arg);
+ break;
+ default:
+ trace_info_msg_evt(fmt, arg);
+ break;
+ }
+ va_end(arg);
+}
diff --git a/drivers/staging/ozwpan/oztrace.h b/drivers/staging/ozwpan/oztrace.h
index 8293b24c5a77..ab1d5cb96c1c 100644
--- a/drivers/staging/ozwpan/oztrace.h
+++ b/drivers/staging/ozwpan/oztrace.h
@@ -5,31 +5,95 @@
*/
#ifndef _OZTRACE_H_
#define _OZTRACE_H_
-#include "ozconfig.h"
-
-#define TRACE_PREFIX KERN_ALERT "OZWPAN: "
-
-#ifdef WANT_TRACE
-#define oz_trace(...) printk(TRACE_PREFIX __VA_ARGS__)
-#ifdef WANT_VERBOSE_TRACE
-extern unsigned long trace_flags;
-#define oz_trace2(_flag, ...) \
- do { if (trace_flags & _flag) printk(TRACE_PREFIX __VA_ARGS__); \
- } while (0)
-#else
-#define oz_trace2(...)
-#endif /* #ifdef WANT_VERBOSE_TRACE */
-#else
-#define oz_trace(...)
-#define oz_trace2(...)
-#endif /* #ifdef WANT_TRACE */
-
-#define OZ_TRACE_STREAM 0x1
-#define OZ_TRACE_URB 0x2
-#define OZ_TRACE_CTRL_DETAIL 0x4
-#define OZ_TRACE_HUB 0x8
-#define OZ_TRACE_RX_FRAMES 0x10
-#define OZ_TRACE_TX_FRAMES 0x20
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include "ozeventtrace.h"
+
+extern struct device *g_oz_wpan_dev;
+
+#define oz_trace(fmt, ...) \
+ do { dev_dbg(g_oz_wpan_dev, fmt, ##__VA_ARGS__); } while (0)
+
+void oz_trace_f_urb_out(struct urb *urb, int status);
+void oz_trace_f_urb_in(struct urb *urb);
+void oz_trace_f_skb(struct sk_buff *skb, char dir);
+void oz_trace_f_dbg(void);
+void trace_dbg_msg(int c, char *fmt, ...);
+void trace_debug_log(char log_type, ...);
+
+extern u32 g_debug;
+
+#define TRC_A 'A'
+#define TRC_B 'B'
+#define TRC_C 'C' /* urb Completion */
+#define TRC_D 'D' /* Debug */
+#define TRC_E 'E' /* urb Error */
+#define TRC_F 'F'
+#define TRC_G 'G'
+#define TRC_H 'H' /* Hcd message */
+#define TRC_I 'I' /* Isoc buffer depth */
+#define TRC_J 'J'
+#define TRC_K 'K'
+#define TRC_L 'L'
+#define TRC_M 'M' /* Message */
+#define TRC_N 'N'
+#define TRC_O 'O'
+#define TRC_P 'P'
+#define TRC_Q 'Q'
+#define TRC_R 'R' /* Rx Ozmo frame */
+#define TRC_S 'S' /* urb Submission */
+#define TRC_T 'T' /* Tx ozmo frame */
+#define TRC_U 'U'
+#define TRC_V 'V'
+#define TRC_W 'W'
+#define TRC_X 'X'
+#define TRC_Y 'Y'
+#define TRC_Z 'Z'
+
+#define TRC_FLG(f) (1<<((TRC_##f)-'A'))
+
+#define oz_trace_urb_out(u, s) \
+ do { if (!g_debug) \
+ trace_urb_out(u, s); \
+ else if ((g_debug & TRC_FLG(C)) ||\
+ ((g_debug & TRC_FLG(E)) && (s != 0))) \
+ oz_trace_f_urb_out(u, s); } while (0)
+
+#define oz_trace_urb_in(u) \
+ do { if (!g_debug) \
+ trace_urb_in(u); \
+ else if (g_debug & TRC_FLG(S)) \
+ oz_trace_f_urb_in(u); } while (0)
+
+#define oz_trace_skb(u, d) \
+ do { if ((!g_debug) && ('T' == d)) \
+ trace_tx_frame(u); \
+ else if ((!g_debug) && ('R' == d)) \
+ trace_rx_frame(u); \
+ else if ((('T' == d) && (g_debug & TRC_FLG(T))) || \
+ (('R' == d) && (g_debug & TRC_FLG(R)))) \
+ oz_trace_f_skb(u, d); } while (0)
+
+#define oz_trace_msg(f, ...) \
+ do { if (!g_debug) \
+ trace_debug_log(TRC_##f, __VA_ARGS__); \
+ else if (g_debug & TRC_FLG(f)) \
+ printk("OZ " #f " " __VA_ARGS__); } while(0)
+
+enum {
+ TRACE_HCD_MSG,
+ TRACE_ISOC_MSG,
+ TRACE_INFO_MSG
+};
+
+#define trace_hcd_msg(fmt, ...)\
+ trace_dbg_msg(TRACE_HCD_MSG, fmt, ##__VA_ARGS__)
+
+#define trace_isoc_msg(fmt, ...)\
+ trace_dbg_msg(TRACE_ISOC_MSG, fmt, ##__VA_ARGS__)
+
+#define trace_info_msg(fmt, ...)\
+ trace_dbg_msg(TRACE_INFO_MSG, fmt, ##__VA_ARGS__)
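
Usage sketch for the macros above: with g_debug cleared they fall through to the trace events, and setting the matching TRC_FLG() bit diverts them to printk() instead.

	oz_trace_msg(M, "dev_hold(%p)\n", net_dev);	/* 'M' message channel */
	oz_trace_skb(skb, 'T');				/* dump a tx frame */
	oz_trace_urb_in(urb);				/* log a urb submission */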
#endif /* Sentry */
diff --git a/drivers/staging/ozwpan/ozurbparanoia.c b/drivers/staging/ozwpan/ozurbparanoia.c
index 55b9afbbe47b..b05519534444 100644
--- a/drivers/staging/ozwpan/ozurbparanoia.c
+++ b/drivers/staging/ozwpan/ozurbparanoia.c
@@ -4,7 +4,6 @@
* -----------------------------------------------------------------------------
*/
#include <linux/usb.h>
-#include "ozconfig.h"
#ifdef WANT_URB_PARANOIA
#include "ozurbparanoia.h"
#include "oztrace.h"
@@ -22,7 +21,7 @@ void oz_remember_urb(struct urb *urb)
spin_lock_irqsave(&g_urb_mem_lock, irq_state);
if (g_nb_urbs < OZ_MAX_URBS) {
g_urb_memory[g_nb_urbs++] = urb;
- oz_trace("%lu: urb up = %d %p\n", jiffies, g_nb_urbs, urb);
+ oz_trace("urb up = %d %p\n", g_nb_urbs, urb);
} else {
oz_trace("ERROR urb buffer full\n");
}
@@ -42,8 +41,8 @@ int oz_forget_urb(struct urb *urb)
if (--g_nb_urbs > i)
memcpy(&g_urb_memory[i], &g_urb_memory[i+1],
(g_nb_urbs - i) * sizeof(struct urb *));
- oz_trace("%lu: urb down = %d %p\n",
- jiffies, g_nb_urbs, urb);
+ oz_trace("urb down = %d %p\n",
+ g_nb_urbs, urb);
}
}
spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
index 8531438d7586..b1d58e16992a 100644
--- a/drivers/staging/ozwpan/ozusbif.h
+++ b/drivers/staging/ozwpan/ozusbif.h
@@ -13,6 +13,10 @@
void oz_usb_get(void *hpd);
void oz_usb_put(void *hpd);
+/* Reset device.
+*/
+void oz_usb_reset_device(void *hpd);
+
/* Stream functions.
*/
int oz_usb_stream_create(void *hpd, u8 ep_num);
@@ -34,10 +38,16 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
const u8 *data, int data_len);
+void oz_hcd_mark_urb_submitted(void *hport, int ep_ix, u8 req_id);
+
/* Indication functions.
*/
void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len);
int oz_hcd_heartbeat(void *hport);
+/* Get information.
+ */
+u8 oz_get_up_max_buffer_units(void *hpd);
+
#endif /* _OZUSBIF_H */
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
index 543a9415975c..c763c089ebc9 100644
--- a/drivers/staging/ozwpan/ozusbsvc.c
+++ b/drivers/staging/ozwpan/ozusbsvc.c
@@ -18,7 +18,6 @@
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
-#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
@@ -27,14 +26,12 @@
#include "ozhcd.h"
#include "oztrace.h"
#include "ozusbsvc.h"
-#include "ozevent.h"
/*------------------------------------------------------------------------------
* This is called once when the driver is loaded to initialise the USB service.
* Context: process
*/
int oz_usb_init(void)
{
- oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_USB, NULL, 0);
return oz_hcd_init();
}
/*------------------------------------------------------------------------------
@@ -43,10 +40,25 @@ int oz_usb_init(void)
*/
void oz_usb_term(void)
{
- oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_USB, NULL, 0);
oz_hcd_term();
}
/*------------------------------------------------------------------------------
+ * This is called when the HCD receives a FEAT_RESET request from the hub.
+ * If the PD is asleep, the PD is removed, since it cannot respond to any
+ * host action.
+ */
+void oz_usb_reset_device(void *hpd)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ struct oz_pd *pd = usb_ctx->pd;
+ if (pd == NULL)
+ return;
+ oz_pd_get(pd);
+ if (!(pd->state & OZ_PD_S_CONNECTED)) {
+ oz_trace_msg(M, "Remove device\n");
+ oz_pd_stop(pd);
+ }
+ oz_pd_put(pd);
+}
+/*------------------------------------------------------------------------------
* This is called when the USB service is started or resumed for a PD.
* Context: softirq
*/
@@ -55,7 +67,6 @@ int oz_usb_start(struct oz_pd *pd, int resume)
int rc = 0;
struct oz_usb_ctx *usb_ctx;
struct oz_usb_ctx *old_ctx;
- oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_USB, NULL, resume);
if (resume) {
oz_trace("USB service resumed.\n");
return 0;
@@ -81,7 +92,6 @@ int oz_usb_start(struct oz_pd *pd, int resume)
oz_usb_get(pd->app_ctx[OZ_APPID_USB-1]);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (old_ctx) {
- oz_trace("Already have USB context.\n");
kfree(usb_ctx);
usb_ctx = old_ctx;
} else if (usb_ctx) {
@@ -99,7 +109,6 @@ int oz_usb_start(struct oz_pd *pd, int resume)
} else {
usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
if (usb_ctx->hport == NULL) {
- oz_trace("USB hub returned null port.\n");
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
@@ -117,7 +126,6 @@ int oz_usb_start(struct oz_pd *pd, int resume)
void oz_usb_stop(struct oz_pd *pd, int pause)
{
struct oz_usb_ctx *usb_ctx;
- oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_USB, NULL, pause);
if (pause) {
oz_trace("USB service paused.\n");
return;
@@ -127,7 +135,8 @@ void oz_usb_stop(struct oz_pd *pd, int pause)
pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (usb_ctx) {
- unsigned long tout = jiffies + HZ;
+ struct timespec ts, now;
+ getnstimeofday(&ts);
oz_trace("USB service stopping...\n");
usb_ctx->stopped = 1;
/* At this point the reference count on the usb context should
@@ -136,10 +145,13 @@ void oz_usb_stop(struct oz_pd *pd, int pause)
* should get in but someone may already be in. So wait
* until they leave but timeout after 1 second.
*/
- while ((atomic_read(&usb_ctx->ref_count) > 2) &&
- time_before(jiffies, tout))
- ;
- oz_trace("USB service stopped.\n");
+ while ((atomic_read(&usb_ctx->ref_count) > 2)) {
+ getnstimeofday(&now);
+ /* Approx. 1 second; this is not a precise calculation. */
+ if (now.tv_sec != ts.tv_sec)
+ break;
+ }
+ oz_trace_msg(M, "USB service stopped.\n");
oz_hcd_pd_departed(usb_ctx->hport);
/* Release the reference taken in oz_usb_start.
*/
@@ -165,7 +177,6 @@ void oz_usb_put(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
if (atomic_dec_and_test(&usb_ctx->ref_count)) {
- oz_trace("Dealloc USB context.\n");
oz_pd_put(usb_ctx->pd);
kfree(usb_ctx);
}
@@ -200,7 +211,6 @@ int oz_usb_stream_create(void *hpd, u8 ep_num)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
struct oz_pd *pd = usb_ctx->pd;
- oz_trace("oz_usb_stream_create(0x%x)\n", ep_num);
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
oz_isoc_stream_create(pd, ep_num);
} else {
@@ -222,7 +232,6 @@ int oz_usb_stream_delete(void *hpd, u8 ep_num)
if (usb_ctx) {
struct oz_pd *pd = usb_ctx->pd;
if (pd) {
- oz_trace("oz_usb_stream_delete(0x%x)\n", ep_num);
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
oz_isoc_stream_delete(pd, ep_num);
} else {
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index 4e4b650fee3f..609d7119051a 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -13,7 +13,6 @@
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
-#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
@@ -22,10 +21,24 @@
#include "ozhcd.h"
#include "oztrace.h"
#include "ozusbsvc.h"
-#include "ozevent.h"
/*------------------------------------------------------------------------------
*/
#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed))
+
+/*------------------------------------------------------------------------------
+ * Context: softirq
+ */
+static void oz_usb_setup_elt_completion_callback(struct oz_pd *pd, long context)
+{
+ struct oz_usb_ctx *ctx;
+ spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+ ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
+ if (ctx) {
+ u8 req_id = (u8)context;
+ oz_hcd_mark_urb_submitted(ctx->hport, 0, req_id);
+ }
+ spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
+}
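
Each setup-type element below registers this callback with its req_id as the context. The invocation site is in ozeltbuf.c, outside this diff; its assumed shape is:

	if (ei->callback)
		ei->callback(pd, ei->context);	/* req_id -> mark URB submitted */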
/*------------------------------------------------------------------------------
* Context: softirq
*/
@@ -63,16 +76,12 @@ int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
struct oz_get_desc_req *body;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
- oz_trace(" req_type = 0x%x\n", req_type);
- oz_trace(" desc_type = 0x%x\n", desc_type);
- oz_trace(" index = 0x%x\n", index);
- oz_trace(" windex = 0x%x\n", windex);
- oz_trace(" offset = 0x%x\n", offset);
- oz_trace(" len = 0x%x\n", len);
if (len > 200)
len = 200;
- if (ei == NULL)
+ if (ei == 0)
return -1;
+ ei->callback = oz_usb_setup_elt_completion_callback;
+ ei->context = req_id;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_get_desc_req);
body = (struct oz_get_desc_req *)(elt+1);
@@ -97,8 +106,10 @@ static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_config_req *body;
- if (ei == NULL)
+ if (ei == 0)
return -1;
+ ei->callback = oz_usb_setup_elt_completion_callback;
+ ei->context = req_id;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_set_config_req);
body = (struct oz_set_config_req *)(elt+1);
@@ -118,8 +129,10 @@ static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_interface_req *body;
- if (ei == NULL)
+ if (ei == 0)
return -1;
+ ei->callback = oz_usb_setup_elt_completion_callback;
+ ei->context = req_id;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_set_interface_req);
body = (struct oz_set_interface_req *)(elt+1);
@@ -141,9 +154,11 @@ static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_feature_req *body;
- if (ei == NULL)
+ if (ei == 0)
return -1;
elt = (struct oz_elt *)ei->data;
+ ei->callback = oz_usb_setup_elt_completion_callback;
+ ei->context = req_id;
elt->length = sizeof(struct oz_feature_req);
body = (struct oz_feature_req *)(elt+1);
body->type = type;
@@ -165,8 +180,10 @@ static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_vendor_class_req *body;
- if (ei == NULL)
+ if (ei == 0)
return -1;
+ ei->callback = oz_usb_setup_elt_completion_callback;
+ ei->context = req_id;
elt = (struct oz_elt *)ei->data;
elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
body = (struct oz_vendor_class_req *)(elt+1);
@@ -190,10 +207,7 @@ int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
unsigned windex = le16_to_cpu(setup->wIndex);
unsigned wlength = le16_to_cpu(setup->wLength);
int rc = 0;
- oz_event_log(OZ_EVT_CTRL_REQ, setup->bRequest, req_id,
- (void *)(((unsigned long)(setup->wValue))<<16 |
- ((unsigned long)setup->wIndex)),
- setup->bRequestType);
+
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
@@ -245,10 +259,14 @@ int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
struct usb_iso_packet_descriptor *desc;
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
+ urb->actual_length = 0;
for (i = 0; i < urb->number_of_packets; i++) {
u8 *data;
desc = &urb->iso_frame_desc[i];
data = ((u8 *)urb->transfer_buffer)+desc->offset;
+ desc->status = 0;
+ desc->actual_length = desc->length;
+ urb->actual_length += desc->length;
oz_send_isoc_unit(pd, ep_num, data, desc->length);
}
return 0;
@@ -264,7 +282,7 @@ int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
int unit_count;
int unit_size;
int rem;
- if (ei == NULL)
+ if (ei == 0)
return -1;
rem = MAX_ISOC_FIXED_DATA;
elt = (struct oz_elt *)ei->data;
@@ -305,7 +323,7 @@ int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
/*------------------------------------------------------------------------------
* Context: softirq-serialized
*/
-static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
+void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
struct oz_usb_hdr *usb_hdr, int len)
{
struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
@@ -359,7 +377,7 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
- if (usb_ctx == NULL)
+ if (usb_ctx == 0)
return; /* Context has gone so nothing to do. */
if (usb_ctx->stopped)
goto done;
@@ -369,7 +387,9 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
if (usb_hdr->elt_seq_num != 0) {
if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0)
/* Reject duplicate element. */
- goto done;
+ oz_trace_msg(M, "USB seq overlap %02X %02X\n",
+ usb_ctx->rx_seq_num,
+ usb_hdr->elt_seq_num);
}
usb_ctx->rx_seq_num = usb_hdr->elt_seq_num;
switch (usb_hdr->type) {
@@ -381,7 +401,6 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
u16 offs = le16_to_cpu(get_unaligned(&body->offset));
u16 total_size =
le16_to_cpu(get_unaligned(&body->total_size));
- oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
body->rcode, body->data,
data_len, offs, total_size);
@@ -391,14 +410,14 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
struct oz_set_config_rsp *body =
(struct oz_set_config_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
- body->rcode, NULL, 0);
+ body->rcode, 0, 0);
}
break;
case OZ_SET_INTERFACE_RSP: {
struct oz_set_interface_rsp *body =
(struct oz_set_interface_rsp *)usb_hdr;
oz_hcd_control_cnf(usb_ctx->hport,
- body->req_id, body->rcode, NULL, 0);
+ body->req_id, body->rcode, 0, 0);
}
break;
case OZ_VENDOR_CLASS_RSP: {
@@ -427,7 +446,7 @@ void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
if (usb_ctx)
oz_usb_get(usb_ctx);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
- if (usb_ctx == NULL)
+ if (usb_ctx == 0)
return; /* Context has gone so nothing to do. */
if (!usb_ctx->stopped) {
oz_trace("Farewell indicated ep = 0x%x\n", ep_num);
@@ -435,3 +454,11 @@ void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
}
oz_usb_put(usb_ctx);
}
+/*------------------------------------------------------------------------------
+ * Context: softirq or process
+ */
+u8 oz_get_up_max_buffer_units(void *hpd)
+{
+ struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+ return usb_ctx->pd->up_audio_buf;
+}
diff --git a/drivers/staging/pasr/Kconfig b/drivers/staging/pasr/Kconfig
new file mode 100644
index 000000000000..6bd24217edc6
--- /dev/null
+++ b/drivers/staging/pasr/Kconfig
@@ -0,0 +1,14 @@
+config ARCH_HAS_PASR
+ bool
+
+config PASR
+ bool "DDR Partial Array Self-Refresh"
+ depends on ARCH_HAS_PASR
+ ---help---
+ PASR consists of masking the refresh of unused segments or banks
+ while the DDR is in self-refresh state.
+
+config PASR_DEBUG
+ bool "Add PASR debug prints"
+ def_bool n
+ depends on PASR
diff --git a/drivers/staging/pasr/Makefile b/drivers/staging/pasr/Makefile
new file mode 100644
index 000000000000..d17229474e6d
--- /dev/null
+++ b/drivers/staging/pasr/Makefile
@@ -0,0 +1,5 @@
+pasr-objs := helper.o init.o core.o
+
+obj-$(CONFIG_PASR) += pasr.o
+
+ccflags-$(CONFIG_PASR_DEBUG) := -DDEBUG
diff --git a/drivers/staging/pasr/core.c b/drivers/staging/pasr/core.c
new file mode 100644
index 000000000000..f0b3479a40b7
--- /dev/null
+++ b/drivers/staging/pasr/core.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Maxime Coquelin <maxime.coquelin@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/pasr.h>
+
+#include "helper.h"
+
+enum pasr_state {
+ PASR_REFRESH,
+ PASR_NO_REFRESH,
+};
+
+struct pasr_fw {
+ struct pasr_map *map;
+};
+
+static struct pasr_fw pasr;
+
+void pasr_update_mask(struct pasr_section *section, enum pasr_state state)
+{
+ struct pasr_die *die = section->die;
+ phys_addr_t addr = section->start - die->start;
+ u8 bit = addr >> section_bit;
+
+ if (state == PASR_REFRESH)
+ die->mem_reg &= ~(1 << bit);
+ else
+ die->mem_reg |= (1 << bit);
+
+ pr_debug("%s(): %s refresh section 0x%08x. segment %#9llx Die%d mem_reg = 0x%02x\n"
+ , __func__, state == PASR_REFRESH ? "Start" : "Stop"
+ , bit, (u64)section->start, die->idx, die->mem_reg);
+
+ if (die->apply_mask)
+ die->apply_mask(&die->mem_reg, die->cookie);
+
+ return;
+}
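
A worked example of the bit arithmetic above, assuming 64 MiB sections (section_bit == 26): a section starting 128 MiB into its die lands on bit 2 of mem_reg.

	phys_addr_t offset = 0x08000000;	/* 128 MiB into the die */
	u8 bit = offset >> 26;			/* section_bit == 26  ->  bit == 2 */

	/* PASR_NO_REFRESH then sets die->mem_reg |= (1 << 2), masking the
	 * refresh of that segment while the DDR is in self-refresh. */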
+
+void pasr_put(phys_addr_t paddr, u64 size)
+{
+ struct pasr_section *s;
+ u64 cur_sz;
+ unsigned long flags = 0;
+
+ if (!pasr.map) {
+ WARN_ONCE(1, KERN_INFO"%s(): Map not initialized.\n"
+ "\tCommand line parameters missing or incorrect\n"
+ , __func__);
+ goto out;
+ }
+
+ do {
+ s = pasr_addr2section(pasr.map, paddr);
+ if (!s) {
+ pasr.map = NULL;
+ WARN_ONCE(1, KERN_INFO"%s(): Segment missing,\
+ PASR disabled\n", __func__);
+ goto out;
+ }
+
+ cur_sz = ((paddr + size - 1) < (s->start + section_size - 1)) ?
+ size : s->start + section_size - paddr;
+
+ if (s->lock)
+ spin_lock_irqsave(s->lock, flags);
+
+ s->free_size += cur_sz;
+
+ if (s->free_size < section_size)
+ goto unlock;
+
+ BUG_ON(s->free_size > section_size);
+
+ if (!s->pair)
+ pasr_update_mask(s, PASR_NO_REFRESH);
+ else if (s->pair->free_size == section_size) {
+ pasr_update_mask(s, PASR_NO_REFRESH);
+ pasr_update_mask(s->pair, PASR_NO_REFRESH);
+ }
+
+unlock:
+ if (s->lock)
+ spin_unlock_irqrestore(s->lock, flags);
+
+ paddr += cur_sz;
+ size -= cur_sz;
+ } while (size);
+
+out:
+ return;
+}
+
+void pasr_get(phys_addr_t paddr, u64 size)
+{
+ unsigned long flags = 0;
+ u64 cur_sz;
+ struct pasr_section *s;
+
+ if (!pasr.map) {
+		WARN_ONCE(1, "%s(): Map not initialized.\n"
+			  "\tCommand line parameters missing or incorrect\n",
+			  __func__);
+ return;
+ }
+
+ do {
+ s = pasr_addr2section(pasr.map, paddr);
+ if (!s) {
+ pasr.map = NULL;
+			WARN_ONCE(1, "%s(): Section missing, PASR disabled\n",
+				  __func__);
+ goto out;
+ }
+
+ cur_sz = ((paddr + size - 1) < (s->start + section_size - 1)) ?
+ size : s->start + section_size - paddr;
+
+ if (s->lock)
+ spin_lock_irqsave(s->lock, flags);
+
+ if (s->free_size < section_size)
+ goto unlock;
+
+ if (!s->pair) {
+ pasr_update_mask(s, PASR_REFRESH);
+ } else {
+ pasr_update_mask(s, PASR_REFRESH);
+ pasr_update_mask(s->pair, PASR_REFRESH);
+ }
+unlock:
+		BUG_ON(cur_sz > s->free_size);
+ s->free_size -= cur_sz;
+
+ if (s->lock)
+ spin_unlock_irqrestore(s->lock, flags);
+
+ paddr += cur_sz;
+ size -= cur_sz;
+ } while (size);
+
+out:
+ return;
+}
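+
+/*
+ * Expected usage (sketch): the page allocator reports memory entering
+ * its free pool with pasr_put() and claims it back with pasr_get()
+ * before handing it out again, e.g. from hypothetical hooks:
+ *
+ *	pasr_put(page_to_phys(page), PAGE_SIZE << order);
+ *	pasr_get(page_to_phys(page), PAGE_SIZE << order);
+ *
+ * Refresh of a section is only masked once every byte of it (and of
+ * its interleaved pair, if any) is free.
+ */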
+
+int pasr_register_mask_function(phys_addr_t addr, void *function, void *cookie)
+{
+ struct pasr_die *die = pasr_addr2die(pasr.map, addr);
+
+ if (!die) {
+ pr_err("%s: No DDR die corresponding to address 0x%09llx\n",
+ __func__, (u64)addr);
+ return -EINVAL;
+ }
+
+ if (addr != die->start)
+ pr_warn("%s: Addresses mismatch (Die = 0x%09llx, addr = 0x%09llx\n"
+ , __func__, (u64)die->start, (u64)addr);
+
+ die->cookie = cookie;
+ die->apply_mask = function;
+
+ if (die->apply_mask)
+ die->apply_mask(&die->mem_reg, die->cookie);
+
+ return 0;
+}
+
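+/*
+ * Example registration from a (hypothetical) memory-controller driver;
+ * the callback prototype is inferred from the call sites above, which
+ * pass &die->mem_reg and the registered cookie:
+ *
+ *	static void mc_apply_mask(u8 *mem_reg, void *cookie)
+ *	{
+ *		struct mc_dev *mc = cookie;
+ *
+ *		writel(*mem_reg, mc->pasr_reg);
+ *	}
+ *
+ *	pasr_register_mask_function(die_base_addr, mc_apply_mask, mc);
+ */
+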
+int __init pasr_init_core(struct pasr_map *map)
+{
+ pasr.map = map;
+ return 0;
+}
diff --git a/drivers/staging/pasr/helper.c b/drivers/staging/pasr/helper.c
new file mode 100644
index 000000000000..6c270af93af4
--- /dev/null
+++ b/drivers/staging/pasr/helper.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Maxime Coquelin <maxime.coquelin@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/pasr.h>
+
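+/*
+ * Dichotomic search for the die containing @addr. Die ranges are
+ * half-open: ->start is inclusive, ->end is exclusive (see
+ * pasr_build_map() in init.c).
+ */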
+struct pasr_die *pasr_addr2die(struct pasr_map *map, phys_addr_t addr)
+{
+ unsigned int left, right, mid;
+
+ if (!map)
+ return NULL;
+
+ left = 0;
+ right = map->nr_dies;
+
+ while (left != right) {
+ struct pasr_die *d;
+
+ mid = (left + right) >> 1;
+
+ d = &map->die[mid];
+
+ if ((addr >= d->start) && (addr < d->end))
+ return d;
+
+ if (left == mid || right == mid)
+ break;
+
+ if (addr > d->end)
+ left = mid;
+ else
+ right = mid;
+ }
+
+ pr_debug("%s: No die found for address %#9llx",
+ __func__, (u64)addr);
+ return NULL;
+}
+
+struct pasr_section *pasr_addr2section(struct pasr_map *map,
+				       phys_addr_t addr)
+{
+ unsigned int left, right, mid;
+ struct pasr_die *die;
+
+	/* Find the die the address is located in */
+ die = pasr_addr2die(map, addr);
+ if (!die)
+ goto err;
+
+ left = 0;
+ right = die->nr_sections;
+
+ addr &= ~(section_size - 1);
+
+ while (left != right) {
+ struct pasr_section *s;
+
+ mid = (left + right) >> 1;
+ s = &die->section[mid];
+
+ if (addr == s->start)
+ return s;
+
+ if (left == mid || right == mid)
+ break;
+
+ if (addr > s->start)
+ left = mid;
+ else
+ right = mid;
+ }
+
+err:
+ /* Provided address isn't in any declared section */
+ pr_debug("%s: No section found for address %#9llx",
+ __func__, (u64)addr);
+
+ return NULL;
+}
+
+phys_addr_t pasr_section2addr(struct pasr_section *s)
+{
+ return s->start;
+}
diff --git a/drivers/staging/pasr/helper.h b/drivers/staging/pasr/helper.h
new file mode 100644
index 000000000000..6488f2f8833a
--- /dev/null
+++ b/drivers/staging/pasr/helper.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Maxime Coquelin <maxime.coquelin@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _PASR_HELPER_H
+#define _PASR_HELPER_H
+
+#include <linux/pasr.h>
+
+struct pasr_die *pasr_addr2die(struct pasr_map *map, phys_addr_t addr);
+struct pasr_section *pasr_addr2section(struct pasr_map *map, phys_addr_t addr);
+phys_addr_t pasr_section2addr(struct pasr_section *s);
+
+#endif /* _PASR_HELPER_H */
diff --git a/drivers/staging/pasr/init.c b/drivers/staging/pasr/init.c
new file mode 100644
index 000000000000..c93bf1707431
--- /dev/null
+++ b/drivers/staging/pasr/init.c
@@ -0,0 +1,478 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Maxime Coquelin <maxime.coquelin@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/sort.h>
+#include <linux/pasr.h>
+#include <linux/debugfs.h>
+
+#include "helper.h"
+
+#define NR_DIES 8
+#define NR_INT 8
+
+struct ddr_die {
+ u64 size;
+ phys_addr_t addr;
+};
+
+struct interleaved_area {
+ phys_addr_t addr1;
+ phys_addr_t addr2;
+ u64 size;
+};
+
+struct pasr_info {
+ int nr_dies;
+ struct ddr_die die[NR_DIES];
+
+ int nr_int;
+ struct interleaved_area int_area[NR_INT];
+};
+
+static struct pasr_info __initdata pasr_info;
+static struct pasr_map pasr_map;
+u64 section_size;
+unsigned int section_bit;
+
+static void add_ddr_die(phys_addr_t addr, u64 size);
+static void add_interleaved_area(phys_addr_t a1,
+ phys_addr_t a2, u64 size);
+
+static int __init section_param(char *p)
+{
+ section_size = memparse(p, &p);
+	section_bit = ilog2(section_size);
+
+ return 0;
+}
+early_param("section", section_param);
+
+static int __init ddr_die_param(char *p)
+{
+ phys_addr_t start;
+ u64 size;
+
+ size = memparse(p, &p);
+
+ if (*p != '@')
+ goto err;
+
+ start = memparse(p + 1, &p);
+
+ add_ddr_die(start, size);
+
+ return 0;
+err:
+ return -EINVAL;
+}
+early_param("ddr_die", ddr_die_param);
+
+static int __init interleaved_param(char *p)
+{
+ phys_addr_t start1, start2;
+ u64 size;
+
+ size = memparse(p, &p);
+
+ if (*p != '@')
+ goto err;
+
+ start1 = memparse(p + 1, &p);
+
+ if (*p != ':')
+ goto err;
+
+ start2 = memparse(p + 1, &p);
+
+ add_interleaved_area(start1, start2, size);
+
+ return 0;
+err:
+ return -EINVAL;
+}
+early_param("interleaved", interleaved_param);
+
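+/*
+ * Example command line for a hypothetical board with two 512M dies,
+ * 64M sections and 128M of interleaving (values are illustrative
+ * only):
+ *
+ *	section=64M ddr_die=512M@0x80000000 ddr_die=512M@0xa0000000
+ *	interleaved=128M@0x80000000:0xa0000000
+ */
+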
+static void __init add_ddr_die(phys_addr_t addr, u64 size)
+{
+ BUG_ON(pasr_info.nr_dies >= NR_DIES);
+
+ pasr_info.die[pasr_info.nr_dies].addr = addr;
+ pasr_info.die[pasr_info.nr_dies++].size = size;
+}
+
+static void __init add_interleaved_area(phys_addr_t a1, phys_addr_t a2,
+					u64 size)
+{
+ BUG_ON(pasr_info.nr_int >= NR_INT);
+
+ pasr_info.int_area[pasr_info.nr_int].addr1 = a1;
+ pasr_info.int_area[pasr_info.nr_int].addr2 = a2;
+ pasr_info.int_area[pasr_info.nr_int++].size = size;
+}
+
+#ifdef DEBUG
+static void __init pasr_print_info(struct pasr_info *info)
+{
+ int i;
+
+ pr_info("PASR information coherent\n");
+
+
+ pr_info("DDR Dies layout:\n");
+ pr_info("\tid - start address - end address\n");
+ for (i = 0; i < info->nr_dies; i++)
+ pr_info("\t- %d : %#09llx - %#09llx\n",
+ i, (u64)info->die[i].addr,
+ (u64)(info->die[i].addr
+ + info->die[i].size - 1));
+
+ if (info->nr_int == 0) {
+ pr_info("No interleaved areas declared\n");
+ return;
+ }
+
+ pr_info("Interleaving layout:\n");
+ pr_info("\tid - start @1 - end @2 : start @2 - end @2\n");
+ for (i = 0; i < info->nr_int; i++)
+ pr_info("\t-%d - %#09llx - %#09llx : %#09llx - %#09llx\n"
+ , i
+ , (u64)info->int_area[i].addr1
+ , (u64)(info->int_area[i].addr1
+ + info->int_area[i].size - 1)
+ , (u64)info->int_area[i].addr2
+ , (u64)(info->int_area[i].addr2
+ + info->int_area[i].size - 1));
+}
+#else
+#define pasr_print_info(info) do {} while (0)
+#endif /* DEBUG */
+
+static int __init is_in_physmem(phys_addr_t addr, struct ddr_die *d)
+{
+ return ((addr >= d->addr) && (addr <= d->addr + d->size - 1));
+}
+
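+/*
+ * An interleaved area contributes four boundary addresses (the first
+ * and last byte of each half); it is accepted only if every boundary
+ * lies inside some declared die, i.e. err drops back to zero.
+ */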
+static int __init pasr_check_interleave_in_physmem(struct pasr_info *info,
+ struct interleaved_area *i)
+{
+ struct ddr_die *d;
+ int j;
+ int err = 4;
+
+ for (j = 0; j < info->nr_dies; j++) {
+ d = &info->die[j];
+
+ if (is_in_physmem(i->addr1, d))
+ err--;
+ if (is_in_physmem(i->addr1 + i->size - 1, d))
+ err--;
+ if (is_in_physmem(i->addr2, d))
+ err--;
+ if (is_in_physmem(i->addr2 + i->size - 1, d))
+ err--;
+ }
+
+ return err;
+}
+
+static int __init ddrdie_cmp(const void *_a, const void *_b)
+{
+ const struct ddr_die *a = _a, *b = _b;
+
+ return a->addr < b->addr ? -1 : a->addr > b->addr ? 1 : 0;
+}
+
+static int __init interleaved_cmp(const void *_a, const void *_b)
+{
+ const struct interleaved_area *a = _a, *b = _b;
+
+ return a->addr1 < b->addr1 ? -1 : a->addr1 > b->addr1 ? 1 : 0;
+}
+
+static int __init pasr_info_sanity_check(struct pasr_info *info)
+{
+ int i;
+
+ /* Check at least one physical chunk is defined */
+ if (info->nr_dies == 0) {
+ pr_err("%s: No DDR dies declared in command line\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Sort DDR dies areas */
+ sort(&info->die, info->nr_dies,
+ sizeof(info->die[0]), ddrdie_cmp, NULL);
+
+ /* Physical layout checking */
+ for (i = 0; i < info->nr_dies; i++) {
+ struct ddr_die *d1, *d2;
+
+ d1 = &info->die[i];
+
+ if (d1->size == 0) {
+ pr_err("%s: DDR die at %#x has 0 size\n",
+ __func__, d1->addr);
+ return -EINVAL;
+ }
+
+ /* Check die is aligned on section boundaries */
+ if (((d1->addr & ~(section_size - 1)) != d1->addr)
+ || (((d1->size & ~(section_size - 1))) != d1->size)) {
+ pr_err("%s: DDR die at %#x (size %#llx) \
+ is not aligned on section boundaries %#llx\n",
+ __func__, d1->addr, d1->size, section_size);
+ return -EINVAL;
+ }
+
+ if (i == 0)
+ continue;
+
+ /* Check areas are not overlapping */
+ d2 = d1;
+ d1 = &info->die[i-1];
+ if ((d1->addr + d1->size - 1) >= d2->addr) {
+ pr_err("%s: DDR dies at %#x and %#x are overlapping\n",
+ __func__, d1->addr, d2->addr);
+ return -EINVAL;
+ }
+ }
+
+ /* Interleave layout checking */
+ if (info->nr_int == 0)
+ goto out;
+
+ /* Sort interleaved areas */
+ sort(&info->int_area, info->nr_int,
+ sizeof(info->int_area[0]), interleaved_cmp, NULL);
+
+ for (i = 0; i < info->nr_int; i++) {
+ struct interleaved_area *i1;
+
+ i1 = &info->int_area[i];
+ if (i1->size == 0) {
+ pr_err("%s: Interleaved area %#x/%#x has 0 size\n",
+ __func__, i1->addr1, i1->addr2);
+ return -EINVAL;
+ }
+
+ /* Check area is aligned on section boundaries */
+ if (((i1->addr1 & ~(section_size - 1)) != i1->addr1)
+ || ((i1->addr2 & ~(section_size - 1)) != i1->addr2)
+ || ((i1->size & ~(section_size - 1)) != i1->size)) {
+ pr_err("%s: Interleaved area at %#x/%#x (size %#lx) \
+ is not aligned on section boundaries %#lx\n",
+ __func__, i1->addr1, i1->addr2, i1->size,
+ section_size);
+ return -EINVAL;
+ }
+
+ /* Check interleaved areas are not overlapping */
+ if ((i1->addr1 + i1->size - 1) >= i1->addr2) {
+ pr_err("%s: Interleaved areas %#x and \
+ %#x are overlapping\n",
+ __func__, i1->addr1, i1->addr2);
+ return -EINVAL;
+ }
+
+ /* Check the interleaved areas are in the physical areas */
+ if (pasr_check_interleave_in_physmem(info, i1)) {
+ pr_err("%s: Interleaved area %#x/%#x \
+ not in physical memory\n",
+ __func__, i1->addr1, i1->addr2);
+ return -EINVAL;
+ }
+ }
+
+out:
+ return 0;
+}
+
+#ifdef DEBUG
+static void __init pasr_print_map(struct pasr_map *map)
+{
+ int i, j;
+
+ if (!map)
+ goto out;
+
+ pr_info("PASR map:\n");
+
+ for (i = 0; i < map->nr_dies; i++) {
+ struct pasr_die *die = &map->die[i];
+
+ pr_info("Die %d:\n", i);
+ for (j = 0; j < die->nr_sections; j++) {
+ struct pasr_section *s = &die->section[j];
+ pr_info("\tSection %d: @ = %#09llx, Pair = %s @%#09llx\n"
+ , j, s->start, s->pair ? "Yes" : "No",
+ s->pair ? s->pair->start : 0);
+ }
+ }
+out:
+ return;
+}
+#else
+#define pasr_print_map(map) do {} while (0)
+#endif /* DEBUG */
+
+static int __init pasr_build_map(struct pasr_info *info, struct pasr_map *map)
+{
+ int i, j;
+ struct pasr_die *die;
+
+ map->nr_dies = info->nr_dies;
+ die = map->die;
+
+ for (i = 0; i < info->nr_dies; i++) {
+ phys_addr_t addr = info->die[i].addr;
+ struct pasr_section *section = die[i].section;
+
+ die[i].start = addr;
+ die[i].idx = i;
+ die[i].nr_sections = info->die[i].size >> section_bit;
+
+ for (j = 0; j < die[i].nr_sections; j++) {
+ section[j].start = addr;
+ addr += section_size;
+ section[j].die = &die[i];
+ }
+
+ die[i].end = addr;
+ }
+
+ for (i = 0; i < info->nr_int; i++) {
+ struct interleaved_area *ia = &info->int_area[i];
+ struct pasr_section *s1, *s2;
+ unsigned long offset = 0;
+
+ for (j = 0; j < (ia->size >> section_bit); j++) {
+ s1 = pasr_addr2section(map, ia->addr1 + offset);
+ s2 = pasr_addr2section(map, ia->addr2 + offset);
+ if (!s1 || !s2)
+ return -EINVAL;
+
+ offset += section_size;
+
+ s1->pair = s2;
+ s2->pair = s1;
+ }
+ }
+ return 0;
+}
+
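+/*
+ * With the example command line above, pasr_build_map() produces two
+ * dies of 8 x 64M sections each; sections 0 and 1 of die 0 are paired
+ * with sections 0 and 1 of die 1, and refresh of a paired section is
+ * only masked once both halves are entirely free.
+ */
+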
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *rootdir;
+
+static int pasr_print_meminfo(struct seq_file *s, void *data)
+{
+ struct pasr_map *map = &pasr_map;
+ unsigned int i, j;
+
+ for (i = 0; i < map->nr_dies; i++) {
+		struct pasr_die *die = &map->die[i];
+
+		seq_printf(s, "die %u\n", i);
+ for (j = 0; j < die->nr_sections; j++) {
+ struct pasr_section *section = &die->section[j];
+ u64 percentage;
+
+ percentage = (u64)section->free_size * 100;
+ do_div(percentage, section_size);
+ seq_printf(s, "section %d %llu %llu\n", j, section->free_size,
+ percentage);
+ }
+ }
+ return 0;
+}
+
+static int meminfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pasr_print_meminfo, inode->i_private);
+}
+
+static const struct file_operations meminfo_fops = {
+ .open = meminfo_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init pasr_init_debug(void)
+{
+ struct dentry *d;
+
+ rootdir = debugfs_create_dir("pasr", NULL);
+ if (!rootdir)
+ return -ENOMEM;
+
+ d = debugfs_create_file("meminfo", S_IRUGO, rootdir, (void *)&pasr_map,
+ &meminfo_fops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+late_initcall(pasr_init_debug);
+#endif
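+
+/*
+ * Example meminfo output, given the seq_printf formats above (free
+ * bytes, then percent of the section currently free):
+ *
+ *	# cat /sys/kernel/debug/pasr/meminfo
+ *	die 0
+ *	section 0 67108864 100
+ *	section 1 0 0
+ */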
+
+int __init early_pasr_setup(void)
+{
+ int ret;
+
+ ret = pasr_info_sanity_check(&pasr_info);
+ if (ret) {
+ pr_err("PASR info sanity check failed (err %d)\n", ret);
+ return ret;
+ }
+
+ pasr_print_info(&pasr_info);
+
+ ret = pasr_build_map(&pasr_info, &pasr_map);
+ if (ret) {
+ pr_err("PASR build map failed (err %d)\n", ret);
+ return ret;
+ }
+
+ pasr_print_map(&pasr_map);
+
+ ret = pasr_init_core(&pasr_map);
+
+ pr_debug("PASR: First stage init done.\n");
+
+ return ret;
+}
+
+/*
+ * late_pasr_setup() has to be called after the Linux allocator is
+ * initialized but before the other CPUs are launched.
+ */
+int __init late_pasr_setup(void)
+{
+ int i, j;
+ struct pasr_section *s;
+
+ for_each_pasr_section(i, j, pasr_map, s) {
+ if (!s->lock) {
+ s->lock = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
+ BUG_ON(!s->lock);
+ spin_lock_init(s->lock);
+ if (s->pair)
+ s->pair->lock = s->lock;
+ }
+ }
+
+ pr_debug("PASR Second stage init done.\n");
+
+ return 0;
+}
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index 983314c41349..bd6d4178e690 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -3,7 +3,7 @@ config ZRAM
depends on BLOCK && SYSFS && ZSMALLOC
select LZO_COMPRESS
select LZO_DECOMPRESS
- default n
+ default y
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
Pages written to these disks are compressed and stored in memory
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index a333d44d0cff..cab18d224677 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -278,6 +278,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
+	/*
+	 * A queued-but-unprocessed zram_slot_free_notify() may not have
+	 * freed this slot yet, so double-check before overwriting it.
+	 */
+ if (unlikely(meta->table[index].handle ||
+ zram_test_flag(meta, index, ZRAM_ZERO)))
+ zram_free_page(zram, index);
+
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
meta->compress_workmem);
@@ -335,6 +343,20 @@ out:
return ret;
}
+static void handle_pending_slot_free(struct zram *zram)
+{
+ struct zram_slot_free *free_rq;
+
+ spin_lock(&zram->slot_free_lock);
+ while (zram->slot_free_rq) {
+ free_rq = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq->next;
+ zram_free_page(zram, free_rq->index);
+ kfree(free_rq);
+ }
+ spin_unlock(&zram->slot_free_lock);
+}
+
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, struct bio *bio, int rw)
{
@@ -342,10 +364,12 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
if (rw == READ) {
down_read(&zram->lock);
+ handle_pending_slot_free(zram);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
up_read(&zram->lock);
} else {
down_write(&zram->lock);
+ handle_pending_slot_free(zram);
ret = zram_bvec_write(zram, bvec, index, offset);
up_write(&zram->lock);
}
@@ -581,16 +605,40 @@ void zram_init_device(struct zram *zram, struct zram_meta *meta)
pr_debug("Initialization done!\n");
}
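+
+/*
+ * zram_slot_free_notify() can be called by the swap layer with
+ * spinlocks held, where taking zram->lock (a sleeping rwsem) is not
+ * allowed. Free requests are therefore queued on a spinlock-protected
+ * list and drained either from the work item below or from the next
+ * read/write, which already holds the rwsem.
+ */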
+static void zram_slot_free(struct work_struct *work)
+{
+ struct zram *zram;
+
+ zram = container_of(work, struct zram, free_work);
+ down_write(&zram->lock);
+ handle_pending_slot_free(zram);
+ up_write(&zram->lock);
+}
+
+static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
+{
+ spin_lock(&zram->slot_free_lock);
+ free_rq->next = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq;
+ spin_unlock(&zram->slot_free_lock);
+}
+
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
+ struct zram_slot_free *free_rq;
zram = bdev->bd_disk->private_data;
- down_write(&zram->lock);
- zram_free_page(zram, index);
- up_write(&zram->lock);
zram_stat64_inc(zram, &zram->stats.notify_free);
+
+ free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
+ if (!free_rq)
+ return;
+
+ free_rq->index = index;
+ add_slot_free(zram, free_rq);
+ schedule_work(&zram->free_work);
}
static const struct block_device_operations zram_devops = {
@@ -606,6 +654,10 @@ static int create_device(struct zram *zram, int device_id)
init_rwsem(&zram->init_lock);
spin_lock_init(&zram->stat64_lock);
+ INIT_WORK(&zram->free_work, zram_slot_free);
+ spin_lock_init(&zram->slot_free_lock);
+ zram->slot_free_rq = NULL;
+
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index d542eee81357..ece3524ea403 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -90,12 +90,21 @@ struct zram_meta {
struct zs_pool *mem_pool;
};
+struct zram_slot_free {
+ unsigned long index;
+ struct zram_slot_free *next;
+};
+
struct zram {
struct zram_meta *meta;
spinlock_t stat64_lock; /* protect 64-bit stats */
struct rw_semaphore lock; /* protect compression buffers, table,
* 32bit stat counters against concurrent
* notifications, reads and writes */
+
+ struct work_struct free_work; /* handle pending free request */
+ struct zram_slot_free *slot_free_rq; /* list head of free request */
+
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -106,6 +115,7 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
+ spinlock_t slot_free_lock;
struct zram_stats stats;
};
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
index 7fab032298f3..2fbb21284dda 100644
--- a/drivers/staging/zsmalloc/Kconfig
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -1,6 +1,6 @@
config ZSMALLOC
bool "Memory allocator for compressed pages"
- default n
+ default y
help
zsmalloc is a slab-based memory allocator designed to store
compressed RAM pages. zsmalloc uses virtual memory mapping