author     Chris Dragan <kdragan@nvidia.com>  2014-03-03 05:53:54 -0800
committer  Bo Yan <byan@nvidia.com>           2014-03-07 08:57:24 -0800
commit     da14a4855552c6294e2fda19a818f4c8c590ea57 (patch)
tree       4688b5b30c6124296f690380d8090e859cc6201a
parent     801ecc49f69f8855d84ce4cd07ed30a6cc172f4c (diff)
driver: misc: Add NVIDIA MODS driver 3.42
Change-Id: I814c889ffd61f8f751eb26364e46f071885d0a48
Signed-off-by: Chris Dragan <kdragan@nvidia.com>
Reviewed-on: http://git-master/r/376625
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Lael Jones <lajones@nvidia.com>
Reviewed-by: Vivek Aseeja <vaseeja@nvidia.com>
Reviewed-by: Bo Yan <byan@nvidia.com>
-rw-r--r--  drivers/misc/Kconfig                 13
-rw-r--r--  drivers/misc/Makefile                 1
-rw-r--r--  drivers/misc/mods/Makefile            7
-rw-r--r--  drivers/misc/mods/mods.h            503
-rw-r--r--  drivers/misc/mods/mods_acpi.c       417
-rw-r--r--  drivers/misc/mods/mods_clock.c      437
-rw-r--r--  drivers/misc/mods/mods_config.h      34
-rw-r--r--  drivers/misc/mods/mods_internal.h   446
-rw-r--r--  drivers/misc/mods/mods_irq.c       1026
-rw-r--r--  drivers/misc/mods/mods_krnl.c       993
-rw-r--r--  drivers/misc/mods/mods_mem.c       1141
-rw-r--r--  drivers/misc/mods/mods_pci.c        265
12 files changed, 5283 insertions, 0 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 38d0f4a89bb1..00ca27fe61b2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -633,6 +633,19 @@ config SIM_PALMAS
If yes, the Palmas SIM driver will be enabled. It supports SIM card
detection and SIM configuration for Palmas PMICs.
+config MODS
+ tristate "NVIDIA MODS driver"
+ default n
+ ---help---
+ The NVIDIA MODS driver gives user space software direct access
+ to hardware. The NVIDIA Diagnostic Software uses this to test hardware.
+ This driver should normally be disabled in all production builds.
+
+ Say Y here to compile the NVIDIA MODS driver into the kernel or say M
+ to compile it as a module.
+
+ If unsure, say N here.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index fd744b89a148..1cc429e99c5a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -83,3 +83,4 @@ obj-$(CONFIG_MTK_GPS) += gps/
obj-y += tegra-fuse/
obj-$(CONFIG_DENVER_CPU) += force_idle_t132.o
obj-$(CONFIG_ARCH_TEGRA) +=tegra_timerinfo.o
+obj-$(CONFIG_MODS) += mods/
diff --git a/drivers/misc/mods/Makefile b/drivers/misc/mods/Makefile
new file mode 100644
index 000000000000..986d2d142f82
--- /dev/null
+++ b/drivers/misc/mods/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_MODS) := mods.o
+mods-y := mods_krnl.o
+mods-y += mods_mem.o
+mods-y += mods_irq.o
+mods-$(CONFIG_PCI) += mods_pci.o
+mods-$(CONFIG_ACPI) += mods_acpi.o
+mods-$(CONFIG_ARCH_TEGRA) += mods_clock.o
diff --git a/drivers/misc/mods/mods.h b/drivers/misc/mods/mods.h
new file mode 100644
index 000000000000..57980ec988d8
--- /dev/null
+++ b/drivers/misc/mods/mods.h
@@ -0,0 +1,503 @@
+/*
+ * mods.h - This file is part of NVIDIA MODS kernel driver.
+ *
+ * Copyright (c) 2008-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA MODS kernel driver is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * NVIDIA MODS kernel driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NVIDIA MODS kernel driver.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MODS_H_
+#define _MODS_H_
+
+/* Driver version */
+#define MODS_DRIVER_VERSION_MAJOR 3
+#define MODS_DRIVER_VERSION_MINOR 42
+#define MODS_DRIVER_VERSION ((MODS_DRIVER_VERSION_MAJOR << 8) | \
+ ((MODS_DRIVER_VERSION_MINOR/10) << 4) | \
+ (MODS_DRIVER_VERSION_MINOR%10))
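+/* For example, version 3.42 encodes as (3 << 8) | (4 << 4) | 2 = 0x342,
+ * i.e. the constant reads as the version number when printed in hex. */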
+
+#pragma pack(1)
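+/* Byte packing keeps these structure layouts identical between user space
+ * and the kernel, independent of compiler padding. */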
+
+/* ************************************************************************* */
+/* ** ESCAPE INTERFACE STRUCTURE */
+/* ************************************************************************* */
+
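+/* Identifies a PCI device by bus:device.function (BDF), as in lspci. */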
+struct mods_pci_dev {
+ NvU16 bus;
+ NvU8 device;
+ NvU8 function;
+};
+
+/* MODS_ESC_ALLOC_PAGES */
+struct MODS_ALLOC_PAGES {
+ /* IN */
+ NvU32 num_bytes;
+ NvU32 contiguous;
+ NvU32 address_bits;
+ NvU32 attrib;
+
+ /* OUT */
+ NvU64 memory_handle;
+};
+
+/* MODS_ESC_DEVICE_ALLOC_PAGES */
+struct MODS_DEVICE_ALLOC_PAGES {
+ /* IN */
+ NvU32 num_bytes;
+ NvU32 contiguous;
+ NvU32 address_bits;
+ NvU32 attrib;
+ struct mods_pci_dev pci_device;
+
+ /* OUT */
+ NvU64 memory_handle;
+};
+
+/* MODS_ESC_FREE_PAGES */
+struct MODS_FREE_PAGES {
+ /* IN */
+ NvU64 memory_handle;
+};
+
+/* MODS_ESC_GET_PHYSICAL_ADDRESS */
+struct MODS_GET_PHYSICAL_ADDRESS {
+ /* IN */
+ NvU64 memory_handle;
+ NvU32 offset;
+
+ /* OUT */
+ NvU64 physical_address;
+};
+
+/* MODS_ESC_VIRTUAL_TO_PHYSICAL */
+struct MODS_VIRTUAL_TO_PHYSICAL {
+ /* IN */
+ NvU64 virtual_address;
+
+ /* OUT */
+ NvU64 physical_address;
+};
+
+/* MODS_ESC_PHYSICAL_TO_VIRTUAL */
+struct MODS_PHYSICAL_TO_VIRTUAL {
+ /* IN */
+ NvU64 physical_address;
+
+ /* OUT */
+ NvU64 virtual_address;
+
+};
+
+/* MODS_ESC_FLUSH_CACHE_RANGE */
+#define MODS_FLUSH_CPU_CACHE 1
+#define MODS_INVALIDATE_CPU_CACHE 2
+
+struct MODS_FLUSH_CPU_CACHE_RANGE {
+ /* IN */
+ NvU64 virt_addr_start;
+ NvU64 virt_addr_end;
+ NvU32 flags;
+};
+
+/* MODS_ESC_FIND_PCI_DEVICE */
+struct MODS_FIND_PCI_DEVICE {
+ /* IN */
+ NvU32 device_id;
+ NvU32 vendor_id;
+ NvU32 index;
+
+ /* OUT */
+ NvU32 bus_number;
+ NvU32 device_number;
+ NvU32 function_number;
+};
+
+/* MODS_ESC_FIND_PCI_CLASS_CODE */
+struct MODS_FIND_PCI_CLASS_CODE {
+ /* IN */
+ NvU32 class_code;
+ NvU32 index;
+
+ /* OUT */
+ NvU32 bus_number;
+ NvU32 device_number;
+ NvU32 function_number;
+};
+
+/* MODS_ESC_PCI_READ */
+struct MODS_PCI_READ {
+ /* IN */
+ NvU32 bus_number;
+ NvU32 device_number;
+ NvU32 function_number;
+ NvU32 address;
+ NvU32 data_size;
+
+ /* OUT */
+ NvU32 data;
+};
+
+/* MODS_ESC_PCI_WRITE */
+struct MODS_PCI_WRITE {
+ /* IN */
+ NvU32 bus_number;
+ NvU32 device_number;
+ NvU32 function_number;
+ NvU32 address;
+ NvU32 data;
+ NvU32 data_size;
+};
+
+/* MODS_ESC_PCI_BUS_ADD_DEVICES*/
+struct MODS_PCI_BUS_ADD_DEVICES {
+ /* IN */
+ NvU32 bus;
+};
+
+/* MODS_ESC_PIO_READ */
+struct MODS_PIO_READ {
+ /* IN */
+ NvU16 port;
+ NvU32 data_size;
+
+ /* OUT */
+ NvU32 data;
+};
+
+/* MODS_ESC_PIO_WRITE */
+struct MODS_PIO_WRITE {
+ /* IN */
+ NvU16 port;
+ NvU32 data;
+ NvU32 data_size;
+};
+
+#define INQ_CNT 8
+
+struct mods_irq_data {
+ NvU32 irq;
+ NvU32 delay;
+};
+
+struct mods_irq_status {
+ struct mods_irq_data data[INQ_CNT];
+ NvU32 irqbits:INQ_CNT;
+ NvU32 otherirq:1;
+};
+
+/* MODS_ESC_IRQ */
+struct MODS_IRQ {
+ /* IN */
+ NvU32 cmd;
+ NvU32 size; /* memory size */
+ NvU32 irq; /* the irq number to be registered in driver */
+
+ /* IN OUT */
+ NvU32 channel; /* application id allocated by driver. */
+
+ /* OUT */
+ struct mods_irq_status stat; /* for querying irq */
+ NvU64 phys; /* the memory physical address */
+};
+
+/* MODS_ESC_REGISTER_IRQ */
+/* MODS_ESC_UNREGISTER_IRQ */
+struct MODS_REGISTER_IRQ {
+ /* IN */
+ struct mods_pci_dev dev; /* device which generates the interrupt */
+ NvU8 type; /* MODS_IRQ_TYPE_* */
+};
+
+struct mods_irq {
+	NvU32 delay; /* delay in ns between the irq occurring and
+ MODS querying for it */
+ struct mods_pci_dev dev; /* device which generated the interrupt */
+};
+
+#define MODS_MAX_IRQS 32
+
+/* MODS_ESC_QUERY_IRQ */
+struct MODS_QUERY_IRQ {
+ /* OUT */
+ struct mods_irq irq_list[MODS_MAX_IRQS];
+ NvU8 more; /* indicates that more interrupts are waiting */
+};
+
+#define MODS_IRQ_TYPE_INT 0
+#define MODS_IRQ_TYPE_MSI 1
+#define MODS_IRQ_TYPE_CPU 2
+
+/* MODS_ESC_SET_IRQ_MASK */
+struct MODS_SET_IRQ_MASK {
+ /* IN */
+ NvU64 aperture_addr; /* physical address of aperture */
+ NvU32 aperture_size; /* size of the mapped region */
+ NvU32 reg_offset; /* offset of the irq mask register
+ within the aperture */
+ NvU32 and_mask; /* and mask for clearing bits in
+ the irq mask register */
+ NvU32 or_mask; /* or mask for setting bits in
+ the irq mask register */
+ struct mods_pci_dev dev; /* device identifying interrupt for
+ which the mask will be applied */
+ NvU8 irq_type; /* irq type */
+ NvU8 mask_type; /* mask type */
+};
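+
+/* The mask register update applied by the driver is
+ * new = (old & and_mask) | or_mask; with and_mask == 0 the register is
+ * simply overwritten with or_mask. */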
+
+#define MODS_MASK_TYPE_IRQ_DISABLE 0
+
+#define ACPI_MODS_TYPE_INTEGER 1
+#define ACPI_MODS_TYPE_BUFFER 2
+#define ACPI_MAX_BUFFER_LENGTH 4096
+#define ACPI_MAX_METHOD_LENGTH 12
+#define ACPI_MAX_ARGUMENT_NUMBER 12
+
+union ACPI_ARGUMENT {
+ NvU32 type;
+
+ struct {
+ NvU32 type;
+ NvU32 value;
+ } integer;
+
+ struct {
+ NvU32 type;
+ NvU32 length;
+ NvU32 offset;
+ } buffer;
+};
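+
+/* Example: a 4-byte buffer argument at the start of in_buffer is described
+ * as type ACPI_MODS_TYPE_BUFFER, length 4, offset 0; the driver resolves it
+ * to in_buffer + offset when building the ACPI argument list. */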
+
+/* MODS_ESC_EVAL_ACPI_METHOD */
+struct MODS_EVAL_ACPI_METHOD {
+ /* IN */
+ char method_name[ACPI_MAX_METHOD_LENGTH];
+ NvU32 argument_count;
+ union ACPI_ARGUMENT argument[ACPI_MAX_ARGUMENT_NUMBER];
+ NvU8 in_buffer[ACPI_MAX_BUFFER_LENGTH];
+
+ /* IN OUT */
+ NvU32 out_data_size;
+
+ /* OUT */
+ NvU8 out_buffer[ACPI_MAX_BUFFER_LENGTH];
+ NvU32 out_status;
+};
+
+/* MODS_ESC_EVAL_DEV_ACPI_METHOD */
+struct MODS_EVAL_DEV_ACPI_METHOD {
+ /* IN OUT */
+ struct MODS_EVAL_ACPI_METHOD method;
+
+ /* IN */
+ struct mods_pci_dev device;
+};
+
+/* MODS_ESC_ACPI_GET_DDC */
+struct MODS_ACPI_GET_DDC {
+ /* OUT */
+ NvU32 out_data_size;
+ NvU8 out_buffer[ACPI_MAX_BUFFER_LENGTH];
+
+ /* IN */
+ struct mods_pci_dev device;
+};
+
+/* MODS_ESC_GET_VERSION */
+struct MODS_GET_VERSION {
+ /* OUT */
+ NvU64 version;
+};
+
+/* MODS_ESC_SET_PARA */
+struct MODS_SET_PARA {
+ /* IN */
+ NvU64 Highmem4g;
+ NvU64 debug;
+};
+
+/* MODS_ESC_SET_MEMORY_TYPE */
+struct MODS_MEMORY_TYPE {
+ /* IN */
+ NvU64 physical_address;
+ NvU64 size;
+ NvU32 type;
+};
+
+#define MAX_CLOCK_HANDLE_NAME 64
+
+/* MODS_ESC_GET_CLOCK_HANDLE */
+struct MODS_GET_CLOCK_HANDLE {
+ /* OUT */
+ NvU32 clock_handle;
+
+ /* IN */
+ char device_name[MAX_CLOCK_HANDLE_NAME];
+ char controller_name[MAX_CLOCK_HANDLE_NAME];
+};
+
+/* MODS_ESC_SET_CLOCK_RATE, MODS_ESC_GET_CLOCK_RATE,
+ * MODS_ESC_GET_CLOCK_MAX_RATE, MODS_ESC_SET_CLOCK_MAX_RATE */
+struct MODS_CLOCK_RATE {
+ /* IN/OUT */
+ NvU64 clock_rate_hz;
+
+ /* IN */
+ NvU32 clock_handle;
+};
+
+/* MODS_ESC_SET_CLOCK_PARENT, MODS_ESC_GET_CLOCK_PARENT */
+struct MODS_CLOCK_PARENT {
+ /* IN */
+ NvU32 clock_handle;
+
+ /* IN/OUT */
+ NvU32 clock_parent_handle;
+};
+
+/* MODS_ESC_ENABLE_CLOCK, MODS_ESC_DISABLE_CLOCK, MODS_ESC_CLOCK_RESET_ASSERT,
+ * MODS_ESC_CLOCK_RESET_DEASSERT */
+struct MODS_CLOCK_HANDLE {
+ /* IN */
+ NvU32 clock_handle;
+};
+
+/* MODS_ESC_IS_CLOCK_ENABLED */
+struct MODS_CLOCK_ENABLED {
+ /* IN */
+ NvU32 clock_handle;
+
+ /* OUT */
+ NvU32 enable_count;
+};
+
+/* MODS_ESC_DEVICE_NUMA_INFO */
+#define MAX_CPU_MASKS 32 /* 32 masks of 32bits = 1024 CPUs max */
+struct MODS_DEVICE_NUMA_INFO {
+ /* IN */
+ struct mods_pci_dev pci_device;
+
+ /* OUT */
+ NvS32 node;
+ NvU32 node_count;
+ NvU32 node_cpu_mask[MAX_CPU_MASKS];
+ NvU32 cpu_count;
+};
+
+/* The ids match MODS ids */
+#define MODS_MEMORY_CACHED 5
+#define MODS_MEMORY_UNCACHED 1
+#define MODS_MEMORY_WRITECOMBINE 2
+
+#pragma pack()
+
+/* ************************************************************************* */
+/* ************************************************************************* */
+/* ** */
+/* ** ESCAPE CALLS */
+/* ** */
+/* ************************************************************************* */
+/* ************************************************************************* */
+#define MODS_IOC_MAGIC 'x'
+#define MODS_ESC_ALLOC_PAGES \
+ _IOWR(MODS_IOC_MAGIC, 0, struct MODS_ALLOC_PAGES)
+#define MODS_ESC_FREE_PAGES \
+ _IOWR(MODS_IOC_MAGIC, 1, struct MODS_FREE_PAGES)
+#define MODS_ESC_GET_PHYSICAL_ADDRESS \
+ _IOWR(MODS_IOC_MAGIC, 2, struct MODS_GET_PHYSICAL_ADDRESS)
+#define MODS_ESC_VIRTUAL_TO_PHYSICAL \
+ _IOWR(MODS_IOC_MAGIC, 3, struct MODS_VIRTUAL_TO_PHYSICAL)
+#define MODS_ESC_PHYSICAL_TO_VIRTUAL \
+ _IOWR(MODS_IOC_MAGIC, 4, struct MODS_PHYSICAL_TO_VIRTUAL)
+#define MODS_ESC_FIND_PCI_DEVICE \
+ _IOWR(MODS_IOC_MAGIC, 5, struct MODS_FIND_PCI_DEVICE)
+#define MODS_ESC_FIND_PCI_CLASS_CODE \
+ _IOWR(MODS_IOC_MAGIC, 6, struct MODS_FIND_PCI_CLASS_CODE)
+#define MODS_ESC_PCI_READ \
+ _IOWR(MODS_IOC_MAGIC, 7, struct MODS_PCI_READ)
+#define MODS_ESC_PCI_WRITE \
+ _IOWR(MODS_IOC_MAGIC, 8, struct MODS_PCI_WRITE)
+#define MODS_ESC_PIO_READ \
+ _IOWR(MODS_IOC_MAGIC, 9, struct MODS_PIO_READ)
+#define MODS_ESC_PIO_WRITE \
+ _IOWR(MODS_IOC_MAGIC, 10, struct MODS_PIO_WRITE)
+#define MODS_ESC_IRQ_REGISTER \
+ _IOWR(MODS_IOC_MAGIC, 11, struct MODS_IRQ)
+#define MODS_ESC_IRQ_FREE \
+ _IOWR(MODS_IOC_MAGIC, 12, struct MODS_IRQ)
+#define MODS_ESC_IRQ_INQUIRY \
+ _IOWR(MODS_IOC_MAGIC, 13, struct MODS_IRQ)
+#define MODS_ESC_EVAL_ACPI_METHOD \
+ _IOWR(MODS_IOC_MAGIC, 16, struct MODS_EVAL_ACPI_METHOD)
+#define MODS_ESC_GET_API_VERSION \
+ _IOWR(MODS_IOC_MAGIC, 17, struct MODS_GET_VERSION)
+#define MODS_ESC_GET_KERNEL_VERSION \
+ _IOWR(MODS_IOC_MAGIC, 18, struct MODS_GET_VERSION)
+#define MODS_ESC_SET_DRIVER_PARA \
+ _IOWR(MODS_IOC_MAGIC, 19, struct MODS_SET_PARA)
+#define MODS_ESC_MSI_REGISTER \
+ _IOWR(MODS_IOC_MAGIC, 20, struct MODS_IRQ)
+#define MODS_ESC_REARM_MSI \
+ _IOWR(MODS_IOC_MAGIC, 21, struct MODS_IRQ)
+#define MODS_ESC_SET_MEMORY_TYPE \
+ _IOW(MODS_IOC_MAGIC, 22, struct MODS_MEMORY_TYPE)
+#define MODS_ESC_PCI_BUS_ADD_DEVICES \
+ _IOW(MODS_IOC_MAGIC, 23, struct MODS_PCI_BUS_ADD_DEVICES)
+#define MODS_ESC_REGISTER_IRQ \
+ _IOW(MODS_IOC_MAGIC, 24, struct MODS_REGISTER_IRQ)
+#define MODS_ESC_UNREGISTER_IRQ \
+ _IOW(MODS_IOC_MAGIC, 25, struct MODS_REGISTER_IRQ)
+#define MODS_ESC_QUERY_IRQ \
+ _IOR(MODS_IOC_MAGIC, 26, struct MODS_QUERY_IRQ)
+#define MODS_ESC_EVAL_DEV_ACPI_METHOD \
+ _IOWR(MODS_IOC_MAGIC, 27, struct MODS_EVAL_DEV_ACPI_METHOD)
+#define MODS_ESC_ACPI_GET_DDC \
+ _IOWR(MODS_IOC_MAGIC, 28, struct MODS_ACPI_GET_DDC)
+#define MODS_ESC_GET_CLOCK_HANDLE \
+ _IOWR(MODS_IOC_MAGIC, 29, struct MODS_GET_CLOCK_HANDLE)
+#define MODS_ESC_SET_CLOCK_RATE \
+ _IOW(MODS_IOC_MAGIC, 30, struct MODS_CLOCK_RATE)
+#define MODS_ESC_GET_CLOCK_RATE \
+ _IOWR(MODS_IOC_MAGIC, 31, struct MODS_CLOCK_RATE)
+#define MODS_ESC_SET_CLOCK_PARENT \
+ _IOW(MODS_IOC_MAGIC, 32, struct MODS_CLOCK_PARENT)
+#define MODS_ESC_GET_CLOCK_PARENT \
+ _IOWR(MODS_IOC_MAGIC, 33, struct MODS_CLOCK_PARENT)
+#define MODS_ESC_ENABLE_CLOCK \
+ _IOW(MODS_IOC_MAGIC, 34, struct MODS_CLOCK_HANDLE)
+#define MODS_ESC_DISABLE_CLOCK \
+ _IOW(MODS_IOC_MAGIC, 35, struct MODS_CLOCK_HANDLE)
+#define MODS_ESC_IS_CLOCK_ENABLED \
+ _IOWR(MODS_IOC_MAGIC, 36, struct MODS_CLOCK_ENABLED)
+#define MODS_ESC_CLOCK_RESET_ASSERT \
+ _IOW(MODS_IOC_MAGIC, 37, struct MODS_CLOCK_HANDLE)
+#define MODS_ESC_CLOCK_RESET_DEASSERT \
+ _IOW(MODS_IOC_MAGIC, 38, struct MODS_CLOCK_HANDLE)
+#define MODS_ESC_SET_IRQ_MASK \
+ _IOW(MODS_IOC_MAGIC, 39, struct MODS_SET_IRQ_MASK)
+#define MODS_ESC_MEMORY_BARRIER \
+ _IO(MODS_IOC_MAGIC, 40)
+#define MODS_ESC_IRQ_HANDLED \
+ _IOW(MODS_IOC_MAGIC, 41, struct MODS_REGISTER_IRQ)
+#define MODS_ESC_FLUSH_CPU_CACHE_RANGE \
+ _IOW(MODS_IOC_MAGIC, 42, struct MODS_FLUSH_CPU_CACHE_RANGE)
+#define MODS_ESC_GET_CLOCK_MAX_RATE \
+ _IOWR(MODS_IOC_MAGIC, 43, struct MODS_CLOCK_RATE)
+#define MODS_ESC_SET_CLOCK_MAX_RATE \
+ _IOW(MODS_IOC_MAGIC, 44, struct MODS_CLOCK_RATE)
+#define MODS_ESC_DEVICE_ALLOC_PAGES \
+ _IOWR(MODS_IOC_MAGIC, 45, struct MODS_DEVICE_ALLOC_PAGES)
+#define MODS_ESC_DEVICE_NUMA_INFO \
+ _IOWR(MODS_IOC_MAGIC, 46, struct MODS_DEVICE_NUMA_INFO)
+
+#endif /* _MODS_H_ */
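
The header above is shared with user space, which drives the escape interface
through ioctls on the driver's character device. A minimal sketch of a caller
follows; the /dev/mods node path, the NvU* typedefs and the error handling are
assumptions of this example, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* mods.h expects the NvU* fixed-size types to be defined by the caller */
typedef unsigned char      NvU8;
typedef unsigned short     NvU16;
typedef unsigned int       NvU32;
typedef int                NvS32;
typedef unsigned long long NvU64;
#include "mods.h"

int main(void)
{
	struct MODS_GET_VERSION ver;
	int fd = open("/dev/mods", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ver, 0, sizeof(ver));
	if (ioctl(fd, MODS_ESC_GET_API_VERSION, &ver) == 0)
		printf("MODS API version %u.%02x\n",
		       (unsigned)(ver.version >> 8),
		       (unsigned)(ver.version & 0xff));

	close(fd);
	return 0;
}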
diff --git a/drivers/misc/mods/mods_acpi.c b/drivers/misc/mods/mods_acpi.c
new file mode 100644
index 000000000000..deac0b0a9ddb
--- /dev/null
+++ b/drivers/misc/mods/mods_acpi.c
@@ -0,0 +1,417 @@
+/*
+ * mods_acpi.c - This file is part of NVIDIA MODS kernel driver.
+ *
+ * Copyright (c) 2008-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA MODS kernel driver is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * NVIDIA MODS kernel driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NVIDIA MODS kernel driver.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mods_internal.h"
+
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+
+static acpi_status mods_acpi_find_acpi_handler(acpi_handle,
+ u32,
+ void *,
+ void **);
+
+/*********************
+ * PRIVATE FUNCTIONS *
+ *********************/
+
+/* store handle if found. */
+static void mods_acpi_handle_init(char *method_name, acpi_handle *handler)
+{
+ MODS_ACPI_WALK_NAMESPACE(ACPI_TYPE_ANY,
+ ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ mods_acpi_find_acpi_handler,
+ method_name,
+ handler);
+
+ if (!(*handler)) {
+ mods_debug_printk(DEBUG_ACPI, "ACPI method %s not found\n",
+ method_name);
+ return;
+ }
+}
+
+static acpi_status mods_acpi_find_acpi_handler(
+ acpi_handle handle,
+ u32 nest_level,
+ void *dummy1,
+ void **dummy2
+)
+{
+ acpi_handle acpi_method_handler_temp;
+
+ if (!acpi_get_handle(handle, dummy1, &acpi_method_handler_temp))
+ *dummy2 = acpi_method_handler_temp;
+
+ return OK;
+}
+
+static int mods_extract_acpi_object(
+ char *method,
+ union acpi_object *obj,
+ NvU8 **buf,
+ NvU8 *buf_end
+)
+{
+ int ret = OK;
+ switch (obj->type) {
+
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length == 0) {
+ mods_error_printk(
+ "empty ACPI output buffer from ACPI method %s\n",
+ method);
+ ret = -EINVAL;
+ } else if (obj->buffer.length <= buf_end-*buf) {
+ u32 size = obj->buffer.length;
+ memcpy(*buf, obj->buffer.pointer, size);
+ *buf += size;
+ } else {
+ mods_error_printk(
+ "output buffer too small for ACPI method %s\n",
+ method);
+ ret = -EINVAL;
+ }
+ break;
+
+ case ACPI_TYPE_INTEGER:
+ if (4 <= buf_end-*buf) {
+ if (obj->integer.value > 0xFFFFFFFFU) {
+ mods_error_printk(
+ "integer value from ACPI method %s out of range\n",
+ method);
+ ret = -EINVAL;
+ } else {
+ memcpy(*buf, &obj->integer.value, 4);
+ *buf += 4;
+ }
+ } else {
+ mods_error_printk(
+ "output buffer too small for ACPI method %s\n",
+ method);
+ ret = -EINVAL;
+ }
+ break;
+
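+	/* Packages are flattened element by element into the output buffer;
+	 * every element must expand to the same number of bytes. */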
+ case ACPI_TYPE_PACKAGE:
+ if (obj->package.count == 0) {
+ mods_error_printk(
+ "empty ACPI output package from ACPI method %s\n",
+ method);
+ ret = -EINVAL;
+ } else {
+ union acpi_object *elements = obj->package.elements;
+ u32 size = 0;
+ u32 i;
+ for (i = 0; i < obj->package.count; i++) {
+ NvU8 *old_buf = *buf;
+ ret = mods_extract_acpi_object(method,
+ &elements[i],
+ buf,
+ buf_end);
+ if (ret == OK) {
+ u32 new_size = *buf - old_buf;
+ if (size == 0) {
+ size = new_size;
+ } else if (size != new_size) {
+ mods_error_printk(
+ "ambiguous package element size from ACPI method %s\n",
+ method);
+ ret = -EINVAL;
+ }
+ } else
+ break;
+ }
+ }
+ break;
+
+ default:
+ mods_error_printk(
+ "unsupported ACPI output type 0x%02x from method %s\n",
+ (unsigned)obj->type, method);
+ ret = -EINVAL;
+ break;
+
+ }
+ return ret;
+}
+
+static int mods_eval_acpi_method(struct file *pfile,
+ struct MODS_EVAL_ACPI_METHOD *p,
+ struct mods_pci_dev *pdevice)
+{
+ int ret = OK;
+ int i;
+ acpi_status status;
+ struct acpi_object_list input;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *acpi_method = NULL;
+ union acpi_object acpi_params[ACPI_MAX_ARGUMENT_NUMBER];
+ acpi_handle acpi_method_handler = NULL;
+
+ if (pdevice) {
+#ifdef DEVICE_ACPI_HANDLE
+ unsigned int devfn;
+ struct pci_dev *dev;
+
+ mods_debug_printk(DEBUG_ACPI, "ACPI %s for device %x:%02x.%x\n",
+ p->method_name,
+ (unsigned)pdevice->bus,
+ (unsigned)pdevice->device,
+ (unsigned)pdevice->function);
+
+ devfn = PCI_DEVFN(pdevice->device, pdevice->function);
+ dev = MODS_PCI_GET_SLOT(pdevice->bus, devfn);
+ if (!dev) {
+ mods_error_printk("ACPI: PCI device not found\n");
+ return -EINVAL;
+ }
+ acpi_method_handler = DEVICE_ACPI_HANDLE(&dev->dev);
+#else
+ mods_error_printk(
+ "this kernel does not support per-device ACPI calls\n");
+ return -EINVAL;
+#endif
+ } else {
+ mods_debug_printk(DEBUG_ACPI, "ACPI %s\n", p->method_name);
+ mods_acpi_handle_init(p->method_name, &acpi_method_handler);
+ }
+
+ if (!acpi_method_handler) {
+ mods_debug_printk(DEBUG_ACPI, "ACPI: handle for %s not found\n",
+ p->method_name);
+ return -EINVAL;
+ }
+
+ if (p->argument_count >= ACPI_MAX_ARGUMENT_NUMBER) {
+ mods_error_printk("invalid argument count for ACPI call\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < p->argument_count; i++) {
+ switch (p->argument[i].type) {
+ case ACPI_MODS_TYPE_INTEGER: {
+ acpi_params[i].integer.type = ACPI_TYPE_INTEGER;
+ acpi_params[i].integer.value
+ = p->argument[i].integer.value;
+ break;
+ }
+ case ACPI_MODS_TYPE_BUFFER: {
+ acpi_params[i].buffer.type = ACPI_TYPE_BUFFER;
+ acpi_params[i].buffer.length
+ = p->argument[i].buffer.length;
+ acpi_params[i].buffer.pointer
+ = p->in_buffer + p->argument[i].buffer.offset;
+ break;
+ }
+ default: {
+ mods_error_printk("unsupported ACPI argument type\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ input.count = p->argument_count;
+ input.pointer = acpi_params;
+
+ status = acpi_evaluate_object(acpi_method_handler,
+ pdevice ? p->method_name : NULL,
+ &input,
+ &output);
+
+ if (ACPI_FAILURE(status)) {
+ mods_error_printk("ACPI method %s failed\n", p->method_name);
+ return -EINVAL;
+ }
+
+ acpi_method = output.pointer;
+ if (!acpi_method) {
+ mods_error_printk("missing output from ACPI method %s\n",
+ p->method_name);
+ ret = -EINVAL;
+ } else {
+ NvU8 *buf = p->out_buffer;
+ ret = mods_extract_acpi_object(p->method_name,
+ acpi_method,
+ &buf,
+ buf+sizeof(p->out_buffer));
+ p->out_data_size = (ret == OK) ? (buf - p->out_buffer) : 0;
+ }
+
+ kfree(output.pointer);
+ return ret;
+}
+
+/*************************
+ * ESCAPE CALL FUNCTIONS *
+ *************************/
+
+int esc_mods_eval_acpi_method(struct file *pfile,
+ struct MODS_EVAL_ACPI_METHOD *p)
+{
+ return mods_eval_acpi_method(pfile, p, 0);
+}
+
+int esc_mods_eval_dev_acpi_method(struct file *pfile,
+ struct MODS_EVAL_DEV_ACPI_METHOD *p)
+{
+ return mods_eval_acpi_method(pfile, &p->method, &p->device);
+}
+
+int esc_mods_acpi_get_ddc(struct file *pfile, struct MODS_ACPI_GET_DDC *p)
+{
+#if !defined(DEVICE_ACPI_HANDLE)
+ mods_error_printk(
+ "this kernel does not support per-device ACPI calls\n");
+ return -EINVAL;
+#else
+
+ acpi_status status;
+ struct acpi_device *device = NULL;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *ddc;
+ union acpi_object ddc_arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list input = { 1, &ddc_arg0 };
+ struct list_head *node, *next;
+ NvU32 i;
+ acpi_handle dev_handle = NULL;
+ acpi_handle lcd_dev_handle = NULL;
+
+ mods_debug_printk(DEBUG_ACPI,
+ "ACPI _DDC (EDID) for device %x:%02x.%x\n",
+ (unsigned)p->device.bus,
+ (unsigned)p->device.device,
+ (unsigned)p->device.function);
+
+ {
+ unsigned int devfn = PCI_DEVFN(p->device.device,
+ p->device.function);
+ struct pci_dev *dev = MODS_PCI_GET_SLOT(p->device.bus, devfn);
+ if (!dev) {
+ mods_error_printk("ACPI: PCI device not found\n");
+ return -EINVAL;
+ }
+ dev_handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ }
+ if (!dev_handle) {
+ mods_debug_printk(DEBUG_ACPI,
+ "ACPI: handle for _DDC not found\n");
+ return -EINVAL;
+ }
+ status = acpi_bus_get_device(dev_handle, &device);
+
+ if (ACPI_FAILURE(status) || !device) {
+ mods_error_printk("ACPI: device for _DDC not found\n");
+ return -EINVAL;
+ }
+
+ list_for_each_safe(node, next, &device->children) {
+#ifdef MODS_ACPI_DEVID_64
+ unsigned long long
+#else
+ unsigned long
+#endif
+ device_id = 0;
+
+ struct acpi_device *dev =
+ list_entry(node, struct acpi_device, node);
+
+ if (!dev)
+ continue;
+
+ status = acpi_evaluate_integer(dev->handle,
+ "_ADR",
+ NULL,
+ &device_id);
+ if (ACPI_FAILURE(status))
+			/* Couldn't query device_id for this device */
+ continue;
+
+ device_id = (device_id & 0xffff);
+
+		if ((device_id == 0x0110) || /* Only for an LCD */
+ (device_id == 0x0118) ||
+ (device_id == 0x0400)) {
+
+ lcd_dev_handle = dev->handle;
+ mods_debug_printk(DEBUG_ACPI,
+ "ACPI: Found LCD 0x%x on device %x:%02x.%x\n",
+ (unsigned)device_id,
+ (unsigned)p->device.bus,
+ (unsigned)p->device.device,
+ (unsigned)p->device.function);
+ break;
+ }
+
+ }
+
+ if (lcd_dev_handle == NULL) {
+ mods_error_printk("ACPI: LCD not found for device %x:%02x.%x\n",
+ (unsigned)p->device.bus,
+ (unsigned)p->device.device,
+ (unsigned)p->device.function);
+ return -EINVAL;
+ }
+
+ /*
+ * As per ACPI Spec 3.0:
+ * ARG0 = 0x1 for 128 bytes EDID buffer
+ * ARG0 = 0x2 for 256 bytes EDID buffer
+ */
+ for (i = 1; i <= 2; i++) {
+ ddc_arg0.integer.value = i;
+ status = acpi_evaluate_object(lcd_dev_handle,
+ "_DDC",
+ &input,
+ &output);
+ if (ACPI_SUCCESS(status))
+ break;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ mods_error_printk("ACPI method _DDC (EDID) failed\n");
+ return -EINVAL;
+ }
+
+ ddc = output.pointer;
+ if (ddc && (ddc->type == ACPI_TYPE_BUFFER)
+ && (ddc->buffer.length > 0)) {
+
+ if (ddc->buffer.length <= sizeof(p->out_buffer)) {
+ p->out_data_size = ddc->buffer.length;
+ memcpy(p->out_buffer,
+ ddc->buffer.pointer,
+ p->out_data_size);
+ } else {
+ mods_error_printk(
+ "output buffer too small for ACPI method _DDC (EDID)\n");
+ kfree(output.pointer);
+ return -EINVAL;
+ }
+ } else {
+ mods_error_printk("unsupported ACPI output type\n");
+ kfree(output.pointer);
+ return -EINVAL;
+ }
+
+ kfree(output.pointer);
+ return OK;
+#endif
+}
diff --git a/drivers/misc/mods/mods_clock.c b/drivers/misc/mods/mods_clock.c
new file mode 100644
index 000000000000..246e0a812c22
--- /dev/null
+++ b/drivers/misc/mods/mods_clock.c
@@ -0,0 +1,437 @@
+/*
+ * mods_clock.c - This file is part of NVIDIA MODS kernel driver.
+ *
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA MODS kernel driver is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * NVIDIA MODS kernel driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NVIDIA MODS kernel driver.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mods_internal.h"
+#include <linux/clk.h>
+#include <mach/clk.h>
+#include <../arch/arm/mach-tegra/clock.h>
+
+static struct list_head mods_clock_handles;
+static spinlock_t mods_clock_lock;
+static NvU32 last_handle;
+
+struct clock_entry {
+ struct clk *pclk;
+ NvU32 handle;
+ struct list_head list;
+};
+
+void mods_init_clock_api(void)
+{
+ spin_lock_init(&mods_clock_lock);
+ INIT_LIST_HEAD(&mods_clock_handles);
+ last_handle = 0;
+}
+
+void mods_shutdown_clock_api(void)
+{
+ struct list_head *head = &mods_clock_handles;
+ struct list_head *iter;
+ struct list_head *tmp;
+
+ spin_lock(&mods_clock_lock);
+
+ list_for_each_safe(iter, tmp, head) {
+ struct clock_entry *entry
+ = list_entry(iter, struct clock_entry, list);
+ list_del(iter);
+ MEMDBG_FREE(entry);
+ }
+
+ spin_unlock(&mods_clock_lock);
+}
+
+static NvU32 mods_get_clock_handle(struct clk *pclk)
+{
+ struct list_head *head = &mods_clock_handles;
+ struct list_head *iter;
+ struct clock_entry *entry = 0;
+ NvU32 handle = 0;
+
+ spin_lock(&mods_clock_lock);
+
+ list_for_each(iter, head) {
+ struct clock_entry *cur
+ = list_entry(iter, struct clock_entry, list);
+ if (cur->pclk == pclk) {
+ entry = cur;
+ handle = cur->handle;
+ break;
+ }
+ }
+
+ if (!entry) {
+ MEMDBG_ALLOC(entry, sizeof(*entry));
+		if (likely(entry)) {
+ entry->pclk = pclk;
+ entry->handle = ++last_handle;
+ handle = entry->handle;
+ list_add(&entry->list, &mods_clock_handles);
+ }
+ }
+
+ spin_unlock(&mods_clock_lock);
+
+ return handle;
+}
+
+static struct clk *mods_get_clock(NvU32 handle)
+{
+ struct list_head *head = &mods_clock_handles;
+ struct list_head *iter;
+ struct clk *pclk = 0;
+
+ spin_lock(&mods_clock_lock);
+
+ list_for_each(iter, head) {
+ struct clock_entry *entry
+ = list_entry(iter, struct clock_entry, list);
+ if (entry->handle == handle) {
+ pclk = entry->pclk;
+ break;
+ }
+ }
+
+ spin_unlock(&mods_clock_lock);
+
+ return pclk;
+}
+
+int esc_mods_get_clock_handle(struct file *pfile,
+ struct MODS_GET_CLOCK_HANDLE *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ p->device_name[sizeof(p->device_name)-1] = 0;
+ p->controller_name[sizeof(p->controller_name)-1] = 0;
+ pclk = clk_get_sys(p->device_name, p->controller_name);
+
+ if (IS_ERR(pclk)) {
+ mods_error_printk("invalid clock specified: dev=%s, ctx=%s\n",
+ p->device_name, p->controller_name);
+ } else {
+ p->clock_handle = mods_get_clock_handle(pclk);
+ ret = OK;
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_set_clock_rate(struct file *pfile, struct MODS_CLOCK_RATE *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else {
+ ret = clk_set_rate(pclk, p->clock_rate_hz);
+ if (ret) {
+ mods_error_printk(
+ "unable to set rate %lluHz on clock 0x%x\n",
+ p->clock_rate_hz, p->clock_handle);
+ } else {
+ mods_debug_printk(DEBUG_CLOCK,
+				"successfully set rate %lluHz on clock 0x%x\n",
+ p->clock_rate_hz, p->clock_handle);
+ }
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_get_clock_rate(struct file *pfile, struct MODS_CLOCK_RATE *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else {
+ p->clock_rate_hz = clk_get_rate(pclk);
+ mods_debug_printk(DEBUG_CLOCK, "clock 0x%x has rate %lluHz\n",
+ p->clock_handle, p->clock_rate_hz);
+ ret = OK;
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_get_clock_max_rate(struct file *pfile, struct MODS_CLOCK_RATE *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else if (!pclk->ops || !pclk->ops->round_rate) {
+ mods_error_printk(
+ "unable to detect max rate for clock handle 0x%x\n",
+ p->clock_handle);
+ } else {
+ long rate = pclk->ops->round_rate(pclk, pclk->max_rate);
+ p->clock_rate_hz = rate < 0 ? pclk->max_rate
+ : (unsigned long)rate;
+ mods_debug_printk(DEBUG_CLOCK,
+ "clock 0x%x has max rate %lluHz\n",
+ p->clock_handle, p->clock_rate_hz);
+ ret = OK;
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_set_clock_max_rate(struct file *pfile, struct MODS_CLOCK_RATE *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else {
+#ifdef CONFIG_TEGRA_CLOCK_DEBUG_FUNC
+ ret = tegra_clk_set_max(pclk, p->clock_rate_hz);
+ if (ret) {
+ mods_error_printk(
+ "unable to override max clock rate %lluHz on clock 0x%x\n",
+ p->clock_rate_hz, p->clock_handle);
+ } else {
+ mods_debug_printk(DEBUG_CLOCK,
+				"successfully set max rate %lluHz on clock 0x%x\n",
+ p->clock_rate_hz, p->clock_handle);
+ }
+#else
+ mods_error_printk("unable to override max clock rate\n");
+ mods_error_printk(
+ "reconfigure kernel with CONFIG_TEGRA_CLOCK_DEBUG_FUNC=y\n");
+ ret = -ENOSYS;
+#endif
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_set_clock_parent(struct file *pfile, struct MODS_CLOCK_PARENT *p)
+{
+ struct clk *pclk = 0;
+ struct clk *pparent = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+ pparent = mods_get_clock(p->clock_parent_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else if (!pparent) {
+ mods_error_printk("unrecognized parent clock handle: 0x%x\n",
+ p->clock_parent_handle);
+ } else {
+ ret = clk_set_parent(pclk, pparent);
+ if (ret) {
+ mods_error_printk(
+ "unable to make clock 0x%x parent of clock 0x%x\n",
+ p->clock_parent_handle, p->clock_handle);
+ } else {
+ mods_debug_printk(DEBUG_CLOCK,
+				"successfully made clock 0x%x parent of clock 0x%x\n",
+ p->clock_parent_handle, p->clock_handle);
+ }
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_get_clock_parent(struct file *pfile, struct MODS_CLOCK_PARENT *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else {
+ struct clk *pparent = clk_get_parent(pclk);
+ p->clock_parent_handle = mods_get_clock_handle(pparent);
+ mods_debug_printk(DEBUG_CLOCK,
+ "clock 0x%x is parent of clock 0x%x\n",
+ p->clock_parent_handle, p->clock_handle);
+ ret = OK;
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_enable_clock(struct file *pfile, struct MODS_CLOCK_HANDLE *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else {
+ ret = clk_enable(pclk);
+ if (ret) {
+ mods_error_printk("unable to enable clock 0x%x\n",
+ p->clock_handle);
+ } else {
+ mods_debug_printk(DEBUG_CLOCK, "clock 0x%x enabled\n",
+ p->clock_handle);
+ }
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_disable_clock(struct file *pfile, struct MODS_CLOCK_HANDLE *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else {
+ clk_disable(pclk);
+ mods_debug_printk(DEBUG_CLOCK, "clock 0x%x disabled\n",
+ p->clock_handle);
+ ret = OK;
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_is_clock_enabled(struct file *pfile, struct MODS_CLOCK_ENABLED *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else {
+ p->enable_count = pclk->refcnt;
+ mods_debug_printk(DEBUG_CLOCK,
+ "clock 0x%x enable count is %u\n",
+ p->clock_handle, p->enable_count);
+ ret = OK;
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_clock_reset_assert(struct file *pfile,
+ struct MODS_CLOCK_HANDLE *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else {
+ tegra_periph_reset_assert(pclk);
+ mods_debug_printk(DEBUG_CLOCK, "clock 0x%x reset asserted\n",
+ p->clock_handle);
+ ret = OK;
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_clock_reset_deassert(struct file *pfile,
+ struct MODS_CLOCK_HANDLE *p)
+{
+ struct clk *pclk = 0;
+ int ret = -EINVAL;
+
+ LOG_ENT();
+
+ pclk = mods_get_clock(p->clock_handle);
+
+ if (!pclk) {
+ mods_error_printk("unrecognized clock handle: 0x%x\n",
+ p->clock_handle);
+ } else {
+ tegra_periph_reset_deassert(pclk);
+ mods_debug_printk(DEBUG_CLOCK, "clock 0x%x reset deasserted\n",
+ p->clock_handle);
+ ret = OK;
+ }
+
+ LOG_EXT();
+ return ret;
+}
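
User space reaches these clock handlers through the escape calls declared in
mods.h: it first resolves a handle from a device name/controller name pair,
then passes that handle to the rate, parent and enable calls. A minimal sketch
follows; the clock names are placeholders (valid names depend on the Tegra
clock tree) and fd is assumed to be an open descriptor for the MODS device, as
in the earlier example.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
/* NvU* typedefs and mods.h as in the earlier example */

static int print_clock_rate(int fd)
{
	struct MODS_GET_CLOCK_HANDLE gch;
	struct MODS_CLOCK_RATE rate;

	memset(&gch, 0, sizeof(gch));
	/* placeholder names -- real ones depend on the platform clock tree */
	strncpy(gch.device_name, "sbc1", sizeof(gch.device_name) - 1);
	strncpy(gch.controller_name, "spi", sizeof(gch.controller_name) - 1);
	if (ioctl(fd, MODS_ESC_GET_CLOCK_HANDLE, &gch))
		return -1;

	memset(&rate, 0, sizeof(rate));
	rate.clock_handle = gch.clock_handle;
	if (ioctl(fd, MODS_ESC_GET_CLOCK_RATE, &rate))
		return -1;

	printf("clock 0x%x runs at %llu Hz\n",
	       (unsigned)gch.clock_handle,
	       (unsigned long long)rate.clock_rate_hz);
	return 0;
}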
diff --git a/drivers/misc/mods/mods_config.h b/drivers/misc/mods/mods_config.h
new file mode 100644
index 000000000000..42cddc9cc7e7
--- /dev/null
+++ b/drivers/misc/mods/mods_config.h
@@ -0,0 +1,34 @@
+/*
+ * mods_config.h - This file is part of NVIDIA MODS kernel driver.
+ *
+ * Copyright (c) 2008-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA MODS kernel driver is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * NVIDIA MODS kernel driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NVIDIA MODS kernel driver.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MODS_CONFIG_H_
+#define _MODS_CONFIG_H_
+
+#define MODS_KERNEL_VERSION KERNEL_VERSION(3, 10, 0)
+
+#define MODS_IRQ_HANDLE_NO_REGS 1
+#define MODS_HAS_SET_MEMORY 1
+#define MODS_ACPI_DEVID_64 1
+#define MODS_HAS_WC 1
+#define MODS_HAS_DEV_TO_NUMA_NODE 1
+#define MODS_HAS_NEW_ACPI_WALK 1
+
+#endif /* _MODS_CONFIG_H_ */
+
+/* vim: set ts=8 sw=8 noet: */
diff --git a/drivers/misc/mods/mods_internal.h b/drivers/misc/mods/mods_internal.h
new file mode 100644
index 000000000000..2e24db672b85
--- /dev/null
+++ b/drivers/misc/mods/mods_internal.h
@@ -0,0 +1,446 @@
+/*
+ * mods_internal.h - This file is part of NVIDIA MODS kernel driver.
+ *
+ * Copyright (c) 2008-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA MODS kernel driver is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * NVIDIA MODS kernel driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NVIDIA MODS kernel driver.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MODS_INTERNAL_H_
+#define _MODS_INTERNAL_H_
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#define NvU8 u8
+#define NvU16 u16
+#define NvU32 u32
+#define NvS32 s32
+#define NvU64 u64
+
+#include "mods_config.h"
+#include "mods.h"
+
+#ifndef true
+#define true 1
+#define false 0
+#endif
+
+/* function return code */
+#define OK 0
+#define ERROR -1
+
+#define IRQ_FOUND 1
+#define IRQ_NOT_FOUND 0
+
+#define DEV_FOUND 1
+#define DEV_NOT_FOUND 0
+
+#define MSI_DEV_FOUND 1
+#define MSI_DEV_NOT_FOUND 0
+
+struct SYS_PAGE_TABLE {
+ NvU64 dma_addr;
+ struct page *p_page;
+};
+
+struct en_dev_entry {
+ struct pci_dev *dev;
+ struct en_dev_entry *next;
+};
+
+struct mem_type {
+ NvU64 dma_addr;
+ NvU64 size;
+ NvU32 type;
+};
+
+/* file private data */
+struct mods_file_private_data {
+ struct list_head *mods_alloc_list;
+ struct list_head *mods_mapping_list;
+ wait_queue_head_t interrupt_event;
+ struct en_dev_entry *enabled_devices;
+ int mods_id;
+ struct mem_type mem_type;
+ spinlock_t lock;
+};
+
+/* VM private data */
+struct mods_vm_private_data {
+ struct file *fp;
+ atomic_t usage_count;
+};
+
+/* system memory allocation tracking */
+struct SYS_MEM_MODS_INFO {
+ NvU32 alloc_type;
+
+ /* tells how the memory is cached:
+ * (MODS_MEMORY_CACHED, MODS_MEMORY_UNCACHED, MODS_MEMORY_WRITECOMBINE)
+ */
+ NvU32 cache_type;
+
+ NvU32 length; /* actual number of bytes allocated */
+ NvU32 order; /* 2^order pages allocated (contig alloc) */
+ NvU32 num_pages; /* number of allocated pages */
+ NvU32 k_mapping_ref_cnt;
+
+ NvU32 addr_bits;
+ struct page *p_page;
+ NvU64 logical_addr; /* kernel logical address */
+ NvU64 dma_addr; /* physical address, for contig alloc,
+ machine address on Xen */
+ int numa_node; /* numa node for the allocation */
+
+ /* keeps information about allocated pages for noncontig allocation */
+ struct SYS_PAGE_TABLE **p_page_tbl;
+
+ struct list_head list;
+};
+
+#define MODS_ALLOC_TYPE_NON_CONTIG 0
+#define MODS_ALLOC_TYPE_CONTIG 1
+#define MODS_ALLOC_TYPE_BIGPHYS_AREA 2
+
+/* map memory tracking */
+struct SYS_MAP_MEMORY {
+ NvU32 contiguous;
+ NvU64 dma_addr; /* first physical address of given mapping,
+ machine address on Xen */
+ NvU64 virtual_addr; /* virtual address of given mapping */
+ NvU32 mapping_length; /* tells how many bytes were mapped */
+
+ /* helps to unmap noncontiguous memory, NULL for contiguous */
+ struct SYS_MEM_MODS_INFO *p_mem_info;
+
+ struct list_head list;
+};
+
+/* functions used to avoid global debug variables */
+int mods_check_debug_level(int);
+int mods_get_mem4g(void);
+int mods_get_highmem4g(void);
+void mods_set_highmem4g(int);
+int mods_get_multi_instance(void);
+int mods_get_mem4goffset(void);
+
+#define IRQ_MAX (256+PCI_IRQ_MAX)
+#define PCI_IRQ_MAX 15
+#define MODS_CHANNEL_MAX 32
+
+#define IRQ_VAL_POISON 0xfafbfcfdU
+
+/* debug print masks */
+#define DEBUG_IOCTL 0x2
+#define DEBUG_PCICFG 0x4
+#define DEBUG_ACPI 0x8
+#define DEBUG_ISR 0x10
+#define DEBUG_MEM 0x20
+#define DEBUG_FUNC 0x40
+#define DEBUG_CLOCK 0x80
+#define DEBUG_DETAILED 0x100
+#define DEBUG_ISR_DETAILED (DEBUG_ISR | DEBUG_DETAILED)
+#define DEBUG_MEM_DETAILED (DEBUG_MEM | DEBUG_DETAILED)
+
+#define LOG_ENT() mods_debug_printk(DEBUG_FUNC, "> %s\n", __func__)
+#define LOG_EXT() mods_debug_printk(DEBUG_FUNC, "< %s\n", __func__)
+#define LOG_ENT_C(format, args...) \
+ mods_debug_printk(DEBUG_FUNC, "> %s: " format, __func__, ##args)
+#define LOG_EXT_C(format, args...) \
+ mods_debug_printk(DEBUG_FUNC, "< %s: " format, __func__, ##args)
+
+#define mods_debug_printk(level, fmt, args...)\
+ ({ \
+ if (mods_check_debug_level(level)) \
+ pr_info("mods debug: " fmt, ##args); \
+ })
+
+#define mods_info_printk(fmt, args...)\
+ pr_info("mods: " fmt, ##args)
+
+#define mods_error_printk(fmt, args...)\
+ pr_info("mods error: " fmt, ##args)
+
+#define mods_warning_printk(fmt, args...)\
+ pr_info("mods warning: " fmt, ##args)
+
+struct irq_q_data {
+ NvU32 time;
+ struct pci_dev *dev;
+ NvU32 irq;
+};
+
+struct irq_q_info {
+ struct irq_q_data data[MODS_MAX_IRQS];
+ NvU32 head;
+ NvU32 tail;
+};
+
+struct dev_irq_map {
+ void *dev_irq_aperture;
+ NvU32 *dev_irq_mask_reg;
+ NvU32 *dev_irq_state;
+ NvU32 irq_and_mask;
+ NvU32 irq_or_mask;
+ NvU32 apic_irq;
+ NvU8 type;
+ NvU8 channel;
+ struct pci_dev *dev;
+ struct list_head list;
+};
+
+struct mods_priv {
+ /* map info from pci irq to apic irq */
+ struct list_head irq_head[MODS_CHANNEL_MAX];
+
+ /* bits map for each allocated id. Each mods has an id. */
+ /* the design is to take into account multi mods. */
+ unsigned long channel_flags;
+
+ /* fifo loop queue */
+ struct irq_q_info rec_info[MODS_CHANNEL_MAX];
+ spinlock_t lock;
+};
+
+/* ************************************************************************* */
+/* ************************************************************************* */
+/* ** */
+/* ** SYSTEM CALLS */
+/* ** */
+/* ************************************************************************* */
+/* ************************************************************************* */
+
+/* MEMORY */
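+/* Allocation wrappers: every kmalloc/kfree is recorded via mods_add_mem()/
+ * mods_del_mem() so that mods_check_mem() can report leaks. */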
+#define MODS_KMALLOC(ptr, size) \
+ { \
+ (ptr) = kmalloc(size, GFP_KERNEL); \
+ MODS_ALLOC_RECORD(ptr, size, "km_alloc"); \
+ }
+
+#define MODS_KMALLOC_ATOMIC(ptr, size) \
+ { \
+ (ptr) = kmalloc(size, GFP_ATOMIC); \
+ MODS_ALLOC_RECORD(ptr, size, "km_alloc_atomic"); \
+ }
+
+#define MODS_KFREE(ptr, size) \
+ { \
+ MODS_FREE_RECORD(ptr, size, "km_free"); \
+ kfree((void *) (ptr)); \
+ }
+
+#define MODS_ALLOC_RECORD(ptr, size, name) \
+ {if (ptr != NULL) { \
+ mods_add_mem(ptr, size, __FILE__, __LINE__); \
+ } }
+
+#define MODS_FREE_RECORD(ptr, size, name) \
+ {if (ptr != NULL) { \
+ mods_del_mem(ptr, size, __FILE__, __LINE__); \
+ } }
+
+#define MEMDBG_ALLOC(a, b) (a = kmalloc(b, GFP_ATOMIC))
+#define MEMDBG_FREE(a) (kfree(a))
+#define MODS_FORCE_KFREE(ptr) (kfree(ptr))
+
+#define __MODS_ALLOC_PAGES(page, order, gfp_mask, numa_node) \
+ { \
+ (page) = alloc_pages_node(numa_node, gfp_mask, order); \
+ }
+
+#define __MODS_FREE_PAGES(page, order) \
+ { \
+ __free_pages(page, order); \
+ }
+
+#ifndef MODS_HAS_SET_MEMORY
+# define MODS_SET_MEMORY_UC(addr, pages) \
+ change_page_attr(virt_to_page(addr), pages, PAGE_KERNEL_NOCACHE)
+# define MODS_SET_MEMORY_WC MODS_SET_MEMORY_UC
+# define MODS_SET_MEMORY_WB(addr, pages) \
+ change_page_attr(virt_to_page(addr), pages, PAGE_KERNEL)
+#elif defined(CONFIG_ARCH_TEGRA) && !defined(CONFIG_CPA) && \
+ !defined(CONFIG_ARCH_TEGRA_3x_SOC)
+# define MODS_SET_MEMORY_UC(addr, pages) 0
+# define MODS_SET_MEMORY_WC(addr, pages) 0
+# define MODS_SET_MEMORY_WB(addr, pages) 0
+#else
+# define MODS_SET_MEMORY_UC(addr, pages) set_memory_uc(addr, pages)
+# ifdef MODS_HAS_WC
+# define MODS_SET_MEMORY_WC(addr, pages)\
+ set_memory_wc(addr, pages)
+# else
+# define MODS_SET_MEMORY_WC(addr, pages)\
+ MODS_SET_MEMORY_UC(addr, pages)
+# endif
+# define MODS_SET_MEMORY_WB(addr, pages) set_memory_wb(addr, pages)
+#endif
+
+#define MODS_PGPROT_UC pgprot_noncached
+#ifdef MODS_HAS_WC
+# define MODS_PGPROT_WC pgprot_writecombine
+#else
+# define MODS_PGPROT_WC pgprot_noncached
+#endif
+
+/* VMA */
+#define MODS_VMA_PGOFF(vma) ((vma)->vm_pgoff)
+#define MODS_VMA_SIZE(vma) ((vma)->vm_end - (vma)->vm_start)
+#define MODS_VMA_OFFSET(vma) (((NvU64)(vma)->vm_pgoff) << PAGE_SHIFT)
+#define MODS_VMA_PRIVATE(vma) ((vma)->vm_private_data)
+#define MODS_VMA_FILE(vma) ((vma)->vm_file)
+
+/* Xen adds a translation layer between the physical address
+ * and real system memory address space.
+ *
+ * To illustrate if a PC has 2 GBs of RAM and each VM is given 1GB, then:
+ * for guest OS in domain 0, physical address = machine address;
+ * for guest OS in domain 1, physical address x = machine address 1GB+x
+ *
+ * In reality even domain 0's physical addresses are not equal to machine
+ * addresses, and the mappings are not contiguous.
+ */
+
+#if defined(CONFIG_XEN) && !defined(CONFIG_PARAVIRT)
+ #define MODS_PHYS_TO_DMA(phys_addr) phys_to_machine(phys_addr)
+ #define MODS_DMA_TO_PHYS(dma_addr) machine_to_phys(dma_addr)
+#else
+ #define MODS_PHYS_TO_DMA(phys_addr) (phys_addr)
+ #define MODS_DMA_TO_PHYS(dma_addr) (dma_addr)
+#endif
+
+/* PCI */
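+/* Statement expression that walks all PCI devices via pci_get_device() and
+ * yields the first one matching bus/devfn, or NULL if there is no match. */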
+#define MODS_PCI_GET_SLOT(mybus, devfn) \
+({ \
+ struct pci_dev *__dev = NULL; \
+ while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, __dev))) { \
+ if (__dev->bus->number == mybus \
+ && __dev->devfn == devfn) \
+ break; \
+ } \
+ __dev; \
+})
+
+/* ACPI */
+#ifdef MODS_HAS_NEW_ACPI_WALK
+#define MODS_ACPI_WALK_NAMESPACE(type, start_object, max_depth, user_function, \
+ context, return_value)\
+ acpi_walk_namespace(type, start_object, max_depth, user_function, NULL,\
+ context, return_value)
+#else
+#define MODS_ACPI_WALK_NAMESPACE acpi_walk_namespace
+#endif
+
+/* FILE */
+#define MODS_PRIVATE_DATA(var, fp) \
+ struct mods_file_private_data *var = (fp)->private_data
+#define MODS_GET_FILE_PRIVATE_ID(fp) (((struct mods_file_private_data *)(fp) \
+ ->private_data)->mods_id)
+
+/* ************************************************************************* */
+/* ** MODULE WIDE FUNCTIONS */
+/* ************************************************************************* */
+
+/* irq */
+void mods_init_irq(void);
+void mods_cleanup_irq(void);
+unsigned char mods_alloc_channel(void);
+void mods_free_channel(unsigned char);
+void mods_irq_dev_clr_pri(unsigned char);
+void mods_irq_dev_set_pri(unsigned char id, void *pri);
+int mods_irq_event_check(unsigned char);
+
+/* mem */
+void mods_init_mem(void);
+void mods_add_mem(void *, NvU32, const char *, NvU32);
+void mods_del_mem(void *, NvU32, const char *, NvU32);
+void mods_check_mem(void);
+void mods_unregister_all_alloc(struct file *fp);
+struct SYS_MEM_MODS_INFO *mods_find_alloc(struct file *, NvU64);
+
+/* clock */
+#ifdef CONFIG_ARCH_TEGRA
+void mods_init_clock_api(void);
+void mods_shutdown_clock_api(void);
+#endif
+
+/* ioctl handlers */
+
+/* mem */
+int esc_mods_alloc_pages(struct file *, struct MODS_ALLOC_PAGES *);
+int esc_mods_device_alloc_pages(struct file *,
+ struct MODS_DEVICE_ALLOC_PAGES *);
+int esc_mods_free_pages(struct file *, struct MODS_FREE_PAGES *);
+int esc_mods_set_mem_type(struct file *, struct MODS_MEMORY_TYPE *);
+int esc_mods_get_phys_addr(struct file *,
+ struct MODS_GET_PHYSICAL_ADDRESS *);
+int esc_mods_virtual_to_phys(struct file *,
+ struct MODS_VIRTUAL_TO_PHYSICAL *);
+int esc_mods_phys_to_virtual(struct file *,
+ struct MODS_PHYSICAL_TO_VIRTUAL *);
+int esc_mods_memory_barrier(struct file *);
+/* acpi */
+#ifdef CONFIG_ACPI
+int esc_mods_eval_acpi_method(struct file *,
+ struct MODS_EVAL_ACPI_METHOD *);
+int esc_mods_eval_dev_acpi_method(struct file *,
+ struct MODS_EVAL_DEV_ACPI_METHOD *);
+int esc_mods_acpi_get_ddc(struct file *, struct MODS_ACPI_GET_DDC *);
+#endif
+/* pci */
+#ifdef CONFIG_PCI
+int esc_mods_find_pci_dev(struct file *, struct MODS_FIND_PCI_DEVICE *);
+int esc_mods_find_pci_class_code(struct file *,
+ struct MODS_FIND_PCI_CLASS_CODE *);
+int esc_mods_pci_read(struct file *, struct MODS_PCI_READ *);
+int esc_mods_pci_write(struct file *, struct MODS_PCI_WRITE *);
+int esc_mods_pci_bus_add_dev(struct file *,
+ struct MODS_PCI_BUS_ADD_DEVICES *);
+int esc_mods_pio_read(struct file *, struct MODS_PIO_READ *);
+int esc_mods_pio_write(struct file *, struct MODS_PIO_WRITE *);
+int esc_mods_device_numa_info(struct file *,
+ struct MODS_DEVICE_NUMA_INFO *);
+#endif
+/* irq */
+int esc_mods_register_irq(struct file *, struct MODS_REGISTER_IRQ *);
+int esc_mods_unregister_irq(struct file *, struct MODS_REGISTER_IRQ *);
+int esc_mods_query_irq(struct file *, struct MODS_QUERY_IRQ *);
+int esc_mods_set_irq_mask(struct file *, struct MODS_SET_IRQ_MASK *);
+int esc_mods_irq_handled(struct file *, struct MODS_REGISTER_IRQ *);
+/* clock */
+#ifdef CONFIG_ARCH_TEGRA
+int esc_mods_get_clock_handle(struct file *,
+ struct MODS_GET_CLOCK_HANDLE *);
+int esc_mods_set_clock_rate(struct file *, struct MODS_CLOCK_RATE *);
+int esc_mods_get_clock_rate(struct file *, struct MODS_CLOCK_RATE *);
+int esc_mods_get_clock_max_rate(struct file *, struct MODS_CLOCK_RATE *);
+int esc_mods_set_clock_max_rate(struct file *, struct MODS_CLOCK_RATE *);
+int esc_mods_set_clock_parent(struct file *, struct MODS_CLOCK_PARENT *);
+int esc_mods_get_clock_parent(struct file *, struct MODS_CLOCK_PARENT *);
+int esc_mods_enable_clock(struct file *, struct MODS_CLOCK_HANDLE *);
+int esc_mods_disable_clock(struct file *, struct MODS_CLOCK_HANDLE *);
+int esc_mods_is_clock_enabled(struct file *pfile,
+ struct MODS_CLOCK_ENABLED *p);
+int esc_mods_clock_reset_assert(struct file *,
+ struct MODS_CLOCK_HANDLE *);
+int esc_mods_clock_reset_deassert(struct file *,
+ struct MODS_CLOCK_HANDLE *);
+int esc_mods_flush_cpu_cache_range(struct file *,
+ struct MODS_FLUSH_CPU_CACHE_RANGE *);
+#endif
+
+#endif /* _MODS_INTERNAL_H_ */
diff --git a/drivers/misc/mods/mods_irq.c b/drivers/misc/mods/mods_irq.c
new file mode 100644
index 000000000000..a90e8e2f05d9
--- /dev/null
+++ b/drivers/misc/mods/mods_irq.c
@@ -0,0 +1,1026 @@
+/*
+ * mods_irq.c - This file is part of NVIDIA MODS kernel driver.
+ *
+ * Copyright (c) 2008-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA MODS kernel driver is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * NVIDIA MODS kernel driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NVIDIA MODS kernel driver.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mods_internal.h"
+
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/interrupt.h>
+#include <linux/pci_regs.h>
+
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+#define INDEX_IRQSTAT(irq) (irq / BITS_NUM)
+#define POS_IRQSTAT(irq) (irq & (BITS_NUM - 1))
+
+/* MSI */
+#define PCI_MSI_MASK_BIT 16
+#define MSI_CONTROL_REG(base) (base + PCI_MSI_FLAGS)
+#define IS_64BIT_ADDRESS(control) (!!(control & PCI_MSI_FLAGS_64BIT))
+#define MSI_DATA_REG(base, is64bit) \
+ ((is64bit == 1) ? base + PCI_MSI_DATA_64 : base + PCI_MSI_DATA_32)
+
+struct nv_device {
+ char name[20];
+ struct mods_priv *isr_pri;
+ void *pri[MODS_CHANNEL_MAX];
+};
+
+/*********************
+ * PRIVATE FUNCTIONS *
+ *********************/
+static struct mods_priv mp;
+static struct nv_device nv_dev = { "nvidia mods", &mp, {0} };
+
+static struct mods_priv *get_all_data(void)
+{
+ return &mp;
+}
+
+static struct nv_device *get_dev(void)
+{
+ return &nv_dev;
+}
+
+#ifdef CONFIG_PCI
+static int mods_enable_device(struct mods_file_private_data *priv,
+ struct pci_dev *pdev)
+{
+ int ret = -1;
+ struct en_dev_entry *entry = priv->enabled_devices;
+ while (entry != 0) {
+ if (entry->dev == pdev)
+ return 0;
+ entry = entry->next;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret == 0) {
+ entry = 0;
+ MODS_KMALLOC(entry, sizeof(*entry));
+ if (unlikely(!entry))
+ return 0;
+ entry->dev = pdev;
+ entry->next = priv->enabled_devices;
+ priv->enabled_devices = entry;
+ }
+ return ret;
+}
+#endif
+
+static unsigned int get_cur_time(void)
+{
+ /* This is not very precise, sched_clock() would be better */
+ return jiffies_to_usecs(jiffies);
+}
+
+static int id_is_valid(unsigned char channel)
+{
+	if (channel == 0 || channel > MODS_CHANNEL_MAX)
+ return ERROR;
+
+ return OK;
+}
+
+static inline int mods_check_interrupt(struct dev_irq_map *t)
+{
+ if (t->dev_irq_state && t->dev_irq_mask_reg) {
+ /* GPU device */
+ return *t->dev_irq_state && *t->dev_irq_mask_reg;
+ } else {
+ /* Non-GPU device - we can't tell */
+ return true;
+ }
+}
+
+static void mods_disable_interrupts(struct dev_irq_map *t)
+{
+ if (t->dev_irq_mask_reg) {
+ if (t->irq_and_mask == 0) {
+ *t->dev_irq_mask_reg = t->irq_or_mask;
+ } else {
+ *t->dev_irq_mask_reg =
+ (*t->dev_irq_mask_reg & t->irq_and_mask)
+ | t->irq_or_mask;
+ }
+ } else if (t->type == MODS_IRQ_TYPE_CPU) {
+ disable_irq_nosync(t->apic_irq);
+ }
+}
+
+static void rec_irq_done(struct nv_device *dev,
+ unsigned char channel,
+ struct dev_irq_map *t,
+ unsigned int irq_time)
+{
+ struct irq_q_info *q;
+ struct mods_priv *pmp = dev->isr_pri;
+ struct mods_file_private_data *private_data = dev->pri[channel];
+
+ /* Get interrupt queue */
+ q = &pmp->rec_info[channel - 1];
+
+ /* Don't do anything if the IRQ has already been recorded */
+ if (q->head != q->tail) {
+ unsigned int i;
+ for (i = q->head; i != q->tail; i++) {
+ if (t->dev) {
+ struct pci_dev *cur
+ = q->data[i & (MODS_MAX_IRQS - 1)].dev;
+ if (cur == t->dev)
+ return;
+ } else {
+ NvU32 cur
+ = q->data[i & (MODS_MAX_IRQS - 1)].irq;
+ if (cur == t->apic_irq)
+ return;
+ }
+ }
+ }
+
+	/* If the queue is full, report it and drop the interrupt -- */
+	/* MODS will never see this IRQ, which is fatal for the test */
+ if (q->tail - q->head == MODS_MAX_IRQS) {
+ mods_error_printk("IRQ queue is full\n");
+ return;
+ }
+
+ /* Record the device which generated the IRQ in the queue */
+ q->data[q->tail & (MODS_MAX_IRQS - 1)].dev = t->dev;
+ q->data[q->tail & (MODS_MAX_IRQS - 1)].irq = t->apic_irq;
+ q->data[q->tail & (MODS_MAX_IRQS - 1)].time = irq_time;
+ q->tail++;
+
+#ifdef CONFIG_PCI
+ if (t->dev) {
+ mods_debug_printk(DEBUG_ISR_DETAILED,
+ "%s IRQ 0x%x for %x:%02x.%x, time=%uus\n",
+ (t->type == MODS_IRQ_TYPE_MSI) ? "MSI" : "INTx",
+ t->apic_irq,
+ (unsigned)(t->dev->bus->number),
+ (unsigned)PCI_SLOT(t->dev->devfn),
+ (unsigned)PCI_FUNC(t->dev->devfn),
+ irq_time);
+ } else
+#endif
+ mods_debug_printk(DEBUG_ISR_DETAILED,
+ "CPU IRQ 0x%x, time=%uus\n",
+ t->apic_irq,
+ irq_time);
+
+ /* Wake MODS to handle the interrupt */
+ if (private_data) {
+ spin_unlock(&pmp->lock);
+ wake_up_interruptible(&private_data->interrupt_event);
+ spin_lock(&pmp->lock);
+ }
+}
+
+/* mods_irq_handle - interrupt function */
+static irqreturn_t mods_irq_handle(int irq, void *data
+#ifndef MODS_IRQ_HANDLE_NO_REGS
+ , struct pt_regs *regs
+#endif
+)
+{
+ struct nv_device *dev = (struct nv_device *)data;
+ struct mods_priv *pmp = dev->isr_pri;
+ struct dev_irq_map *t = NULL;
+ unsigned char channel_idx;
+ unsigned long flags = 0;
+ int found = 0;
+ unsigned int irq_time = get_cur_time();
+
+ spin_lock_irqsave(&pmp->lock, flags);
+
+ for (channel_idx = 0; channel_idx < MODS_CHANNEL_MAX; channel_idx++) {
+ if (!(pmp->channel_flags & (1 << channel_idx)))
+ continue;
+
+ list_for_each_entry(t, &pmp->irq_head[channel_idx], list) {
+ if ((t->apic_irq == irq) && mods_check_interrupt(t)) {
+ /* Disable interrupts on this device to avoid
+				 * interrupt storm */
+ mods_disable_interrupts(t);
+
+ /* Record IRQ for MODS and wake MODS up */
+ rec_irq_done(dev, channel_idx+1, t, irq_time);
+ found |= 1;
+
+ /* MSI and CPU interrupts are not shared,
+ * so stop looking */
+ if (t->type != MODS_IRQ_TYPE_INT) {
+ channel_idx = MODS_CHANNEL_MAX;
+ break;
+ }
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&pmp->lock, flags);
+ return IRQ_RETVAL(found);
+}
+
+static int mods_lookup_irq(unsigned char channel, struct pci_dev *pdev,
+ unsigned int irq)
+{
+ unsigned char channel_idx;
+ struct mods_priv *pmp = get_all_data();
+ int ret = IRQ_NOT_FOUND;
+
+ LOG_ENT();
+
+ for (channel_idx = 0; channel_idx < MODS_CHANNEL_MAX; channel_idx++) {
+ struct dev_irq_map *t = NULL;
+ struct dev_irq_map *next = NULL;
+ list_for_each_entry_safe(t,
+ next,
+ &pmp->irq_head[channel_idx],
+ list) {
+ if ((pdev && (t->dev == pdev))
+ || (!pdev && (t->apic_irq == irq))) {
+
+ if (channel == 0) {
+ ret = IRQ_FOUND;
+ } else {
+ ret = (channel == channel_idx + 1)
+ ? IRQ_FOUND : IRQ_NOT_FOUND;
+ }
+
+ /* Break out of the outer loop */
+ channel_idx = MODS_CHANNEL_MAX;
+ break;
+ }
+ }
+ }
+
+ LOG_EXT();
+ return ret;
+}
+
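+/* Hook an interrupt for the given channel: allocate a mapping entry,
+ * request the IRQ from the kernel (shared for INTx, exclusive for MSI
+ * and CPU interrupts) and, for NVIDIA display devices, map BAR0 so
+ * the ISR can mask interrupts directly in the hardware.
+ */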
+static int add_irq_map(unsigned char channel,
+ struct pci_dev *pdev,
+ NvU32 irq,
+ unsigned int type)
+{
+ struct dev_irq_map *newmap = NULL;
+ struct mods_priv *pmp = get_all_data();
+ struct nv_device *nvdev = get_dev();
+
+ LOG_ENT();
+
+ /* Allocate memory for the new entry */
+ MODS_KMALLOC(newmap, sizeof(*newmap));
+ if (unlikely(!newmap)) {
+ LOG_EXT();
+ return -ENOMEM;
+ }
+
+ /* Fill out the new entry */
+ newmap->apic_irq = irq;
+ newmap->dev = pdev;
+ newmap->channel = channel;
+ newmap->dev_irq_aperture = 0;
+ newmap->dev_irq_mask_reg = 0;
+ newmap->dev_irq_state = 0;
+ newmap->irq_and_mask = ~0U;
+ newmap->irq_or_mask = 0;
+ newmap->type = type;
+
+ /* Enable IRQ for this device in the kernel */
+ if (request_irq(
+ irq,
+ &mods_irq_handle,
+ (type == MODS_IRQ_TYPE_INT) ? IRQF_SHARED : 0,
+ nvdev->name,
+ nvdev)) {
+ mods_error_printk("unable to enable IRQ 0x%x\n", irq);
+ MODS_KFREE(newmap, sizeof(*newmap));
+ LOG_EXT();
+ return ERROR;
+ }
+
+ /* Add the new entry to the list of all registered interrupts */
+ list_add(&newmap->list, &pmp->irq_head[channel - 1]);
+
+#ifdef CONFIG_PCI
+ /* Map BAR0 of a graphics card to be able to disable interrupts */
+ if (type == MODS_IRQ_TYPE_INT) {
+ unsigned short class_code, vendor_id, device_id;
+ pci_read_config_word(pdev, PCI_CLASS_DEVICE, &class_code);
+ pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ if ((class_code == PCI_CLASS_DISPLAY_VGA) ||
+ (class_code == PCI_CLASS_DISPLAY_3D)) {
+
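+			/* 0x10DE is the NVIDIA PCI vendor ID.  On these
+			 * GPUs the ISR reads the interrupt status at
+			 * BAR0+0x100 and masks interrupts by writing
+			 * BAR0+0x140. */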
+			if (vendor_id == 0x10DE) {
+				char *bar = ioremap_nocache(
+					pci_resource_start(pdev, 0),
+					0x200);
+				if (bar) {
+					newmap->dev_irq_aperture = bar;
+					newmap->dev_irq_mask_reg =
+						(NvU32 *)(bar+0x140);
+					newmap->dev_irq_state =
+						(NvU32 *)(bar+0x100);
+					newmap->irq_and_mask = 0;
+					newmap->irq_or_mask = 0;
+				}
+			}
+ }
+ }
+#endif
+
+ /* Print out successful registration string */
+ if (type == MODS_IRQ_TYPE_CPU)
+ mods_debug_printk(DEBUG_ISR, "registered CPU IRQ 0x%x\n", irq);
+#ifdef CONFIG_PCI
+ else if (type == MODS_IRQ_TYPE_INT) {
+ mods_debug_printk(DEBUG_ISR,
+ "registered INTx IRQ 0x%x for device %x:%02x.%x\n",
+ pdev->irq,
+ (unsigned)(pdev->bus->number),
+ (unsigned)PCI_SLOT(pdev->devfn),
+ (unsigned)PCI_FUNC(pdev->devfn));
+ }
+#endif
+#ifdef CONFIG_PCI_MSI
+ else if (type == MODS_IRQ_TYPE_MSI) {
+ u16 control;
+ u16 data;
+ int cap_pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ pci_read_config_word(pdev, MSI_CONTROL_REG(cap_pos), &control);
+ if (IS_64BIT_ADDRESS(control))
+ pci_read_config_word(pdev,
+ MSI_DATA_REG(cap_pos, 1),
+ &data);
+ else
+ pci_read_config_word(pdev,
+ MSI_DATA_REG(cap_pos, 0),
+ &data);
+ mods_debug_printk(DEBUG_ISR,
+ "registered MSI IRQ 0x%x with data 0x%02x "
+ "for device %x:%02x.%x\n",
+ pdev->irq,
+ (unsigned)data,
+ (unsigned)(pdev->bus->number),
+ (unsigned)PCI_SLOT(pdev->devfn),
+ (unsigned)PCI_FUNC(pdev->devfn));
+ }
+#endif
+
+ LOG_EXT();
+ return OK;
+}
+
+static void mods_free_map(struct dev_irq_map *del)
+{
+ LOG_ENT();
+
+ /* Disable interrupts on the device */
+ mods_disable_interrupts(del);
+
+ /* Unmap aperture used for masking irqs */
+ if (del->dev_irq_aperture)
+ iounmap(del->dev_irq_aperture);
+
+ /* Unhook interrupts in the kernel */
+ free_irq(del->apic_irq, get_dev());
+
+ /* Disable MSI */
+#ifdef CONFIG_PCI_MSI
+ if (del->type == MODS_IRQ_TYPE_MSI)
+ pci_disable_msi(del->dev);
+#endif
+
+ /* Free memory */
+ MODS_KFREE(del, sizeof(*del));
+
+ LOG_EXT();
+}
+
+/********************
+ * PUBLIC FUNCTIONS *
+ ********************/
+void mods_init_irq(void)
+{
+ int i;
+ struct mods_priv *pmp = get_all_data();
+
+ LOG_ENT();
+
+ memset(pmp, 0, sizeof(struct mods_priv));
+ for (i = 0; i < MODS_CHANNEL_MAX; i++)
+ INIT_LIST_HEAD(&pmp->irq_head[i]);
+
+ spin_lock_init(&pmp->lock);
+ LOG_EXT();
+}
+
+void mods_cleanup_irq(void)
+{
+ int i;
+ struct mods_priv *pmp = get_all_data();
+
+ LOG_ENT();
+ for (i = 0; i < MODS_CHANNEL_MAX; i++) {
+		if (pmp->channel_flags & (1 << i))
+ mods_free_channel(i + 1);
+ }
+ LOG_EXT();
+}
+
+void mods_irq_dev_set_pri(unsigned char id, void *pri)
+{
+ struct nv_device *dev = get_dev();
+ dev->pri[id] = pri;
+}
+
+void mods_irq_dev_clr_pri(unsigned char id)
+{
+ struct nv_device *dev = get_dev();
+ dev->pri[id] = 0;
+}
+
+int mods_irq_event_check(unsigned char channel)
+{
+ struct mods_priv *pmp = get_all_data();
+ struct irq_q_info *q = &pmp->rec_info[channel - 1];
+ unsigned int pos = (1 << (channel - 1));
+
+ if (!(pmp->channel_flags & pos))
+		return POLLERR; /* channel has been closed */
+
+ if (q->head != q->tail)
+ return POLLIN; /* irq generated */
+
+ return 0;
+}
+
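+/* Allocate a client channel by atomically claiming a bit in
+ * channel_flags.  Unless the multi_instance module parameter is set,
+ * only one client may be connected at a time.  Returns
+ * MODS_CHANNEL_MAX + 1 when no channel is available.
+ */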
+unsigned char mods_alloc_channel(void)
+{
+ struct mods_priv *pmp = get_all_data();
+ int i = 0;
+ unsigned char channel = MODS_CHANNEL_MAX + 1;
+ unsigned char max_channels = mods_get_multi_instance()
+ ? MODS_CHANNEL_MAX : 1;
+
+ LOG_ENT();
+
+ for (i = 0; i < max_channels; i++) {
+ if (!test_and_set_bit(i, &pmp->channel_flags)) {
+ channel = i + 1;
+ mods_debug_printk(DEBUG_IOCTL,
+ "open channel %u (bit mask 0x%lx)\n",
+ (unsigned)(i+1), pmp->channel_flags);
+ break;
+		}
+	}
+
+ LOG_EXT();
+ return channel;
+}
+
+void mods_free_channel(unsigned char channel)
+{
+ struct mods_priv *pmp = get_all_data();
+ struct dev_irq_map *del = NULL;
+ struct dev_irq_map *next = NULL;
+ struct irq_q_info *q = &pmp->rec_info[channel - 1];
+
+ LOG_ENT();
+
+ /* Release all interrupts */
+ list_for_each_entry_safe(del, next, &pmp->irq_head[channel - 1], list) {
+ list_del(&del->list);
+ if (del->type == MODS_IRQ_TYPE_CPU) {
+ mods_warning_printk(
+ "CPU IRQ 0x%x is still hooked, unhooking\n",
+ del->apic_irq);
+ }
+#ifdef CONFIG_PCI
+ else {
+ mods_warning_printk(
+ "%s IRQ 0x%x for device %x:%02x.%x is still hooked, unhooking\n",
+ (del->type == MODS_IRQ_TYPE_MSI)
+ ? "MSI" : "INTx",
+ del->dev->irq,
+ (unsigned)(del->dev->bus->number),
+ (unsigned)PCI_SLOT(del->dev->devfn),
+ (unsigned)PCI_FUNC(del->dev->devfn));
+ }
+#endif
+ mods_free_map(del);
+ }
+
+ /* Clear queue */
+ memset(q, 0, sizeof(*q));
+
+ /* Indicate the channel is free */
+ clear_bit(channel - 1, &pmp->channel_flags);
+
+ mods_debug_printk(DEBUG_IOCTL, "closed channel %u\n",
+ (unsigned)channel);
+ LOG_EXT();
+}
+
+#ifdef CONFIG_PCI
+static int mods_register_pci_irq(struct file *pfile,
+ struct MODS_REGISTER_IRQ *p)
+{
+ struct pci_dev *dev;
+ unsigned int devfn;
+ unsigned char channel;
+ MODS_PRIVATE_DATA(private_data, pfile);
+
+ LOG_ENT();
+
+ /* Identify the caller */
+ channel = MODS_GET_FILE_PRIVATE_ID(pfile);
+ BUG_ON(id_is_valid(channel) != OK);
+
+ /* Get the PCI device structure for the specified device from kernel */
+ devfn = PCI_DEVFN(p->dev.device, p->dev.function);
+ dev = MODS_PCI_GET_SLOT(p->dev.bus, devfn);
+ if (!dev) {
+ LOG_EXT();
+ return ERROR;
+ }
+
+ /* Determine if the interrupt is already hooked */
+ if (mods_lookup_irq(0, dev, 0) == IRQ_FOUND) {
+ mods_error_printk(
+ "IRQ for device %x:%02x.%x has already been registered\n",
+ (unsigned)p->dev.bus,
+ (unsigned)p->dev.device,
+ (unsigned)p->dev.function);
+ LOG_EXT();
+ return ERROR;
+ }
+
+ /* Determine if the device supports MSI */
+ if (p->type == MODS_IRQ_TYPE_MSI) {
+#ifdef CONFIG_PCI_MSI
+ if (0 == pci_find_capability(dev, PCI_CAP_ID_MSI)) {
+ mods_error_printk(
+ "device %x:%02x.%x does not support MSI\n",
+ (unsigned)p->dev.bus,
+ (unsigned)p->dev.device,
+ (unsigned)p->dev.function);
+ LOG_EXT();
+ return ERROR;
+ }
+#else
+	mods_error_printk("the kernel does not support MSI!\n");
+	LOG_EXT();
+	return ERROR;
+#endif
+ }
+
+ /* Enable device on the PCI bus */
+ if (mods_enable_device(private_data, dev)) {
+ mods_error_printk("unable to enable device %x:%02x.%x\n",
+ (unsigned)p->dev.bus,
+ (unsigned)p->dev.device,
+ (unsigned)p->dev.function);
+ LOG_EXT();
+ return ERROR;
+ }
+
+ /* Enable MSI */
+#ifdef CONFIG_PCI_MSI
+ if (p->type == MODS_IRQ_TYPE_MSI) {
+ if (0 != pci_enable_msi(dev)) {
+ mods_error_printk(
+ "unable to enable MSI on device %x:%02x.%x\n",
+ (unsigned)p->dev.bus,
+ (unsigned)p->dev.device,
+ (unsigned)p->dev.function);
+			LOG_EXT();
+			return ERROR;
+ }
+ }
+#endif
+
+ /* Register interrupt */
+ if (add_irq_map(channel, dev, dev->irq, p->type) != OK) {
+#ifdef CONFIG_PCI_MSI
+ if (p->type == MODS_IRQ_TYPE_MSI)
+ pci_disable_msi(dev);
+#endif
+ LOG_EXT();
+ return ERROR;
+ }
+
+	LOG_EXT();
+	return OK;
+}
+#endif /* CONFIG_PCI */
+
+static int mods_register_cpu_irq(struct file *pfile,
+ struct MODS_REGISTER_IRQ *p)
+{
+ unsigned char channel;
+ unsigned int irq;
+
+ LOG_ENT();
+
+ irq = p->dev.bus;
+
+ /* Identify the caller */
+ channel = MODS_GET_FILE_PRIVATE_ID(pfile);
+ BUG_ON(id_is_valid(channel) != OK);
+
+ /* Determine if the interrupt is already hooked */
+ if (mods_lookup_irq(0, 0, irq) == IRQ_FOUND) {
+ mods_error_printk("CPU IRQ 0x%x has already been registered\n",
+ irq);
+ LOG_EXT();
+ return ERROR;
+ }
+
+ /* Register interrupt */
+ if (add_irq_map(channel, 0, irq, p->type) != OK) {
+ LOG_EXT();
+ return ERROR;
+ }
+
+	LOG_EXT();
+	return OK;
+}
+
+#ifdef CONFIG_PCI
+static int mods_unregister_pci_irq(struct file *pfile,
+ struct MODS_REGISTER_IRQ *p)
+{
+ struct mods_priv *pmp = get_all_data();
+ struct dev_irq_map *del = NULL;
+ struct dev_irq_map *next;
+ struct pci_dev *dev;
+ unsigned int devfn;
+ unsigned char channel;
+
+ LOG_ENT();
+
+ /* Identify the caller */
+ channel = MODS_GET_FILE_PRIVATE_ID(pfile);
+ BUG_ON(id_is_valid(channel) != OK);
+
+ /* Get the PCI device structure for the specified device from kernel */
+ devfn = PCI_DEVFN(p->dev.device, p->dev.function);
+ dev = MODS_PCI_GET_SLOT(p->dev.bus, devfn);
+ if (!dev) {
+ LOG_EXT();
+ return ERROR;
+ }
+
+ /* Determine if the interrupt is already hooked by this client */
+ if (mods_lookup_irq(channel, dev, 0) == IRQ_NOT_FOUND) {
+ mods_error_printk(
+ "IRQ for device %x:%02x.%x not hooked, can't unhook\n",
+ (unsigned)p->dev.bus,
+ (unsigned)p->dev.device,
+ (unsigned)p->dev.function);
+ LOG_EXT();
+ return ERROR;
+ }
+
+ /* Delete device interrupt from the list */
+ list_for_each_entry_safe(del, next, &pmp->irq_head[channel - 1], list) {
+ if (dev == del->dev) {
+ if (del->type != p->type) {
+ mods_error_printk("wrong IRQ type passed\n");
+ LOG_EXT();
+ return ERROR;
+ }
+ list_del(&del->list);
+ mods_debug_printk(DEBUG_ISR,
+ "unregistered %s IRQ 0x%x for device "
+ "%x:%02x.%x\n",
+ (del->type == MODS_IRQ_TYPE_MSI)
+ ? "MSI" : "INTx",
+ del->dev->irq,
+ (unsigned)p->dev.bus,
+ (unsigned)p->dev.device,
+ (unsigned)p->dev.function);
+ mods_free_map(del);
+ break;
+ }
+ }
+
+ LOG_EXT();
+ return OK;
+}
+#endif
+
+static int mods_unregister_cpu_irq(struct file *pfile,
+ struct MODS_REGISTER_IRQ *p)
+{
+ struct mods_priv *pmp = get_all_data();
+ struct dev_irq_map *del = NULL;
+ struct dev_irq_map *next;
+ unsigned int irq;
+ unsigned char channel;
+
+ LOG_ENT();
+
+ irq = p->dev.bus;
+
+ /* Identify the caller */
+ channel = MODS_GET_FILE_PRIVATE_ID(pfile);
+ BUG_ON(id_is_valid(channel) != OK);
+
+ /* Determine if the interrupt is already hooked by this client */
+ if (mods_lookup_irq(channel, 0, irq) == IRQ_NOT_FOUND) {
+ mods_error_printk(
+ "IRQ 0x%x not hooked, can't unhook\n",
+ irq);
+ LOG_EXT();
+ return ERROR;
+ }
+
+ /* Delete device interrupt from the list */
+ list_for_each_entry_safe(del, next, &pmp->irq_head[channel - 1], list) {
+ if ((irq == del->apic_irq) && (del->dev == 0)) {
+ if (del->type != p->type) {
+ mods_error_printk("wrong IRQ type passed\n");
+ LOG_EXT();
+ return ERROR;
+ }
+ list_del(&del->list);
+ mods_debug_printk(DEBUG_ISR,
+ "unregistered CPU IRQ 0x%x\n",
+ irq);
+ mods_free_map(del);
+ break;
+ }
+ }
+
+ LOG_EXT();
+ return OK;
+}
+
+/*************************
+ * ESCAPE CALL FUNCTIONS *
+ *************************/
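+/* The esc_* functions below implement the driver's escape calls; they
+ * are dispatched from the ioctl handler in mods_krnl.c.
+ */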
+
+int esc_mods_register_irq(struct file *pfile, struct MODS_REGISTER_IRQ *p)
+{
+ if (p->type == MODS_IRQ_TYPE_CPU) {
+ return mods_register_cpu_irq(pfile, p);
+ } else {
+#ifdef CONFIG_PCI
+ return mods_register_pci_irq(pfile, p);
+#else
+ mods_error_printk("PCI not available\n");
+ return -EINVAL;
+#endif
+ }
+}
+
+int esc_mods_unregister_irq(struct file *pfile, struct MODS_REGISTER_IRQ *p)
+{
+ if (p->type == MODS_IRQ_TYPE_CPU) {
+ return mods_unregister_cpu_irq(pfile, p);
+ } else {
+#ifdef CONFIG_PCI
+ return mods_unregister_pci_irq(pfile, p);
+#else
+ return -EINVAL;
+#endif
+ }
+}
+
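+/* Drain queued interrupts into the caller's buffer.  Entries are
+ * consumed from the head of the ring buffer; p->more is set when more
+ * interrupts remain than fit in a single call.
+ */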
+int esc_mods_query_irq(struct file *pfile, struct MODS_QUERY_IRQ *p)
+{
+ unsigned char channel;
+ struct irq_q_info *q = NULL;
+ struct mods_priv *pmp = get_all_data();
+ unsigned int i = 0;
+ unsigned long flags = 0;
+ unsigned int cur_time = get_cur_time();
+
+ /* Lock IRQ queue */
+ spin_lock_irqsave(&pmp->lock, flags);
+ LOG_ENT();
+
+ /* Identify the caller */
+ channel = MODS_GET_FILE_PRIVATE_ID(pfile);
+ BUG_ON(id_is_valid(channel) != OK);
+
+ /* Clear return array */
+ memset(p->irq_list, 0xFF, sizeof(p->irq_list));
+
+ /* Fill in return array with IRQ information */
+ q = &pmp->rec_info[channel - 1];
+ for (i = 0;
+ (q->head != q->tail) && (i < MODS_MAX_IRQS);
+ q->head++, i++) {
+ unsigned int index = q->head & (MODS_MAX_IRQS - 1);
+ struct pci_dev *dev = q->data[index].dev;
+ if (dev) {
+ p->irq_list[i].dev.bus = dev->bus->number;
+ p->irq_list[i].dev.device = PCI_SLOT(dev->devfn);
+ p->irq_list[i].dev.function = PCI_FUNC(dev->devfn);
+ } else {
+ p->irq_list[i].dev.bus = q->data[index].irq;
+ p->irq_list[i].dev.device = 0xFFU;
+ p->irq_list[i].dev.function = 0xFFU;
+ }
+ p->irq_list[i].delay = cur_time - q->data[index].time;
+
+ /* Print info about IRQ status returned */
+ if (dev) {
+ mods_debug_printk(DEBUG_ISR_DETAILED,
+ "retrieved IRQ for %x:%02x.%x, time=%uus, "
+ "delay=%uus\n",
+ (unsigned)p->irq_list[i].dev.bus,
+ (unsigned)p->irq_list[i].dev.device,
+ (unsigned)p->irq_list[i].dev.function,
+ q->data[index].time,
+ p->irq_list[i].delay);
+ } else {
+ mods_debug_printk(DEBUG_ISR_DETAILED,
+ "retrieved IRQ 0x%x, time=%uus, delay=%uus\n",
+ (unsigned)p->irq_list[i].dev.bus,
+ q->data[index].time,
+ p->irq_list[i].delay);
+ }
+ }
+
+ /* Indicate if there are more IRQs pending */
+ if (q->head != q->tail)
+ p->more = 1;
+
+ /* Unlock IRQ queue */
+ LOG_EXT();
+ spin_unlock_irqrestore(&pmp->lock, flags);
+
+ return OK;
+}
+
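+/* Redirect interrupt masking for a registered IRQ to a user-supplied
+ * aperture: when the ISR disables interrupts it will write
+ * (reg & and_mask) | or_mask to the register at
+ * aperture_addr + reg_offset.
+ */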
+int esc_mods_set_irq_mask(struct file *pfile, struct MODS_SET_IRQ_MASK *p)
+{
+ struct mods_priv *pmp = get_all_data();
+ unsigned long flags = 0;
+ unsigned char channel;
+ struct pci_dev *dev = 0;
+ NvU32 irq = ~0U;
+ struct dev_irq_map *t = NULL;
+ struct dev_irq_map *next = NULL;
+ int ret = -EINVAL;
+
+ /* Lock IRQ queue */
+ spin_lock_irqsave(&pmp->lock, flags);
+ LOG_ENT();
+
+ /* Identify the caller */
+ channel = MODS_GET_FILE_PRIVATE_ID(pfile);
+ BUG_ON(id_is_valid(channel) != OK);
+
+ /* Print info */
+ if (p->irq_type == MODS_IRQ_TYPE_CPU) {
+ mods_debug_printk(
+ DEBUG_ISR,
+ "set CPU IRQ 0x%x mask &0x%x |0x%x addr=0x%llx\n",
+ (unsigned)p->dev.bus,
+ p->and_mask, p->or_mask,
+ p->aperture_addr + p->reg_offset);
+ } else {
+ mods_debug_printk(
+ DEBUG_ISR,
+ "set dev %x:%02x.%x IRQ mask &0x%x |0x%x addr=0x%llx\n",
+ (unsigned)p->dev.bus,
+ (unsigned)p->dev.device,
+ (unsigned)p->dev.function,
+ p->and_mask,
+ p->or_mask,
+ p->aperture_addr + p->reg_offset);
+ }
+
+ /* Verify mask type */
+ if (p->mask_type != MODS_MASK_TYPE_IRQ_DISABLE) {
+ mods_error_printk("invalid mask type\n");
+ LOG_EXT();
+ spin_unlock_irqrestore(&pmp->lock, flags);
+ return -EINVAL;
+ }
+
+ /* Determine which interrupt is referenced */
+ if (p->irq_type == MODS_IRQ_TYPE_CPU) {
+ irq = p->dev.bus;
+ } else {
+#ifdef CONFIG_PCI
+ /* Get the PCI dev struct for the specified device from kernel*/
+ unsigned int devfn = PCI_DEVFN(p->dev.device, p->dev.function);
+ dev = MODS_PCI_GET_SLOT(p->dev.bus, devfn);
+ if (!dev) {
+ LOG_EXT();
+ spin_unlock_irqrestore(&pmp->lock, flags);
+ return -EINVAL;
+ }
+#else
+ mods_error_printk("PCI not available\n");
+ LOG_EXT();
+ spin_unlock_irqrestore(&pmp->lock, flags);
+ return -EINVAL;
+#endif
+ }
+
+ list_for_each_entry_safe(t, next, &pmp->irq_head[channel-1], list) {
+ if ((dev && (t->dev == dev))
+ || (!dev && (t->apic_irq == irq))) {
+
+ if (t->type != p->irq_type) {
+ mods_error_printk(
+ "IRQ type does not match registered IRQ\n");
+ } else {
+ char *bar = 0;
+
+ if (t->dev_irq_aperture) {
+ iounmap(t->dev_irq_aperture);
+ t->dev_irq_aperture = 0;
+ t->dev_irq_mask_reg = 0;
+ t->dev_irq_state = 0;
+ mods_warning_printk(
+ "resetting IRQ mask\n");
+ }
+
+ bar = ioremap_nocache(p->aperture_addr,
+ p->aperture_size);
+ if (bar) {
+ t->dev_irq_aperture = bar;
+ t->dev_irq_mask_reg
+ = (NvU32 *)(bar + p->reg_offset);
+ t->dev_irq_state = 0;
+ t->irq_and_mask = p->and_mask;
+ t->irq_or_mask = p->or_mask;
+ ret = OK;
+ } else {
+ mods_error_printk(
+ "unable to remap specified aperture\n");
+ }
+ }
+ break;
+ }
+ }
+
+ /* Unlock IRQ queue */
+ LOG_EXT();
+ spin_unlock_irqrestore(&pmp->lock, flags);
+
+ return ret;
+}
+
+int esc_mods_irq_handled(struct file *pfile, struct MODS_REGISTER_IRQ *p)
+{
+ struct mods_priv *pmp = get_all_data();
+ unsigned long flags = 0;
+ unsigned char channel;
+ NvU32 irq = p->dev.bus;
+ struct dev_irq_map *t = NULL;
+ struct dev_irq_map *next = NULL;
+ int ret = -EINVAL;
+
+ if (p->type != MODS_IRQ_TYPE_CPU)
+ return -EINVAL;
+
+ /* Lock IRQ queue */
+ spin_lock_irqsave(&pmp->lock, flags);
+ LOG_ENT();
+
+ /* Identify the caller */
+ channel = MODS_GET_FILE_PRIVATE_ID(pfile);
+ BUG_ON(id_is_valid(channel) != OK);
+
+ /* Print info */
+ mods_debug_printk(DEBUG_ISR_DETAILED,
+ "mark CPU IRQ 0x%x handled\n", irq);
+
+ list_for_each_entry_safe(t, next, &pmp->irq_head[channel-1], list) {
+ if (t->apic_irq == irq) {
+ if (t->type != p->type) {
+ mods_error_printk(
+ "IRQ type does not match registered IRQ\n");
+ } else {
+ enable_irq(irq);
+ ret = OK;
+ }
+ break;
+ }
+ }
+
+ /* Unlock IRQ queue */
+ LOG_EXT();
+ spin_unlock_irqrestore(&pmp->lock, flags);
+
+ return ret;
+}
diff --git a/drivers/misc/mods/mods_krnl.c b/drivers/misc/mods/mods_krnl.c
new file mode 100644
index 000000000000..f1ac5c695cf9
--- /dev/null
+++ b/drivers/misc/mods/mods_krnl.c
@@ -0,0 +1,993 @@
+/*
+ * mods_krnl.c - This file is part of NVIDIA MODS kernel driver.
+ *
+ * Copyright (c) 2008-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA MODS kernel driver is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * NVIDIA MODS kernel driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NVIDIA MODS kernel driver.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mods_internal.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+
+/***********************************************************************
+ * mods_krnl_* functions, driver interfaces called by the Linux kernel *
+ ***********************************************************************/
+static int mods_krnl_open(struct inode *, struct file *);
+static int mods_krnl_close(struct inode *, struct file *);
+static unsigned int mods_krnl_poll(struct file *, poll_table *);
+static int mods_krnl_mmap(struct file *, struct vm_area_struct *);
+static long mods_krnl_ioctl(struct file *, unsigned int, unsigned long);
+
+/* character driver entry points */
+const struct file_operations mods_fops = {
+ .owner = THIS_MODULE,
+ .open = mods_krnl_open,
+ .release = mods_krnl_close,
+ .poll = mods_krnl_poll,
+ .mmap = mods_krnl_mmap,
+ .unlocked_ioctl = mods_krnl_ioctl,
+#if defined(HAVE_COMPAT_IOCTL)
+ .compat_ioctl = mods_krnl_ioctl,
+#endif
+};
+
+#define DEVICE_NAME "mods"
+
+struct miscdevice mods_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DEVICE_NAME,
+ .fops = &mods_fops
+};
+
+/***********************************************
+ * module wide parameters and access functions *
+ * used to avoid globalization of variables *
+ ***********************************************/
+static int debug = -0x80000000;
+static int multi_instance = -1;
+
+int mods_check_debug_level(int mask)
+{
+ return ((debug & mask) == mask) ? 1 : 0;
+}
+
+int mods_get_multi_instance(void)
+{
+ return multi_instance > 0;
+}
+
+/******************************
+ * INIT/EXIT MODULE FUNCTIONS *
+ ******************************/
+static int __init mods_init_module(void)
+{
+ int rc;
+
+ LOG_ENT();
+
+	/* Initialize the memory tracker */
+ mods_init_mem();
+
+ rc = misc_register(&mods_dev);
+	if (rc < 0)
+		return rc;
+
+ mods_init_irq();
+
+#ifdef CONFIG_ARCH_TEGRA
+ mods_init_clock_api();
+#endif
+
+ mods_info_printk("driver loaded, version %x.%02x\n",
+ (MODS_DRIVER_VERSION>>8),
+ (MODS_DRIVER_VERSION&0xFF));
+ LOG_EXT();
+ return OK;
+}
+
+static void __exit mods_exit_module(void)
+{
+ LOG_ENT();
+ mods_cleanup_irq();
+
+ misc_deregister(&mods_dev);
+
+#ifdef CONFIG_ARCH_TEGRA
+ mods_shutdown_clock_api();
+#endif
+
+ /* Check for memory leakage */
+ mods_check_mem();
+
+ mods_info_printk("driver unloaded\n");
+ LOG_EXT();
+}
+
+/***************************
+ * KERNEL INTERFACE SET UP *
+ ***************************/
+module_init(mods_init_module);
+module_exit(mods_exit_module);
+
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug,
+ "debug level (0=normal, 1=debug, 2=irq, 3=rich debug)");
+module_param(multi_instance, int, 0);
+MODULE_PARM_DESC(multi_instance,
+ "allows more than one client to connect simultaneously to "
+ "the driver");
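+/* The parameters are set when the module is loaded, e.g.:
+ *   insmod mods.ko debug=3 multi_instance=1
+ */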
+
+/********************
+ * HELPER FUNCTIONS *
+ ********************/
+static int id_is_valid(unsigned char channel)
+{
+	if (channel == 0 || channel > MODS_CHANNEL_MAX)
+ return ERROR;
+
+ return OK;
+}
+
+static void mods_disable_all_devices(struct mods_file_private_data *priv)
+{
+ while (priv->enabled_devices != 0) {
+ struct en_dev_entry *old = priv->enabled_devices;
+#ifdef CONFIG_PCI
+ pci_disable_device(old->dev);
+#endif
+ priv->enabled_devices = old->next;
+ MODS_KFREE(old, sizeof(*old));
+ }
+}
+
+/*********************
+ * MAPPING FUNCTIONS *
+ *********************/
+static int mods_register_mapping(
+ struct file *fp,
+ struct SYS_MEM_MODS_INFO *p_mem_info,
+ NvU64 dma_addr,
+ NvU64 virtual_address,
+ NvU32 mapping_length)
+{
+ struct SYS_MAP_MEMORY *p_map_mem;
+ MODS_PRIVATE_DATA(private_data, fp);
+
+ LOG_ENT();
+
+ mods_debug_printk(DEBUG_MEM_DETAILED,
+ "mapped dma 0x%llx, virt 0x%llx, size 0x%x\n",
+ dma_addr, virtual_address, mapping_length);
+
+ MODS_KMALLOC(p_map_mem, sizeof(*p_map_mem));
+ if (unlikely(!p_map_mem)) {
+ LOG_EXT();
+ return -ENOMEM;
+ }
+ memset(p_map_mem, 0, sizeof(*p_map_mem));
+
+	p_map_mem->contiguous = (p_mem_info == NULL);
+ p_map_mem->dma_addr = dma_addr;
+ p_map_mem->virtual_addr = virtual_address;
+ p_map_mem->mapping_length = mapping_length;
+ p_map_mem->p_mem_info = p_mem_info;
+
+ list_add(&p_map_mem->list, private_data->mods_mapping_list);
+ LOG_EXT();
+ return OK;
+}
+
+static void mods_unregister_mapping(struct file *fp, NvU64 virtual_address)
+{
+ struct SYS_MAP_MEMORY *p_map_mem;
+ MODS_PRIVATE_DATA(private_data, fp);
+
+ struct list_head *head = private_data->mods_mapping_list;
+ struct list_head *iter;
+
+ LOG_ENT();
+
+ list_for_each(iter, head) {
+ p_map_mem = list_entry(iter, struct SYS_MAP_MEMORY, list);
+
+ if (p_map_mem->virtual_addr == virtual_address) {
+ /* remove from the list */
+ list_del(iter);
+
+ /* free our data struct which keeps track of mapping */
+ MODS_KFREE(p_map_mem, sizeof(*p_map_mem));
+
+			LOG_EXT();
+			return;
+ }
+ }
+ LOG_EXT();
+}
+
+static void mods_unregister_all_mappings(struct file *fp)
+{
+ struct SYS_MAP_MEMORY *p_map_mem;
+ MODS_PRIVATE_DATA(private_data, fp);
+
+ struct list_head *head = private_data->mods_mapping_list;
+ struct list_head *iter;
+ struct list_head *tmp;
+
+ LOG_ENT();
+
+ list_for_each_safe(iter, tmp, head) {
+ p_map_mem = list_entry(iter, struct SYS_MAP_MEMORY, list);
+ mods_unregister_mapping(fp, p_map_mem->virtual_addr);
+ }
+
+ LOG_EXT();
+}
+
+static pgprot_t mods_get_prot(NvU32 mem_type, pgprot_t prot)
+{
+ switch (mem_type) {
+ case MODS_MEMORY_CACHED:
+ return prot;
+
+ case MODS_MEMORY_UNCACHED:
+ return MODS_PGPROT_UC(prot);
+
+ case MODS_MEMORY_WRITECOMBINE:
+ return MODS_PGPROT_WC(prot);
+
+ default:
+ mods_warning_printk("unsupported memory type: %u\n",
+ mem_type);
+ return prot;
+ }
+}
+
+static pgprot_t mods_get_prot_for_range(struct file *fp, NvU64 dma_addr,
+ NvU64 size, pgprot_t prot)
+{
+ MODS_PRIVATE_DATA(private_data, fp);
+ if ((dma_addr == private_data->mem_type.dma_addr) &&
+ (size == private_data->mem_type.size)) {
+
+ return mods_get_prot(private_data->mem_type.type, prot);
+ }
+ return prot;
+}
+
+static char *mods_get_prot_str(NvU32 mem_type)
+{
+ switch (mem_type) {
+ case MODS_MEMORY_CACHED:
+ return "WB";
+
+ case MODS_MEMORY_UNCACHED:
+ return "UC";
+
+ case MODS_MEMORY_WRITECOMBINE:
+ return "WC";
+
+ default:
+ return "unknown";
+ }
+}
+
+static char *mods_get_prot_str_for_range(struct file *fp, NvU64 dma_addr,
+ NvU64 size)
+{
+ MODS_PRIVATE_DATA(private_data, fp);
+ if ((dma_addr == private_data->mem_type.dma_addr) &&
+ (size == private_data->mem_type.size)) {
+
+ return mods_get_prot_str(private_data->mem_type.type);
+ }
+ return "default";
+}
+
+/********************
+ * KERNEL FUNCTIONS *
+ ********************/
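+/* The VMA open/close callbacks reference-count the private mapping
+ * data, so a mapping is unregistered only when the last VMA that
+ * refers to it (e.g. after fork or split) is closed.
+ */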
+static void mods_krnl_vma_open(struct vm_area_struct *vma)
+{
+ struct mods_vm_private_data *vma_private_data;
+
+ LOG_ENT();
+ mods_debug_printk(DEBUG_MEM_DETAILED,
+ "open vma, virt 0x%lx, phys 0x%llx\n",
+ vma->vm_start,
+ (NvU64)(MODS_VMA_PGOFF(vma) << PAGE_SHIFT));
+
+ if (MODS_VMA_PRIVATE(vma)) {
+ vma_private_data = MODS_VMA_PRIVATE(vma);
+ atomic_inc(&vma_private_data->usage_count);
+ }
+ LOG_EXT();
+}
+
+static void mods_krnl_vma_close(struct vm_area_struct *vma)
+{
+ LOG_ENT();
+
+ if (MODS_VMA_PRIVATE(vma)) {
+ struct mods_vm_private_data *vma_private_data
+ = MODS_VMA_PRIVATE(vma);
+ if (atomic_dec_and_test(&vma_private_data->usage_count)) {
+ MODS_PRIVATE_DATA(private_data, vma_private_data->fp);
+ spin_lock(&private_data->lock);
+
+ /* we need to unregister the mapping */
+ mods_unregister_mapping(vma_private_data->fp,
+ vma->vm_start);
+ mods_debug_printk(DEBUG_MEM_DETAILED,
+ "closed vma, virt 0x%lx\n",
+ vma->vm_start);
+ MODS_VMA_PRIVATE(vma) = NULL;
+ MODS_KFREE(vma_private_data,
+ sizeof(*vma_private_data));
+
+ spin_unlock(&private_data->lock);
+ }
+ }
+ LOG_EXT();
+}
+
+static struct vm_operations_struct mods_krnl_vm_ops = {
+ .open = mods_krnl_vma_open,
+ .close = mods_krnl_vma_close
+};
+
+static int mods_krnl_open(struct inode *ip, struct file *fp)
+{
+ struct list_head *mods_alloc_list;
+ struct list_head *mods_mapping_list;
+ struct mods_file_private_data *private_data;
+ int id = 0;
+
+ LOG_ENT();
+
+ MODS_KMALLOC(mods_alloc_list, sizeof(struct list_head));
+ if (unlikely(!mods_alloc_list)) {
+ LOG_EXT();
+ return -ENOMEM;
+ }
+
+ MODS_KMALLOC(mods_mapping_list, sizeof(struct list_head));
+ if (unlikely(!mods_mapping_list)) {
+ MODS_KFREE(mods_alloc_list, sizeof(struct list_head));
+ LOG_EXT();
+ return -ENOMEM;
+ }
+
+ MODS_KMALLOC(private_data, sizeof(*private_data));
+ if (unlikely(!private_data)) {
+ MODS_KFREE(mods_alloc_list, sizeof(struct list_head));
+ MODS_KFREE(mods_mapping_list, sizeof(struct list_head));
+ LOG_EXT();
+ return -ENOMEM;
+ }
+
+ id = mods_alloc_channel();
+ if (id_is_valid(id) != OK) {
+ mods_error_printk("too many clients\n");
+ MODS_KFREE(mods_alloc_list, sizeof(struct list_head));
+ MODS_KFREE(mods_mapping_list, sizeof(struct list_head));
+ MODS_KFREE(private_data, sizeof(*private_data));
+ LOG_EXT();
+ return -EBUSY;
+ }
+
+ private_data->mods_id = id;
+ mods_irq_dev_set_pri(private_data->mods_id, private_data);
+
+ INIT_LIST_HEAD(mods_alloc_list);
+ INIT_LIST_HEAD(mods_mapping_list);
+ private_data->mods_alloc_list = mods_alloc_list;
+ private_data->mods_mapping_list = mods_mapping_list;
+ private_data->enabled_devices = 0;
+ private_data->mem_type.dma_addr = 0;
+ private_data->mem_type.size = 0;
+ private_data->mem_type.type = 0;
+
+ spin_lock_init(&private_data->lock);
+
+ init_waitqueue_head(&private_data->interrupt_event);
+
+ fp->private_data = private_data;
+
+ mods_info_printk("driver opened\n");
+ LOG_EXT();
+ return OK;
+}
+
+static int mods_krnl_close(struct inode *ip, struct file *fp)
+{
+ MODS_PRIVATE_DATA(private_data, fp);
+ unsigned char id = MODS_GET_FILE_PRIVATE_ID(fp);
+
+ LOG_ENT();
+
+ BUG_ON(id_is_valid(id) != OK);
+ mods_free_channel(id);
+ mods_irq_dev_clr_pri(private_data->mods_id);
+
+ mods_unregister_all_mappings(fp);
+ mods_unregister_all_alloc(fp);
+ mods_disable_all_devices(private_data);
+
+ MODS_KFREE(private_data->mods_alloc_list, sizeof(struct list_head));
+ MODS_KFREE(private_data->mods_mapping_list, sizeof(struct list_head));
+ MODS_KFREE(private_data, sizeof(*private_data));
+
+ mods_info_printk("driver closed\n");
+ LOG_EXT();
+ return OK;
+}
+
+static unsigned int mods_krnl_poll(struct file *fp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ MODS_PRIVATE_DATA(private_data, fp);
+ unsigned char id = MODS_GET_FILE_PRIVATE_ID(fp);
+
+ if (!(fp->f_flags & O_NONBLOCK)) {
+ mods_debug_printk(DEBUG_ISR_DETAILED, "poll wait\n");
+ poll_wait(fp, &private_data->interrupt_event, wait);
+ }
+	/* report POLLIN if any interrupts are pending on this channel */
+ mask |= mods_irq_event_check(id);
+ mods_debug_printk(DEBUG_ISR_DETAILED, "poll mask 0x%x\n", mask);
+ return mask;
+}
+
+static int mods_krnl_map_inner(struct file *fp, struct vm_area_struct *vma);
+
+static int mods_krnl_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct mods_vm_private_data *vma_private_data;
+
+ LOG_ENT();
+
+ vma->vm_ops = &mods_krnl_vm_ops;
+
+ MODS_KMALLOC(vma_private_data, sizeof(*vma_private_data));
+ if (unlikely(!vma_private_data)) {
+ LOG_EXT();
+ return -ENOMEM;
+ }
+
+ /* set private data for vm_area_struct */
+ atomic_set(&vma_private_data->usage_count, 0);
+ vma_private_data->fp = fp;
+ MODS_VMA_PRIVATE(vma) = vma_private_data;
+
+ /* call for the first time open function */
+ mods_krnl_vma_open(vma);
+
+ {
+ int ret = OK;
+ MODS_PRIVATE_DATA(private_data, fp);
+ spin_lock(&private_data->lock);
+ ret = mods_krnl_map_inner(fp, vma);
+ spin_unlock(&private_data->lock);
+ LOG_EXT();
+ return ret;
+ }
+}
+
+static int mods_krnl_map_inner(struct file *fp, struct vm_area_struct *vma)
+{
+ struct SYS_MEM_MODS_INFO *p_mem_info;
+ unsigned int pages;
+ int i, j;
+
+ pages = MODS_VMA_SIZE(vma) >> PAGE_SHIFT;
+
+ /* find already allocated memory */
+ p_mem_info = mods_find_alloc(fp, MODS_VMA_OFFSET(vma));
+
+ /* system memory */
+ if (p_mem_info != NULL) {
+ if (p_mem_info->alloc_type != MODS_ALLOC_TYPE_NON_CONTIG) {
+ NvU64 dma_addr = MODS_VMA_OFFSET(vma);
+ NvU32 pfn = MODS_DMA_TO_PHYS(dma_addr) >> PAGE_SHIFT;
+ pgprot_t prot = mods_get_prot(p_mem_info->cache_type,
+ vma->vm_page_prot);
+
+ mods_debug_printk(DEBUG_MEM,
+ "map contig sysmem: "
+ "dma 0x%llx, virt 0x%lx, size 0x%x, "
+ "caching %s\n",
+ dma_addr,
+ (unsigned long)vma->vm_start,
+ (unsigned int)MODS_VMA_SIZE(vma),
+ mods_get_prot_str(p_mem_info->cache_type));
+
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ pfn,
+ MODS_VMA_SIZE(vma),
+ prot)) {
+ mods_error_printk(
+ "failed to map contiguous memory\n");
+ return -EAGAIN;
+ }
+
+ /* MODS_VMA_OFFSET(vma) can change so it can't be used
+ * to register the mapping */
+ mods_register_mapping(fp,
+ p_mem_info,
+ dma_addr,
+ vma->vm_start,
+ MODS_VMA_SIZE(vma));
+ } else {
+ /* insert consecutive pages one at a time */
+
+ unsigned long start = 0;
+ NvU64 dma_addr = 0;
+ struct SYS_PAGE_TABLE **p_page_tbl
+ = p_mem_info->p_page_tbl;
+ const pgprot_t prot
+ = mods_get_prot(p_mem_info->cache_type,
+ vma->vm_page_prot);
+
+ mods_debug_printk(DEBUG_MEM,
+ "map noncontig sysmem: "
+ "virt 0x%lx, size 0x%x, caching %s\n",
+ (unsigned long)vma->vm_start,
+ (unsigned int)MODS_VMA_SIZE(vma),
+ mods_get_prot_str(p_mem_info->cache_type));
+
+ for (i = 0; i < p_mem_info->num_pages; i++) {
+ NvU64 offs = MODS_VMA_OFFSET(vma);
+ dma_addr = p_page_tbl[i]->dma_addr;
+ if ((offs >= dma_addr) &&
+ (offs < dma_addr + PAGE_SIZE)) {
+
+ break;
+ }
+ }
+
+ if (i == p_mem_info->num_pages) {
+ mods_error_printk(
+ "unable to find noncontiguous memory allocation\n");
+ return -EINVAL;
+ }
+
+ if ((i + pages) > p_mem_info->num_pages) {
+ mods_error_printk(
+ "requested mapping exceeds allocation's boundary!\n");
+ return -EINVAL;
+ }
+
+ start = vma->vm_start;
+ for (j = i; j < (i + pages); j++) {
+ dma_addr = MODS_DMA_TO_PHYS(
+ p_page_tbl[j]->dma_addr);
+ if (remap_pfn_range(vma,
+ start,
+ dma_addr>>PAGE_SHIFT,
+ PAGE_SIZE,
+ prot)) {
+ mods_error_printk(
+ "failed to map memory\n");
+ return -EAGAIN;
+ }
+
+ start += PAGE_SIZE;
+ }
+
+ /* MODS_VMA_OFFSET(vma) can change so it can't be used
+ * to register the mapping */
+ mods_register_mapping(fp,
+ p_mem_info,
+ p_page_tbl[i]->dma_addr,
+ vma->vm_start,
+ MODS_VMA_SIZE(vma));
+ }
+ } else {
+ /* device memory */
+
+ NvU64 dma_addr = MODS_VMA_OFFSET(vma);
+ mods_debug_printk(DEBUG_MEM,
+ "map device mem: "
+ "dma 0x%llx, virt 0x%lx, size 0x%x, caching %s\n",
+ dma_addr,
+ (unsigned long)vma->vm_start,
+ (unsigned int)MODS_VMA_SIZE(vma),
+ mods_get_prot_str_for_range(fp,
+ MODS_VMA_OFFSET(vma),
+ MODS_VMA_SIZE(vma)));
+
+ if (io_remap_pfn_range(
+ vma,
+ vma->vm_start,
+ dma_addr>>PAGE_SHIFT,
+ MODS_VMA_SIZE(vma),
+ mods_get_prot_for_range(
+ fp,
+ MODS_VMA_OFFSET(vma),
+ MODS_VMA_SIZE(vma),
+ vma->vm_page_prot))) {
+ mods_error_printk("failed to map device memory\n");
+ return -EAGAIN;
+ }
+
+ /* MODS_VMA_OFFSET(vma) can change so it can't be used to
+ * register the mapping */
+ mods_register_mapping(fp,
+ NULL,
+ dma_addr,
+ vma->vm_start,
+ MODS_VMA_SIZE(vma));
+ }
+ return OK;
+}
+
+/*************************
+ * ESCAPE CALL FUNCTIONS *
+ *************************/
+
+int esc_mods_get_api_version(struct file *pfile, struct MODS_GET_VERSION *p)
+{
+ p->version = MODS_DRIVER_VERSION;
+ return OK;
+}
+
+int esc_mods_get_kernel_version(struct file *pfile, struct MODS_GET_VERSION *p)
+{
+ p->version = MODS_KERNEL_VERSION;
+ return OK;
+}
+
+int esc_mods_set_driver_para(struct file *pfile, struct MODS_SET_PARA *p)
+{
+ int rc = OK;
+ return rc;
+}
+
+/**************
+ * IO control *
+ **************/
+
+static long mods_krnl_ioctl(struct file *fp,
+ unsigned int cmd,
+ unsigned long i_arg)
+{
+ int ret;
+ void *arg_copy = 0;
+ void *arg = (void *) i_arg;
+ int arg_size;
+
+ LOG_ENT();
+
+ arg_size = _IOC_SIZE(cmd);
+
+ if (arg_size > 0) {
+ MODS_KMALLOC(arg_copy, arg_size);
+ if (unlikely(!arg_copy)) {
+ LOG_EXT();
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(arg_copy, arg, arg_size)) {
+ mods_error_printk("failed to copy ioctl data\n");
+ MODS_KFREE(arg_copy, arg_size);
+ LOG_EXT();
+ return -EFAULT;
+ }
+ }
+
+ switch (cmd) {
+
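+/* Dispatch helpers: each macro validates that the ioctl argument size
+ * matches the expected structure, calls the escape function and, for
+ * MODS_IOCTL, copies the result back to user space on success.
+ */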
+#define MODS_IOCTL(code, function, argtype)\
+ ({\
+ do {\
+ mods_debug_printk(DEBUG_IOCTL, "ioctl(" #code ")\n");\
+ if (arg_size != sizeof(struct argtype)) {\
+ ret = -EINVAL;\
+ mods_error_printk( \
+ "invalid parameter passed to ioctl " #code \
+ "\n");\
+ } else {\
+ ret = function(fp, (struct argtype *)arg_copy);\
+ if ((ret == OK) && \
+ copy_to_user(arg, arg_copy, arg_size)) {\
+ ret = -EFAULT;\
+ mods_error_printk( \
+ "copying return value for ioctl " \
+ #code " to user space failed\n");\
+ } \
+ } \
+ } while (0);\
+ })
+
+#define MODS_IOCTL_NORETVAL(code, function, argtype)\
+ ({\
+ do {\
+ mods_debug_printk(DEBUG_IOCTL, "ioctl(" #code ")\n");\
+ if (arg_size != sizeof(struct argtype)) {\
+ ret = -EINVAL;\
+ mods_error_printk( \
+ "invalid parameter passed to ioctl " #code \
+ "\n");\
+ } else {\
+ ret = function(fp, (struct argtype *)arg_copy);\
+ } \
+ } while (0);\
+ })
+
+#define MODS_IOCTL_VOID(code, function)\
+ ({\
+ do {\
+ mods_debug_printk(DEBUG_IOCTL, "ioctl(" #code ")\n");\
+ if (arg_size != 0) {\
+ ret = -EINVAL;\
+ mods_error_printk( \
+ "invalid parameter passed to ioctl " #code \
+ "\n");\
+ } else {\
+ ret = function(fp);\
+ } \
+ } while (0);\
+ })
+
+#ifdef CONFIG_PCI
+ case MODS_ESC_FIND_PCI_DEVICE:
+ MODS_IOCTL(MODS_ESC_FIND_PCI_DEVICE,
+ esc_mods_find_pci_dev, MODS_FIND_PCI_DEVICE);
+ break;
+
+ case MODS_ESC_FIND_PCI_CLASS_CODE:
+ MODS_IOCTL(MODS_ESC_FIND_PCI_CLASS_CODE,
+ esc_mods_find_pci_class_code,
+ MODS_FIND_PCI_CLASS_CODE);
+ break;
+
+ case MODS_ESC_PCI_READ:
+ MODS_IOCTL(MODS_ESC_PCI_READ, esc_mods_pci_read, MODS_PCI_READ);
+ break;
+
+ case MODS_ESC_PCI_WRITE:
+ MODS_IOCTL_NORETVAL(MODS_ESC_PCI_WRITE,
+ esc_mods_pci_write, MODS_PCI_WRITE);
+ break;
+
+ case MODS_ESC_PCI_BUS_ADD_DEVICES:
+ MODS_IOCTL_NORETVAL(MODS_ESC_PCI_BUS_ADD_DEVICES,
+ esc_mods_pci_bus_add_dev,
+ MODS_PCI_BUS_ADD_DEVICES);
+ break;
+
+ case MODS_ESC_PIO_READ:
+ MODS_IOCTL(MODS_ESC_PIO_READ,
+ esc_mods_pio_read, MODS_PIO_READ);
+ break;
+
+ case MODS_ESC_PIO_WRITE:
+ MODS_IOCTL_NORETVAL(MODS_ESC_PIO_WRITE,
+ esc_mods_pio_write, MODS_PIO_WRITE);
+ break;
+
+ case MODS_ESC_DEVICE_NUMA_INFO:
+ MODS_IOCTL(MODS_ESC_DEVICE_NUMA_INFO,
+ esc_mods_device_numa_info,
+ MODS_DEVICE_NUMA_INFO);
+ break;
+#endif
+
+ case MODS_ESC_ALLOC_PAGES:
+ MODS_IOCTL(MODS_ESC_ALLOC_PAGES,
+ esc_mods_alloc_pages, MODS_ALLOC_PAGES);
+ break;
+
+ case MODS_ESC_DEVICE_ALLOC_PAGES:
+ MODS_IOCTL(MODS_ESC_DEVICE_ALLOC_PAGES,
+ esc_mods_device_alloc_pages,
+ MODS_DEVICE_ALLOC_PAGES);
+ break;
+
+ case MODS_ESC_FREE_PAGES:
+ MODS_IOCTL(MODS_ESC_FREE_PAGES,
+ esc_mods_free_pages, MODS_FREE_PAGES);
+ break;
+
+ case MODS_ESC_GET_PHYSICAL_ADDRESS:
+ MODS_IOCTL(MODS_ESC_GET_PHYSICAL_ADDRESS,
+ esc_mods_get_phys_addr,
+ MODS_GET_PHYSICAL_ADDRESS);
+ break;
+
+ case MODS_ESC_SET_MEMORY_TYPE:
+ MODS_IOCTL_NORETVAL(MODS_ESC_SET_MEMORY_TYPE,
+ esc_mods_set_mem_type,
+ MODS_MEMORY_TYPE);
+ break;
+
+ case MODS_ESC_VIRTUAL_TO_PHYSICAL:
+ MODS_IOCTL(MODS_ESC_VIRTUAL_TO_PHYSICAL,
+ esc_mods_virtual_to_phys,
+ MODS_VIRTUAL_TO_PHYSICAL);
+ break;
+
+ case MODS_ESC_PHYSICAL_TO_VIRTUAL:
+ MODS_IOCTL(MODS_ESC_PHYSICAL_TO_VIRTUAL,
+ esc_mods_phys_to_virtual, MODS_PHYSICAL_TO_VIRTUAL);
+ break;
+
+ case MODS_ESC_IRQ_REGISTER:
+ case MODS_ESC_MSI_REGISTER:
+ ret = -EINVAL;
+ break;
+
+ case MODS_ESC_REGISTER_IRQ:
+ MODS_IOCTL_NORETVAL(MODS_ESC_REGISTER_IRQ,
+ esc_mods_register_irq, MODS_REGISTER_IRQ);
+ break;
+
+ case MODS_ESC_UNREGISTER_IRQ:
+ MODS_IOCTL_NORETVAL(MODS_ESC_UNREGISTER_IRQ,
+ esc_mods_unregister_irq, MODS_REGISTER_IRQ);
+ break;
+
+ case MODS_ESC_QUERY_IRQ:
+ MODS_IOCTL(MODS_ESC_QUERY_IRQ,
+ esc_mods_query_irq, MODS_QUERY_IRQ);
+ break;
+
+ case MODS_ESC_SET_IRQ_MASK:
+ MODS_IOCTL_NORETVAL(MODS_ESC_SET_IRQ_MASK,
+ esc_mods_set_irq_mask, MODS_SET_IRQ_MASK);
+ break;
+
+ case MODS_ESC_IRQ_HANDLED:
+ MODS_IOCTL_NORETVAL(MODS_ESC_IRQ_HANDLED,
+ esc_mods_irq_handled, MODS_REGISTER_IRQ);
+ break;
+
+#ifdef CONFIG_ACPI
+ case MODS_ESC_EVAL_ACPI_METHOD:
+ MODS_IOCTL(MODS_ESC_EVAL_ACPI_METHOD,
+ esc_mods_eval_acpi_method, MODS_EVAL_ACPI_METHOD);
+ break;
+
+ case MODS_ESC_EVAL_DEV_ACPI_METHOD:
+ MODS_IOCTL(MODS_ESC_EVAL_DEV_ACPI_METHOD,
+ esc_mods_eval_dev_acpi_method,
+ MODS_EVAL_DEV_ACPI_METHOD);
+ break;
+
+ case MODS_ESC_ACPI_GET_DDC:
+ MODS_IOCTL(MODS_ESC_ACPI_GET_DDC,
+ esc_mods_acpi_get_ddc, MODS_ACPI_GET_DDC);
+ break;
+
+#elif defined(CONFIG_ARCH_TEGRA)
+ case MODS_ESC_EVAL_ACPI_METHOD:
+ case MODS_ESC_EVAL_DEV_ACPI_METHOD:
+ case MODS_ESC_ACPI_GET_DDC:
+ /* Silent failure on Tegra to avoid clogging kernel log */
+ ret = -EINVAL;
+ break;
+#endif
+ case MODS_ESC_GET_API_VERSION:
+ MODS_IOCTL(MODS_ESC_GET_API_VERSION,
+ esc_mods_get_api_version, MODS_GET_VERSION);
+ break;
+
+ case MODS_ESC_GET_KERNEL_VERSION:
+ MODS_IOCTL(MODS_ESC_GET_KERNEL_VERSION,
+ esc_mods_get_kernel_version, MODS_GET_VERSION);
+ break;
+
+ case MODS_ESC_SET_DRIVER_PARA:
+ MODS_IOCTL_NORETVAL(MODS_ESC_SET_DRIVER_PARA,
+ esc_mods_set_driver_para, MODS_SET_PARA);
+ break;
+
+#ifdef CONFIG_ARCH_TEGRA
+ case MODS_ESC_GET_CLOCK_HANDLE:
+ MODS_IOCTL(MODS_ESC_GET_CLOCK_HANDLE,
+ esc_mods_get_clock_handle, MODS_GET_CLOCK_HANDLE);
+ break;
+
+ case MODS_ESC_SET_CLOCK_RATE:
+ MODS_IOCTL_NORETVAL(MODS_ESC_SET_CLOCK_RATE,
+ esc_mods_set_clock_rate, MODS_CLOCK_RATE);
+ break;
+
+ case MODS_ESC_GET_CLOCK_RATE:
+ MODS_IOCTL(MODS_ESC_GET_CLOCK_RATE,
+ esc_mods_get_clock_rate, MODS_CLOCK_RATE);
+ break;
+
+ case MODS_ESC_GET_CLOCK_MAX_RATE:
+ MODS_IOCTL(MODS_ESC_GET_CLOCK_MAX_RATE,
+ esc_mods_get_clock_max_rate, MODS_CLOCK_RATE);
+ break;
+
+ case MODS_ESC_SET_CLOCK_MAX_RATE:
+ MODS_IOCTL_NORETVAL(MODS_ESC_SET_CLOCK_MAX_RATE,
+ esc_mods_set_clock_max_rate,
+ MODS_CLOCK_RATE);
+ break;
+
+ case MODS_ESC_SET_CLOCK_PARENT:
+ MODS_IOCTL_NORETVAL(MODS_ESC_SET_CLOCK_PARENT,
+ esc_mods_set_clock_parent,
+ MODS_CLOCK_PARENT);
+ break;
+
+ case MODS_ESC_GET_CLOCK_PARENT:
+ MODS_IOCTL(MODS_ESC_GET_CLOCK_PARENT,
+ esc_mods_get_clock_parent, MODS_CLOCK_PARENT);
+ break;
+
+ case MODS_ESC_ENABLE_CLOCK:
+ MODS_IOCTL_NORETVAL(MODS_ESC_ENABLE_CLOCK,
+ esc_mods_enable_clock, MODS_CLOCK_HANDLE);
+ break;
+
+ case MODS_ESC_DISABLE_CLOCK:
+ MODS_IOCTL_NORETVAL(MODS_ESC_DISABLE_CLOCK,
+ esc_mods_disable_clock, MODS_CLOCK_HANDLE);
+ break;
+
+ case MODS_ESC_IS_CLOCK_ENABLED:
+ MODS_IOCTL(MODS_ESC_IS_CLOCK_ENABLED,
+ esc_mods_is_clock_enabled, MODS_CLOCK_ENABLED);
+ break;
+
+ case MODS_ESC_CLOCK_RESET_ASSERT:
+ MODS_IOCTL_NORETVAL(MODS_ESC_CLOCK_RESET_ASSERT,
+ esc_mods_clock_reset_assert,
+ MODS_CLOCK_HANDLE);
+ break;
+
+ case MODS_ESC_CLOCK_RESET_DEASSERT:
+ MODS_IOCTL_NORETVAL(MODS_ESC_CLOCK_RESET_DEASSERT,
+ esc_mods_clock_reset_deassert,
+ MODS_CLOCK_HANDLE);
+ break;
+
+ case MODS_ESC_FLUSH_CPU_CACHE_RANGE:
+ MODS_IOCTL_NORETVAL(MODS_ESC_FLUSH_CPU_CACHE_RANGE,
+ esc_mods_flush_cpu_cache_range,
+ MODS_FLUSH_CPU_CACHE_RANGE);
+ break;
+#endif
+ case MODS_ESC_MEMORY_BARRIER:
+ MODS_IOCTL_VOID(MODS_ESC_MEMORY_BARRIER,
+ esc_mods_memory_barrier);
+ break;
+
+ default:
+ mods_error_printk("unrecognized ioctl (0x%x)\n", cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (arg_copy)
+ MODS_KFREE(arg_copy, arg_size);
+
+ LOG_EXT();
+ return ret;
+}
diff --git a/drivers/misc/mods/mods_mem.c b/drivers/misc/mods/mods_mem.c
new file mode 100644
index 000000000000..c605c0e135e3
--- /dev/null
+++ b/drivers/misc/mods/mods_mem.c
@@ -0,0 +1,1141 @@
+/*
+ * mods_mem.c - This file is part of NVIDIA MODS kernel driver.
+ *
+ * Copyright (c) 2008-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA MODS kernel driver is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * NVIDIA MODS kernel driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NVIDIA MODS kernel driver.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mods_internal.h"
+
+#include <linux/pagemap.h>
+#include <linux/rbtree.h>
+
+#ifdef CONFIG_BIGPHYS_AREA
+#include <linux/bigphysarea.h>
+#endif
+
+#define P2M(x) ((x) >> (20 - PAGE_SHIFT))
+
+static spinlock_t km_lock;
+static NvU32 km_usage;
+
+static struct rb_root km_root;
+static int mods_post_alloc(NvU64 vaddr, NvU64 paddr, NvU64 pages,
+ NvU32 cachetype);
+
+struct mem_tracker {
+ void *addr;
+ NvU32 size;
+ const char *file;
+ NvU32 line;
+ struct rb_node node;
+};
+
+/************************************************************************
+ * Kernel memory allocation tracker
+ *
+ * Records every allocation from the start and reports memory leaks
+ * at driver unload time.
+ ************************************************************************/
+
+/*********************
+ * PRIVATE FUNCTIONS *
+ *********************/
+
+/* Insert pmem_t into the indicated red-black tree, using pmem_t->addr
+ * as the sort key. Return 1 on success, or 0 if the insert failed
+ * because there is already a node in the tree with that addr.
+ */
+static int mods_insert_mem_tracker(struct rb_root *root,
+ struct mem_tracker *pmem_t)
+
+{
+ struct rb_node *parent_node = NULL;
+ struct mem_tracker *parent_data = NULL;
+ struct rb_node **child_ptr = &root->rb_node;
+
+ while (*child_ptr != NULL) {
+ parent_node = *child_ptr;
+ parent_data = rb_entry(parent_node, struct mem_tracker, node);
+ if (pmem_t->addr < parent_data->addr)
+ child_ptr = &parent_node->rb_left;
+ else if (pmem_t->addr > parent_data->addr)
+ child_ptr = &parent_node->rb_right;
+ else
+ return 0;
+ }
+
+ rb_link_node(&pmem_t->node, parent_node, child_ptr);
+ rb_insert_color(&pmem_t->node, root);
+ return 1;
+}
+
+/* Search the red-black tree at root for a mem_tracker with the
+ * indicated address. Return the node on success, or NULL on failure.
+ */
+static struct mem_tracker *mods_find_mem_tracker(struct rb_root *root,
+ void *addr)
+{
+ struct rb_node *node = root->rb_node;
+ struct mem_tracker *pmem_t;
+
+ while (node != NULL) {
+ pmem_t = rb_entry(node, struct mem_tracker, node);
+ if (addr < pmem_t->addr)
+ node = node->rb_left;
+ else if (addr > pmem_t->addr)
+ node = node->rb_right;
+ else
+ return pmem_t;
+ }
+ return NULL;
+}
+
+static void mods_list_mem(void)
+{
+ struct rb_root *root = &km_root;
+ struct rb_node *iter;
+ struct mem_tracker *pmem_t;
+
+ for (iter = rb_first(root); iter; iter = rb_next(iter)) {
+ pmem_t = rb_entry(iter, struct mem_tracker, node);
+
+ mods_debug_printk(DEBUG_MEM,
+ "leak: virt %p, size 0x%x, "
+				  "alloc'd by %s:%u\n",
+ pmem_t->addr,
+ (unsigned int) pmem_t->size,
+ pmem_t->file,
+ (unsigned int) pmem_t->line);
+ }
+}
+
+static void mods_del_list_mem(void)
+{
+ struct rb_root *root = &km_root;
+ struct rb_node *node;
+ struct mem_tracker *pmem_t;
+
+ while (!RB_EMPTY_ROOT(root)) {
+ node = rb_first(root);
+ pmem_t = rb_entry(node, struct mem_tracker, node);
+
+ /* free the memory */
+ rb_erase(node, root);
+ MODS_FORCE_KFREE(pmem_t->addr);
+ MEMDBG_FREE(pmem_t);
+ }
+}
+
+#if !defined(CONFIG_ARCH_TEGRA) || defined(CONFIG_CPA) ||\
+ defined(CONFIG_ARCH_TEGRA_3x_SOC)
+static int mods_set_mem_type(NvU64 virt_addr, NvU64 pages, NvU32 type)
+{
+ if (type == MODS_MEMORY_UNCACHED)
+ return MODS_SET_MEMORY_UC(virt_addr, pages);
+ else if (type == MODS_MEMORY_WRITECOMBINE)
+ return MODS_SET_MEMORY_WC(virt_addr, pages);
+ return 0;
+}
+#endif
+
+static int mods_restore_mem_type(NvU64 virt_addr,
+ NvU64 pages,
+ NvU32 type_override)
+{
+ if ((type_override == MODS_MEMORY_UNCACHED) ||
+ (type_override == MODS_MEMORY_WRITECOMBINE)) {
+ return MODS_SET_MEMORY_WB(virt_addr, pages);
+ }
+ return 0;
+}
+
+static void mods_free_contig_pages(struct SYS_MEM_MODS_INFO *p_mem_info)
+{
+#ifdef CONFIG_BIGPHYS_AREA
+ if (p_mem_info->alloc_type == MODS_ALLOC_TYPE_BIGPHYS_AREA) {
+ bigphysarea_free_pages((void *)p_mem_info->logical_addr);
+ } else
+#endif
+ __MODS_FREE_PAGES(p_mem_info->p_page, p_mem_info->order);
+}
+
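+/* Allocate physically contiguous system pages.  The request is rounded
+ * up to a power-of-two page count for alloc_pages(); if that fails and
+ * CONFIG_BIGPHYS_AREA is available, the bigphysarea pool is used as a
+ * fallback.  On success p_mem_info->dma_addr holds the bus address of
+ * the allocation.
+ */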
+static void mods_alloc_contig_sys_pages(struct SYS_MEM_MODS_INFO *p_mem_info)
+{
+ NvU32 order = 0;
+ NvU64 phys_addr;
+ NvU32 num_pages = 0;
+ NvU32 i_page = 0;
+ LOG_ENT();
+
+ while ((1 << order) < p_mem_info->num_pages)
+ order++;
+ p_mem_info->order = order;
+ num_pages = 1 << order;
+
+ __MODS_ALLOC_PAGES(p_mem_info->p_page, order,
+ GFP_KERNEL | __GFP_COMP
+ | (((p_mem_info->addr_bits & 0xff) == 32)
+ ? __GFP_DMA32 : __GFP_HIGHMEM),
+ p_mem_info->numa_node
+ );
+
+#ifdef CONFIG_BIGPHYS_AREA
+ if (p_mem_info->p_page == NULL) {
+ mods_debug_printk(DEBUG_MEM,
+ "failed to allocate %u contiguous pages, falling back to bigphysarea\n",
+ num_pages);
+ p_mem_info->logical_addr = (NvU64)
+ bigphysarea_alloc_pages(num_pages, 0, GFP_KERNEL);
+ p_mem_info->alloc_type = MODS_ALLOC_TYPE_BIGPHYS_AREA;
+ }
+#endif
+
+ if (p_mem_info->p_page == NULL && p_mem_info->logical_addr == 0) {
+ LOG_EXT();
+ return;
+ }
+
+#ifdef CONFIG_BIGPHYS_AREA
+ if (p_mem_info->alloc_type == MODS_ALLOC_TYPE_BIGPHYS_AREA) {
+ phys_addr = __pa(p_mem_info->logical_addr);
+ } else
+#endif
+ phys_addr = page_to_phys(p_mem_info->p_page);
+ if (phys_addr == 0) {
+ mods_error_printk(
+ "alloc_contig_sys_pages: failed to lookup physical address\n");
+ mods_free_contig_pages(p_mem_info);
+ p_mem_info->logical_addr = 0;
+ LOG_EXT();
+ return;
+ }
+ p_mem_info->dma_addr = MODS_PHYS_TO_DMA(phys_addr);
+
+ mods_debug_printk(DEBUG_MEM_DETAILED,
+		"alloc_contig_sys_pages: alloc'd %u contig pages @ dma addr 0x%llx\n",
+ num_pages, p_mem_info->dma_addr);
+
+ if (((p_mem_info->addr_bits & 0xFF) == 32) &&
+ (p_mem_info->dma_addr + p_mem_info->length > 0x100000000ULL)) {
+
+ mods_error_printk(
+ "alloc_contig_sys_pages: alloc'd memory exceeds 32-bit addressing\n");
+ mods_free_contig_pages(p_mem_info);
+ p_mem_info->logical_addr = 0;
+ LOG_EXT();
+ return;
+ }
+
+ for (i_page = 0; i_page < num_pages; i_page++) {
+ NvU64 ptr = 0;
+#ifdef CONFIG_BIGPHYS_AREA
+ if (p_mem_info->alloc_type == MODS_ALLOC_TYPE_BIGPHYS_AREA) {
+ ptr = p_mem_info->logical_addr + i_page * PAGE_SIZE;
+ } else
+#endif
+ ptr = (NvU64)(size_t)kmap(p_mem_info->p_page + i_page);
+ if (ptr == 0) {
+ mods_error_printk(
+ "alloc_contig_sys_pages: unable to map pages\n");
+ mods_free_contig_pages(p_mem_info);
+ p_mem_info->logical_addr = 0;
+ LOG_EXT();
+ return;
+ }
+ if (mods_post_alloc(ptr,
+ phys_addr,
+ 1,
+ p_mem_info->cache_type)) {
+ mods_error_printk(
+ "alloc_contig_sys_pages: failed to set caching type\n");
+ mods_free_contig_pages(p_mem_info);
+ p_mem_info->logical_addr = 0;
+ LOG_EXT();
+ return;
+ }
+#ifdef CONFIG_BIGPHYS_AREA
+ if (p_mem_info->alloc_type != MODS_ALLOC_TYPE_BIGPHYS_AREA)
+#endif
+ kunmap(p_mem_info->p_page + i_page);
+ }
+ LOG_EXT();
+}
+
+static void mods_free_contig_sys_mem(struct SYS_MEM_MODS_INFO *p_mem_info)
+{
+ NvU32 num_pages = 1 << p_mem_info->order;
+ NvU32 i_page = 0;
+
+ for (i_page = 0; i_page < num_pages; i_page++) {
+ NvU64 ptr = 0;
+#ifdef CONFIG_BIGPHYS_AREA
+ if (p_mem_info->alloc_type == MODS_ALLOC_TYPE_BIGPHYS_AREA) {
+ ptr = p_mem_info->logical_addr + i_page * PAGE_SIZE;
+ } else
+#endif
+ ptr = (NvU64)(size_t)kmap(p_mem_info->p_page + i_page);
+ mods_restore_mem_type(ptr, 1, p_mem_info->cache_type);
+#ifdef CONFIG_BIGPHYS_AREA
+ if (p_mem_info->alloc_type != MODS_ALLOC_TYPE_BIGPHYS_AREA)
+#endif
+ kunmap(p_mem_info->p_page + i_page);
+ }
+ mods_free_contig_pages(p_mem_info);
+}
+
+static void mods_free_noncontig_sys_mem(struct SYS_MEM_MODS_INFO *p_mem_info)
+{
+ int i;
+ int pta_size;
+ struct SYS_PAGE_TABLE *pt;
+
+ pta_size = p_mem_info->num_pages * sizeof(pt);
+
+ if (p_mem_info->p_page_tbl) {
+ for (i = 0; i < p_mem_info->num_pages; i++) {
+ void *ptr;
+ pt = p_mem_info->p_page_tbl[i];
+ if (!pt)
+ continue;
+ if (!pt->p_page) {
+ MODS_KFREE(pt, sizeof(*pt));
+ continue;
+ }
+ ptr = kmap(pt->p_page);
+ if (ptr != NULL) {
+ mods_restore_mem_type((NvU64)(size_t)ptr,
+ 1,
+ p_mem_info->cache_type);
+ kunmap(pt->p_page);
+ }
+ __MODS_FREE_PAGES(pt->p_page, 0);
+ MODS_KFREE(pt, sizeof(*pt));
+ }
+ MODS_KFREE(p_mem_info->p_page_tbl, pta_size);
+ p_mem_info->p_page_tbl = 0;
+ }
+}
+
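+/* Allocate non-contiguous system memory one page at a time, recording
+ * each page and its DMA address in p_mem_info->p_page_tbl.
+ */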
+static void mods_alloc_noncontig_sys_pages(struct SYS_MEM_MODS_INFO *p_mem_info)
+{
+ int pta_size;
+ int i;
+ struct SYS_PAGE_TABLE *pt;
+
+ LOG_ENT();
+
+ pta_size = p_mem_info->num_pages * sizeof(pt);
+
+ MODS_KMALLOC(p_mem_info->p_page_tbl, pta_size);
+ if (unlikely(!p_mem_info->p_page_tbl))
+ goto failed;
+ memset(p_mem_info->p_page_tbl, 0, pta_size);
+
+ /* allocate resources */
+ for (i = 0; i < p_mem_info->num_pages; i++) {
+ MODS_KMALLOC(p_mem_info->p_page_tbl[i], sizeof(*pt));
+ if (unlikely(!p_mem_info->p_page_tbl[i]))
+ goto failed;
+ memset(p_mem_info->p_page_tbl[i], 0, sizeof(*pt));
+ }
+
+ /* alloc pages */
+ for (i = 0; i < p_mem_info->num_pages; i++) {
+ NvU64 phys_addr = 0;
+ pt = p_mem_info->p_page_tbl[i];
+
+ __MODS_ALLOC_PAGES(pt->p_page, 0, GFP_KERNEL
+ | (((p_mem_info->addr_bits & 0xff) == 32)
+ ? __GFP_DMA32 : __GFP_HIGHMEM),
+ p_mem_info->numa_node
+ );
+ if (pt->p_page == NULL) {
+ mods_error_printk(
+ "can't allocate single page with alloc_pages\n");
+ goto failed;
+ }
+ phys_addr = page_to_phys(pt->p_page);
+ if (phys_addr == 0) {
+ mods_error_printk(
+ "alloc_noncontig_sys_pages: failed to lookup phys addr\n");
+ goto failed;
+ }
+ pt->dma_addr = MODS_PHYS_TO_DMA(phys_addr);
+ mods_debug_printk(DEBUG_MEM_DETAILED,
+ "%d-th page is alloc'd, dma_addr=0x%llx\n",
+ i,
+ pt->dma_addr);
+
+ {
+ void *ptr = kmap(pt->p_page);
+ int ret;
+ if (ptr == NULL) {
+ mods_error_printk(
+ "alloc_noncontig_sys_pages: unable to map page\n");
+ goto failed;
+ }
+ ret = mods_post_alloc((NvU64)(size_t)ptr,
+ phys_addr,
+ 1,
+ p_mem_info->cache_type);
+ kunmap(pt->p_page);
+ if (ret) {
+ mods_error_printk(
+ "alloc_noncontig_sys_pages: failed to set caching type to uncached\n");
+ goto failed;
+ }
+ }
+ }
+
+ return;
+
+failed:
+ mods_free_noncontig_sys_mem(p_mem_info);
+}
+
+static void mods_register_alloc(struct file *fp,
+ struct SYS_MEM_MODS_INFO *p_mem_info)
+{
+ MODS_PRIVATE_DATA(private_data, fp);
+ spin_lock(&private_data->lock);
+ list_add(&p_mem_info->list, private_data->mods_alloc_list);
+ spin_unlock(&private_data->lock);
+}
+
+static void mods_unregister_and_free(struct file *fp,
+ struct SYS_MEM_MODS_INFO *p_del_mem)
+{
+ struct SYS_MEM_MODS_INFO *p_mem_info;
+
+ MODS_PRIVATE_DATA(private_data, fp);
+ struct list_head *head;
+ struct list_head *iter;
+
+ spin_lock(&private_data->lock);
+
+ head = private_data->mods_alloc_list;
+
+ list_for_each(iter, head) {
+ p_mem_info = list_entry(iter, struct SYS_MEM_MODS_INFO, list);
+
+ if (p_del_mem == p_mem_info) {
+ /* remove from the list */
+ list_del(iter);
+
+ spin_unlock(&private_data->lock);
+
+ if (p_mem_info->alloc_type !=
+ MODS_ALLOC_TYPE_NON_CONTIG) {
+ /* was a contiguous alloc */
+ mods_free_contig_sys_mem(p_mem_info);
+ } else {
+ /* was a normal, noncontiguous alloc */
+ mods_free_noncontig_sys_mem(p_mem_info);
+ }
+
+ /* free our data struct that keeps track of this
+ * allocation */
+ MODS_KFREE(p_mem_info, sizeof(*p_mem_info));
+
+ return;
+ }
+ }
+
+ spin_unlock(&private_data->lock);
+
+ mods_error_printk(
+		"mods_unregister_and_free: can't unregister allocation\n");
+}
+
+/********************
+ * PUBLIC FUNCTIONS *
+ ********************/
+void mods_init_mem(void)
+{
+ km_root = RB_ROOT;
+ spin_lock_init(&km_lock);
+ km_usage = 0;
+}
+
+/* implements mods kmalloc */
+void mods_add_mem(void *addr, NvU32 size, const char *file, NvU32 line)
+{
+ struct mem_tracker *mem_t;
+ unsigned long __eflags;
+
+ spin_lock_irqsave(&km_lock, __eflags);
+
+ km_usage += size;
+
+ MEMDBG_ALLOC(mem_t, sizeof(struct mem_tracker));
+ if (mem_t == NULL) {
+ spin_unlock_irqrestore(&km_lock, __eflags);
+ return;
+ }
+ mem_t->addr = addr;
+ mem_t->size = size;
+ mem_t->file = file;
+ mem_t->line = line;
+
+ if (!mods_insert_mem_tracker(&km_root, mem_t)) {
+ mods_error_printk(
+			"mods_add_mem: address already registered\n");
+ }
+
+ spin_unlock_irqrestore(&km_lock, __eflags);
+}
+
+/* implements mods kfree */
+void mods_del_mem(void *addr, NvU32 size, const char *file, NvU32 line)
+{
+ struct rb_root *root = &km_root;
+ struct mem_tracker *pmem_t;
+ unsigned long __eflags;
+
+ spin_lock_irqsave(&km_lock, __eflags);
+
+ km_usage -= size;
+
+ pmem_t = mods_find_mem_tracker(root, addr);
+ if (pmem_t) {
+ if (pmem_t->size != size)
+ mods_error_printk(
+ "mods_del_mem size mismatch on free\n");
+ rb_erase(&pmem_t->node, root);
+ MEMDBG_FREE(pmem_t);
+ } else {
+ /* no allocation with given address */
+ mods_error_printk(
+ "mods_del_mem no allocation with given address\n");
+ }
+
+ spin_unlock_irqrestore(&km_lock, __eflags);
+}
+
+void mods_check_mem(void)
+{
+ if (km_usage != 0) {
+ mods_warning_printk("memory leaks detected: 0x%x bytes\n",
+ km_usage);
+ mods_list_mem();
+ mods_del_list_mem();
+ }
+}
+
+void mods_unregister_all_alloc(struct file *fp)
+{
+ struct SYS_MEM_MODS_INFO *p_mem_info;
+
+ MODS_PRIVATE_DATA(private_data, fp);
+ struct list_head *head = private_data->mods_alloc_list;
+ struct list_head *iter;
+ struct list_head *tmp;
+
+ list_for_each_safe(iter, tmp, head) {
+ p_mem_info = list_entry(iter, struct SYS_MEM_MODS_INFO, list);
+ mods_unregister_and_free(fp, p_mem_info);
+ }
+}
+
+/* Returns the offset of the given DMA address within the allocation.
+ * If the DMA address doesn't belong to the allocation, returns ERROR.
+ */
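+/* For example, in a non-contiguous allocation a dma_addr that lands
+ * 0x40 bytes into the third page resolves to
+ * *ret_offs == 2 * PAGE_SIZE + 0x40, since each preceding page
+ * contributes PAGE_SIZE to the offset.
+ */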
+int mods_get_alloc_offset(struct SYS_MEM_MODS_INFO *p_mem_info,
+ NvU64 dma_addr,
+ NvU32 *ret_offs)
+{
+ int i;
+ int offset = 0;
+
+ if (p_mem_info->alloc_type != MODS_ALLOC_TYPE_NON_CONTIG) {
+ if (p_mem_info->dma_addr <= dma_addr &&
+ p_mem_info->dma_addr + p_mem_info->length > dma_addr) {
+
+ *ret_offs = dma_addr - p_mem_info->dma_addr;
+ return OK;
+ }
+ } else {
+ /* Non-contiguous: one page at a time */
+ for (i = 0; i < p_mem_info->num_pages; i++) {
+ NvU64 start_addr = p_mem_info->p_page_tbl[i]->dma_addr;
+ if (start_addr <= dma_addr &&
+ start_addr + PAGE_SIZE > dma_addr) {
+
+ offset = offset + dma_addr - start_addr;
+ *ret_offs = offset;
+ return OK;
+ }
+ offset += PAGE_SIZE;
+ }
+ }
+
+ /* Physical address doesn't belong to the allocation */
+ return ERROR;
+}
+
+struct SYS_MEM_MODS_INFO *mods_find_alloc(struct file *fp, NvU64 phys_addr)
+{
+ MODS_PRIVATE_DATA(private_data, fp);
+ struct list_head *plist_head = private_data->mods_alloc_list;
+ struct list_head *plist_iter;
+ struct SYS_MEM_MODS_INFO *p_mem_info;
+ NvU32 offset;
+
+ list_for_each(plist_iter, plist_head) {
+ int ret;
+ p_mem_info = list_entry(plist_iter,
+ struct SYS_MEM_MODS_INFO,
+ list);
+ ret = mods_get_alloc_offset(p_mem_info, phys_addr, &offset);
+ if (ret == OK)
+ return p_mem_info;
+ }
+ /* physical address doesn't belong to any memory allocation */
+ return NULL;
+}
+
+/*************************
+ * ESCAPE CALL FUNCTIONS *
+ *************************/
+
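+/* Allocate system pages on behalf of user space.  Validates the cache
+ * attribute, rounds num_bytes up to whole pages, optionally derives the
+ * NUMA node from the supplied PCI device, performs a contiguous or
+ * non-contiguous allocation, and registers the result on the caller's
+ * allocation list.  The memory_handle returned to user space is the
+ * kernel pointer to the tracking struct, cast to NvU64.
+ */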
+int esc_mods_device_alloc_pages(struct file *fp,
+ struct MODS_DEVICE_ALLOC_PAGES *p)
+{
+ struct SYS_MEM_MODS_INFO *p_mem_info;
+
+ LOG_ENT();
+
+ switch (p->attrib) {
+ case MODS_MEMORY_CACHED:
+ case MODS_MEMORY_UNCACHED:
+ case MODS_MEMORY_WRITECOMBINE:
+ break;
+
+ default:
+ mods_error_printk("invalid memory type: %u\n", p->attrib);
+ return -EINVAL;
+ }
+
+ MODS_KMALLOC(p_mem_info, sizeof(*p_mem_info));
+ if (unlikely(!p_mem_info)) {
+ LOG_EXT();
+ return -ENOMEM;
+ }
+
+ p_mem_info->alloc_type = p->contiguous ? MODS_ALLOC_TYPE_CONTIG
+ : MODS_ALLOC_TYPE_NON_CONTIG;
+ p_mem_info->cache_type = p->attrib;
+ p_mem_info->length = p->num_bytes;
+ p_mem_info->order = 0;
+ p_mem_info->k_mapping_ref_cnt = 0;
+ p_mem_info->logical_addr = 0;
+ p_mem_info->p_page_tbl = NULL;
+ p_mem_info->addr_bits = p->address_bits;
+ p_mem_info->p_page = NULL;
+	p_mem_info->num_pages = (p->num_bytes >> PAGE_SHIFT) +
+				((p->num_bytes & ~PAGE_MASK) ? 1 : 0);
+
+ p_mem_info->numa_node = numa_node_id();
+#ifdef MODS_HAS_DEV_TO_NUMA_NODE
+ if (p->pci_device.bus || p->pci_device.device) {
+ unsigned int devfn = PCI_DEVFN(p->pci_device.device,
+ p->pci_device.function);
+ struct pci_dev *dev = MODS_PCI_GET_SLOT(p->pci_device.bus,
+ devfn);
+
+		if (dev == NULL) {
+			/* don't leak the tracking struct on this early return */
+			MODS_KFREE(p_mem_info, sizeof(*p_mem_info));
+			LOG_EXT();
+			return -EINVAL;
+		}
+ p_mem_info->numa_node = dev_to_node(&dev->dev);
+		mods_debug_printk(DEBUG_MEM_DETAILED,
+			"esc_mods_device_alloc_pages affinity %x:%x.%x node %d\n",
+			p->pci_device.bus,
+			p->pci_device.device,
+			p->pci_device.function,
+			p_mem_info->numa_node);
+ }
+#endif
+
+	mods_debug_printk(
+		DEBUG_MEM_DETAILED,
+		"esc_mods_device_alloc_pages - alloc %d %s pages on node %d\n",
+		(int)p_mem_info->num_pages,
+		p->contiguous ? "contiguous" : "noncontiguous",
+		p_mem_info->numa_node);
+
+ p->memory_handle = 0;
+
+ if (p->contiguous) {
+ mods_alloc_contig_sys_pages(p_mem_info);
+ if ((p_mem_info->logical_addr == 0) &&
+ (p_mem_info->p_page == NULL)) {
+
+			mods_error_printk(
+				"failed to allocate %u contiguous bytes\n",
+				(unsigned)p_mem_info->length);
+ MODS_KFREE(p_mem_info, sizeof(*p_mem_info));
+ LOG_EXT();
+ return -ENOMEM;
+ }
+ } else {
+ mods_alloc_noncontig_sys_pages(p_mem_info);
+ if (p_mem_info->p_page_tbl == NULL) {
+ mods_error_printk(
+ "failed to alloc noncontiguous system pages\n");
+ MODS_KFREE(p_mem_info, sizeof(*p_mem_info));
+ LOG_EXT();
+ return -ENOMEM;
+ }
+ }
+
+ p->memory_handle = (NvU64) (long) p_mem_info;
+
+ /* Register the allocation of the memory */
+ mods_register_alloc(fp, p_mem_info);
+ LOG_EXT();
+ return OK;
+}
+
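+/* Wrapper around esc_mods_device_alloc_pages for callers that do not
+ * supply a PCI device, so the allocation gets no device NUMA affinity.
+ */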
+int esc_mods_alloc_pages(struct file *fp, struct MODS_ALLOC_PAGES *p)
+{
+ struct MODS_DEVICE_ALLOC_PAGES dev_alloc_pages;
+ int ret;
+ LOG_ENT();
+ dev_alloc_pages.num_bytes = p->num_bytes;
+ dev_alloc_pages.contiguous = p->contiguous;
+ dev_alloc_pages.address_bits = p->address_bits;
+ dev_alloc_pages.attrib = p->attrib;
+ dev_alloc_pages.pci_device.bus = 0;
+ dev_alloc_pages.pci_device.device = 0;
+ dev_alloc_pages.pci_device.function = 0;
+ ret = esc_mods_device_alloc_pages(fp, &dev_alloc_pages);
+ if (!ret)
+ p->memory_handle = dev_alloc_pages.memory_handle;
+ LOG_EXT();
+ return ret;
+}
+
+int esc_mods_free_pages(struct file *fp, struct MODS_FREE_PAGES *p)
+{
+ LOG_ENT();
+
+ /* unregister and free the allocation of the memory */
+ mods_unregister_and_free(fp,
+ (struct SYS_MEM_MODS_INFO *) (long)
+ p->memory_handle);
+
+ LOG_EXT();
+
+ return OK;
+}
+
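+/* Record the memory type user space wants for a physical range that was
+ * not allocated through this driver; the stored range is presumably
+ * consulted when the range is later mapped.  The request is rejected if
+ * the address falls inside an existing allocation, since that memory
+ * already carries its own cache type.
+ */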
+int esc_mods_set_mem_type(struct file *fp, struct MODS_MEMORY_TYPE *p)
+{
+ struct SYS_MEM_MODS_INFO *p_mem_info;
+ MODS_PRIVATE_DATA(private_data, fp);
+
+ LOG_ENT();
+
+ spin_lock(&private_data->lock);
+
+ p_mem_info = mods_find_alloc(fp, p->physical_address);
+ if (p_mem_info != NULL) {
+ spin_unlock(&private_data->lock);
+		mods_error_printk(
+			"cannot change memory type of an address that was already allocated\n");
+ LOG_EXT();
+ return -EINVAL;
+ }
+
+ switch (p->type) {
+ case MODS_MEMORY_CACHED:
+ case MODS_MEMORY_UNCACHED:
+ case MODS_MEMORY_WRITECOMBINE:
+ break;
+
+ default:
+ spin_unlock(&private_data->lock);
+ mods_error_printk("invalid memory type: %u\n", p->type);
+ LOG_EXT();
+ return -EINVAL;
+ }
+
+ private_data->mem_type.dma_addr = p->physical_address;
+ private_data->mem_type.size = p->size;
+ private_data->mem_type.type = p->type;
+
+ spin_unlock(&private_data->lock);
+
+ LOG_EXT();
+ return OK;
+}
+
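+/* Translate (memory_handle, offset) into a physical address.  For a
+ * contiguous allocation this is a simple addition; for a non-contiguous
+ * one the page table built at allocation time supplies the per-page
+ * base address.
+ */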
+int esc_mods_get_phys_addr(struct file *fp, struct MODS_GET_PHYSICAL_ADDRESS *p)
+{
+ struct SYS_MEM_MODS_INFO *p_mem_info
+ = (struct SYS_MEM_MODS_INFO *)(long)p->memory_handle;
+ NvU32 page_n;
+ NvU32 page_offs;
+
+ LOG_ENT();
+
+ if (p_mem_info->alloc_type != MODS_ALLOC_TYPE_NON_CONTIG) {
+ p->physical_address = p_mem_info->dma_addr + p->offset;
+ } else {
+ page_n = p->offset >> PAGE_SHIFT;
+ page_offs = p->offset % PAGE_SIZE;
+
+ if (page_n >= p_mem_info->num_pages) {
+ mods_error_printk(
+ "get_phys_addr query exceeds allocation's boundary!\n");
+ LOG_EXT();
+ return -EINVAL;
+ }
+ mods_debug_printk(DEBUG_MEM_DETAILED,
+ "esc_mods_get_phys_addr with offs=0x%x => page_n=%d, page_offs=0x%x\n",
+ (int) p->offset,
+ (int) page_n,
+ (int) page_offs);
+
+ p->physical_address =
+ p_mem_info->p_page_tbl[page_n]->dma_addr + page_offs;
+
+ mods_debug_printk(DEBUG_MEM_DETAILED,
+ "esc_mods_get_phys_addr: dma_addr 0x%llx, returned phys_addr 0x%llx\n",
+ p_mem_info->p_page_tbl[page_n]->dma_addr,
+ p->physical_address);
+ }
+ LOG_EXT();
+ return OK;
+}
+
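+/* Resolve a user virtual address to a physical address by walking the
+ * per-file-descriptor mapping list.  Contiguous mappings are resolved
+ * directly; non-contiguous ones are converted to an allocation offset
+ * and handed to esc_mods_get_phys_addr.
+ */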
+int esc_mods_virtual_to_phys(struct file *fp,
+ struct MODS_VIRTUAL_TO_PHYSICAL *p)
+{
+ struct MODS_GET_PHYSICAL_ADDRESS get_phys_addr;
+ struct SYS_MAP_MEMORY *p_map_mem;
+ MODS_PRIVATE_DATA(private_data, fp);
+ struct list_head *head;
+ struct list_head *iter;
+ NvU32 phys_offset;
+ NvU32 virt_offset;
+	int rc;
+
+ LOG_ENT_C("virt_addr=0x%llx\n", p->virtual_address);
+
+ spin_lock(&private_data->lock);
+
+ head = private_data->mods_mapping_list;
+
+ list_for_each(iter, head) {
+ p_map_mem = list_entry(iter, struct SYS_MAP_MEMORY, list);
+
+ if (p_map_mem->virtual_addr <= p->virtual_address &&
+ p_map_mem->virtual_addr + p_map_mem->mapping_length
+ > p->virtual_address) {
+
+ virt_offset = p->virtual_address
+ - p_map_mem->virtual_addr;
+
+ if (p_map_mem->contiguous) {
+ p->physical_address = p_map_mem->dma_addr
+ + virt_offset;
+ spin_unlock(&private_data->lock);
+ LOG_EXT_C("phys: 0x%llx\n",
+ p->physical_address);
+ return OK;
+ }
+
+ /* non-contiguous */
+ if (mods_get_alloc_offset(p_map_mem->p_mem_info,
+ p_map_mem->dma_addr,
+ &phys_offset) != OK) {
+ spin_unlock(&private_data->lock);
+ return -EINVAL;
+ }
+
+ get_phys_addr.memory_handle =
+ (NvU64)(long)p_map_mem->p_mem_info;
+ get_phys_addr.offset = virt_offset + phys_offset;
+
+ spin_unlock(&private_data->lock);
+
+ rc = esc_mods_get_phys_addr(fp, &get_phys_addr);
+ if (rc != OK)
+ return rc;
+
+ p->physical_address = get_phys_addr.physical_address;
+ LOG_EXT_C("phys: 0x%llx\n", p->physical_address);
+ return OK;
+ }
+ }
+
+ spin_unlock(&private_data->lock);
+
+ mods_error_printk(
+ "esc_mods_virtual_to_phys query has invalid virt addr\n");
+ return -EINVAL;
+}
+
+int esc_mods_phys_to_virtual(struct file *fp,
+ struct MODS_PHYSICAL_TO_VIRTUAL *p)
+{
+ struct SYS_MAP_MEMORY *p_map_mem;
+ MODS_PRIVATE_DATA(private_data, fp);
+ struct list_head *head;
+ struct list_head *iter;
+ NvU32 offset;
+ NvU32 map_offset;
+
+ LOG_ENT_C("physAddr=0x%llx\n", p->physical_address);
+
+ spin_lock(&private_data->lock);
+
+ head = private_data->mods_mapping_list;
+
+ list_for_each(iter, head) {
+ p_map_mem = list_entry(iter, struct SYS_MAP_MEMORY, list);
+
+ if (p_map_mem->contiguous) {
+ if (p_map_mem->dma_addr <= p->physical_address &&
+ p_map_mem->dma_addr + p_map_mem->mapping_length
+ > p->physical_address) {
+
+ offset = p->physical_address
+ - p_map_mem->dma_addr;
+ p->virtual_address = p_map_mem->virtual_addr
+ + offset;
+ spin_unlock(&private_data->lock);
+ LOG_EXT_C("virt:0x%llx\n", p->virtual_address);
+ return OK;
+ }
+ continue;
+ }
+
+ /* non-contiguous */
+ if (mods_get_alloc_offset(p_map_mem->p_mem_info,
+ p->physical_address,
+ &offset))
+ continue;
+
+ /* offset the mapping starts from */
+ if (mods_get_alloc_offset(p_map_mem->p_mem_info,
+ p_map_mem->dma_addr,
+ &map_offset))
+ continue;
+
+ if ((map_offset <= offset) &&
+ (map_offset + p_map_mem->mapping_length > offset)) {
+
+ p->virtual_address = p_map_mem->virtual_addr + offset
+ - map_offset;
+ spin_unlock(&private_data->lock);
+ LOG_EXT_C("virt:0x%llx\n", p->virtual_address);
+ return OK;
+ }
+ }
+ spin_unlock(&private_data->lock);
+	mods_error_printk(
+		"esc_mods_phys_to_virtual query has invalid phys addr\n");
+ return -EINVAL;
+}
+
+int esc_mods_memory_barrier(struct file *fp)
+{
+ wmb();
+ return OK;
+}
+
+#ifdef CONFIG_ARCH_TEGRA
+
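+/* Flush the CPU caches for a single physically contiguous range.  On ARM
+ * the L1 data cache is flushed by virtual address and the L2 outer cache
+ * by physical address; on ARM64 only the data-cache flush by virtual
+ * address is performed.
+ */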
+static void clear_contiguous_cache
+(
+ NvU64 virt_start,
+ NvU64 virt_end,
+ NvU64 phys_start,
+ NvU64 phys_end
+)
+{
+	/* virt_end and phys_end are expected to point to the first address
+	 * past the range being flushed */
+	NvU32 size = virt_end - virt_start;
+	size += (~virt_end + 1) % PAGE_SIZE; /* align the end up to a page boundary */
+
+#ifdef CONFIG_ARM64
+ /* Flush L1 cache */
+ __flush_dcache_area((void *)(size_t)(virt_start), size);
+#else
+ /* Flush L1 cache */
+ __cpuc_flush_dcache_area((void *)(size_t)(virt_start), size);
+
+ /* Now flush L2 cache. */
+ outer_flush_range(phys_start, phys_end);
+#endif
+}
+
+static void clear_entry_cache_mappings
+(
+ struct SYS_MAP_MEMORY *p_map_mem,
+ NvU64 virt_start,
+ NvU64 virt_end
+)
+{
+ struct SYS_MEM_MODS_INFO *p_mem_info = p_map_mem->p_mem_info;
+ NvU64 original_virt_end = virt_end;
+ NvU64 phys_start;
+ NvU64 phys_end;
+ NvU64 v_start_offset;
+ NvU64 v_end_offset;
+ NvU64 start_offset;
+ NvU64 start_page;
+ NvU64 end_offset;
+ NvU64 end_page;
+ NvU64 i;
+
+ if (NULL == p_mem_info || NULL == p_mem_info->p_page_tbl) {
+ mods_debug_printk(DEBUG_MEM_DETAILED,
+ "Skipping unmapped region\n");
+ return;
+ }
+
+ if (p_mem_info->cache_type != MODS_MEMORY_CACHED) {
+ mods_debug_printk(DEBUG_MEM_DETAILED,
+ "Skipping uncached region\n");
+ return;
+ }
+
+ v_start_offset = (virt_start - p_map_mem->virtual_addr);
+ v_end_offset = (virt_end - p_map_mem->virtual_addr);
+ if (p_map_mem->contiguous) {
+ NvU64 start_addr = MODS_DMA_TO_PHYS(p_map_mem->dma_addr);
+ phys_start = start_addr + v_start_offset;
+ phys_end = start_addr + v_end_offset;
+
+ clear_contiguous_cache(virt_start,
+ virt_end,
+ phys_start,
+ phys_end);
+ return;
+ }
+
+ /* If not contiguous, go page by page clearing each page */
+ start_page = v_start_offset >> PAGE_SHIFT;
+ start_offset = v_start_offset % PAGE_SIZE;
+ end_page = v_end_offset >> PAGE_SHIFT;
+ end_offset = v_end_offset % PAGE_SIZE;
+
+ for (i = start_page; i <= end_page && i < p_mem_info->num_pages; i++) {
+ NvU64 start_addr = MODS_DMA_TO_PHYS(
+ p_mem_info->p_page_tbl[i]->dma_addr);
+ if (i == start_page) {
+ phys_start = start_addr + start_offset;
+ } else {
+ virt_start = p_map_mem->virtual_addr + (i * PAGE_SIZE);
+ phys_start = start_addr;
+ }
+
+ if (i == end_page) {
+ virt_end = original_virt_end;
+ phys_end = start_addr + end_offset;
+ } else {
+ virt_end = p_map_mem->virtual_addr
+ + ((i + 1) * PAGE_SIZE);
+ phys_end = start_addr + PAGE_SIZE;
+ }
+
+ clear_contiguous_cache(virt_start,
+ virt_end,
+ phys_start,
+ phys_end);
+ }
+}
+
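+/* Flush (never invalidate) the CPU caches for an arbitrary user virtual
+ * range by intersecting it with every mapping owned by this file
+ * descriptor and flushing each overlapping piece.
+ */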
+int esc_mods_flush_cpu_cache_range(struct file *fp,
+ struct MODS_FLUSH_CPU_CACHE_RANGE *p)
+{
+ MODS_PRIVATE_DATA(private_data, fp);
+ struct list_head *head;
+ struct list_head *iter;
+
+ if (irqs_disabled() || in_interrupt() ||
+ p->virt_addr_start > p->virt_addr_end ||
+ MODS_INVALIDATE_CPU_CACHE == p->flags) {
+
+ mods_debug_printk(DEBUG_MEM_DETAILED, "cannot clear cache\n");
+		return -EINVAL;
+ }
+
+ spin_lock(&private_data->lock);
+
+ head = private_data->mods_mapping_list;
+
+ list_for_each(iter, head) {
+ struct SYS_MAP_MEMORY *p_map_mem
+ = list_entry(iter, struct SYS_MAP_MEMORY, list);
+
+ NvU64 mapped_va = p_map_mem->virtual_addr;
+
+		/* Note: mapping end points to the first address of next range */
+ NvU64 mapping_end = mapped_va + p_map_mem->mapping_length;
+
+ int start_on_page = p->virt_addr_start >= mapped_va
+ && p->virt_addr_start < mapping_end;
+ int start_before_page = p->virt_addr_start < mapped_va;
+ int end_on_page = p->virt_addr_end >= mapped_va
+ && p->virt_addr_end < mapping_end;
+ int end_after_page = p->virt_addr_end >= mapping_end;
+ NvU64 virt_start = p->virt_addr_start;
+
+ /* Kernel expects end to point to the first address of next
+ * range */
+ NvU64 virt_end = p->virt_addr_end + 1;
+
+ if ((start_on_page || start_before_page)
+ && (end_on_page || end_after_page)) {
+
+ if (!start_on_page)
+ virt_start = p_map_mem->virtual_addr;
+ if (!end_on_page)
+ virt_end = mapping_end;
+ clear_entry_cache_mappings(p_map_mem,
+ virt_start,
+ virt_end);
+ }
+ }
+ spin_unlock(&private_data->lock);
+ return OK;
+}
+
+#endif
+
+static int mods_post_alloc(NvU64 vaddr,
+ NvU64 paddr,
+ NvU64 pages,
+ NvU32 cachetype)
+{
+#if defined(CONFIG_ARCH_TEGRA) && !defined(CONFIG_CPA) &&\
+ !defined(CONFIG_ARCH_TEGRA_3x_SOC)
+ NvU64 size = pages * PAGE_SIZE;
+ clear_contiguous_cache(vaddr,
+ vaddr + size,
+ paddr,
+ paddr + size);
+ return 0;
+#else
+ return mods_set_mem_type(vaddr, pages, cachetype);
+#endif
+}
diff --git a/drivers/misc/mods/mods_pci.c b/drivers/misc/mods/mods_pci.c
new file mode 100644
index 000000000000..705058a625cd
--- /dev/null
+++ b/drivers/misc/mods/mods_pci.c
@@ -0,0 +1,265 @@
+/*
+ * mods_pci.c - This file is part of NVIDIA MODS kernel driver.
+ *
+ * Copyright (c) 2008-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * NVIDIA MODS kernel driver is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * NVIDIA MODS kernel driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with NVIDIA MODS kernel driver.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mods_internal.h"
+
+#include <linux/io.h>
+
+/************************
+ * PCI ESCAPE FUNCTIONS *
+ ************************/
+
+int esc_mods_find_pci_dev(struct file *pfile, struct MODS_FIND_PCI_DEVICE *p)
+{
+ struct pci_dev *dev;
+ int index = 0;
+
+ mods_debug_printk(DEBUG_PCICFG,
+ "find pci dev %04x:%04x, index %d\n",
+ (int) p->vendor_id,
+ (int) p->device_id,
+ (int) p->index);
+
+ dev = pci_get_device(p->vendor_id, p->device_id, NULL);
+
+ while (dev) {
+ if (index == p->index) {
+ p->bus_number = dev->bus->number;
+ p->device_number = PCI_SLOT(dev->devfn);
+ p->function_number = PCI_FUNC(dev->devfn);
+ return OK;
+ }
+ dev = pci_get_device(p->vendor_id, p->device_id, dev);
+ index++;
+ }
+
+ return -EINVAL;
+}
+
+int esc_mods_find_pci_class_code(struct file *pfile,
+ struct MODS_FIND_PCI_CLASS_CODE *p)
+{
+ struct pci_dev *dev;
+ int index = 0;
+
+ mods_debug_printk(DEBUG_PCICFG, "find pci class code %04x, index %d\n",
+ (int) p->class_code, (int) p->index);
+
+ dev = pci_get_class(p->class_code, NULL);
+
+ while (dev) {
+ if (index == p->index) {
+ p->bus_number = dev->bus->number;
+ p->device_number = PCI_SLOT(dev->devfn);
+ p->function_number = PCI_FUNC(dev->devfn);
+ return OK;
+ }
+ dev = pci_get_class(p->class_code, dev);
+ index++;
+ }
+
+ return -EINVAL;
+}
+
+int esc_mods_pci_read(struct file *pfile, struct MODS_PCI_READ *p)
+{
+ struct pci_dev *dev;
+ unsigned int devfn;
+
+ devfn = PCI_DEVFN(p->device_number, p->function_number);
+ dev = MODS_PCI_GET_SLOT(p->bus_number, devfn);
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ mods_debug_printk(DEBUG_PCICFG,
+ "pci read %x:%02x.%x, addr 0x%04x, size %d\n",
+ (int) p->bus_number, (int) p->device_number,
+ (int) p->function_number, (int) p->address,
+ (int) p->data_size);
+
+ p->data = 0;
+ switch (p->data_size) {
+ case 1:
+ pci_read_config_byte(dev, p->address, (u8 *) &p->data);
+ break;
+ case 2:
+ pci_read_config_word(dev, p->address, (u16 *) &p->data);
+ break;
+ case 4:
+ pci_read_config_dword(dev, p->address, (u32 *) &p->data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return OK;
+}
+
+int esc_mods_pci_write(struct file *pfile, struct MODS_PCI_WRITE *p)
+{
+ struct pci_dev *dev;
+ unsigned int devfn;
+
+	mods_debug_printk(DEBUG_PCICFG,
+		"pci write %x:%02x.%x, addr 0x%04x, size %d, data 0x%x\n",
+		(int) p->bus_number, (int) p->device_number,
+		(int) p->function_number,
+		(int) p->address, (int) p->data_size, (int) p->data);
+
+ devfn = PCI_DEVFN(p->device_number, p->function_number);
+ dev = MODS_PCI_GET_SLOT(p->bus_number, devfn);
+
+ if (dev == NULL) {
+ mods_error_printk(
+ "pci write to %x:%02x.%x, addr 0x%04x, size %d failed\n",
+ (unsigned)p->bus_number,
+ (unsigned)p->device_number,
+ (unsigned)p->function_number,
+ (unsigned)p->address,
+ (int)p->data_size);
+ return -EINVAL;
+ }
+
+ switch (p->data_size) {
+ case 1:
+ pci_write_config_byte(dev, p->address, p->data);
+ break;
+ case 2:
+ pci_write_config_word(dev, p->address, p->data);
+ break;
+ case 4:
+ pci_write_config_dword(dev, p->address, p->data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return OK;
+}
+
+int esc_mods_pci_bus_add_dev(struct file *pfile,
+ struct MODS_PCI_BUS_ADD_DEVICES *scan)
+{
+#if defined(CONFIG_PCI)
+	struct pci_bus *bus = pci_find_bus(0, scan->bus);
+
+	if (bus == NULL) {
+		mods_error_printk("pci bus %x not found\n", scan->bus);
+		return -EINVAL;
+	}
+
+	mods_info_printk("scanning pci bus %x\n", scan->bus);
+
+	/* initiate a PCI bus scan to find hotplugged PCI devices in domain 0 */
+	pci_scan_child_bus(bus);
+
+	/* add newly found devices */
+	pci_bus_add_devices(bus);
+
+	return OK;
+#else
+ return -EINVAL;
+#endif
+}
+
+/************************
+ * PIO ESCAPE FUNCTIONS *
+ ************************/
+
+int esc_mods_pio_read(struct file *pfile, struct MODS_PIO_READ *p)
+{
+ LOG_ENT();
+ switch (p->data_size) {
+ case 1:
+ p->data = inb(p->port);
+ break;
+ case 2:
+ p->data = inw(p->port);
+ break;
+ case 4:
+ p->data = inl(p->port);
+ break;
+ default:
+ return -EINVAL;
+ }
+ LOG_EXT();
+ return OK;
+}
+
+int esc_mods_pio_write(struct file *pfile, struct MODS_PIO_WRITE *p)
+{
+ LOG_ENT();
+ switch (p->data_size) {
+ case 1:
+ outb(p->data, p->port);
+ break;
+ case 2:
+ outw(p->data, p->port);
+ break;
+ case 4:
+ outl(p->data, p->port);
+ break;
+ default:
+ return -EINVAL;
+ }
+ LOG_EXT();
+ return OK;
+}
+
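+/* Report the NUMA node of a PCI device along with the CPU mask of that
+ * node.  The mask is packed into 32-bit words, presumably so that the
+ * layout seen by user space is the same on 32-bit and 64-bit kernels.
+ */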
+int esc_mods_device_numa_info(struct file *fp, struct MODS_DEVICE_NUMA_INFO *p)
+{
+#ifdef MODS_HAS_WC
+ unsigned int devfn = PCI_DEVFN(p->pci_device.device,
+ p->pci_device.function);
+ struct pci_dev *dev = MODS_PCI_GET_SLOT(p->pci_device.bus, devfn);
+
+ LOG_ENT();
+
+ if (dev == NULL) {
+ mods_error_printk("PCI device %u:%u.%u not found\n",
+ p->pci_device.bus, p->pci_device.device,
+ p->pci_device.function);
+ LOG_EXT();
+ return -EINVAL;
+ }
+
+ p->node = dev_to_node(&dev->dev);
+ if (-1 != p->node) {
+ const unsigned long *maskp
+ = cpumask_bits(cpumask_of_node(p->node));
+ unsigned int i, word, bit, maskidx;
+
+ if (((nr_cpumask_bits + 31) / 32) > MAX_CPU_MASKS) {
+ mods_error_printk("too many CPUs (%d) for mask bits\n",
+ nr_cpumask_bits);
+ LOG_EXT();
+ return -EINVAL;
+ }
+
+ for (i = 0, maskidx = 0;
+ i < nr_cpumask_bits;
+ i += 32, maskidx++) {
+ word = i / BITS_PER_LONG;
+ bit = i % BITS_PER_LONG;
+ p->node_cpu_mask[maskidx]
+ = (maskp[word] >> bit) & 0xFFFFFFFFUL;
+ }
+ }
+ p->node_count = num_possible_nodes();
+ p->cpu_count = num_possible_cpus();
+
+ LOG_EXT();
+ return OK;
+#else
+ return -EINVAL;
+#endif
+}