author     Linus Torvalds <torvalds@linux-foundation.org>    2014-06-12 11:32:30 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-06-12 11:32:30 -0700
commit     682b7c1c8ea8885aa681ddf530d6cf2ad4f2dc15 (patch)
tree       882003bb4fc56af816246168f8c85d6dde8c6ed9 /drivers/gpu/ipu-v3
parent     16b9057804c02e2d351e9c8f606e909b43cbd9e7 (diff)
parent     bc1dfff04a5d4064ba0db1fab13f84ab4f333d2b (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main drm merge window pull request, changes all over the
  place, mostly normal levels of churn.

  Highlights:

  Core drm: More cleanups, fix race on connector/encoder naming, docs
    updates, object locking rework in prep for atomic modeset

  i915: mipi DSI support, valleyview power fixes, cursor size fixes,
    execlist refactoring, vblank improvements, userptr support, OOM
    handling improvements

  radeon: GPUVM tuning and large page size support, gart fixes, deep
    color HDMI support, HDMI audio cleanups

  nouveau:
    - displayport rework should fix lots of issues
    - initial gk20a support
    - gk110b support
    - gk208 fixes

  exynos: probe order fixes, HDMI changes, IPP consolidation

  msm: debugfs updates, misc fixes

  ast: ast2400 support, sync with UMS driver

  tegra: cleanups, hdmi + hw cursor for Tegra 124.

  panel: fixes existing panels add some new ones.

  ipuv3: moved from staging to drivers/gpu"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (761 commits)
  drm/nouveau/disp/dp: fix tmds passthrough on dp connector
  drm/nouveau/dp: probe dpcd to determine connectedness
  drm/nv50-: trigger update after all connectors disabled
  drm/nv50-: prepare for attaching a SOR to multiple heads
  drm/gf119-/disp: fix debug output on update failure
  drm/nouveau/disp/dp: make use of postcursor when its available
  drm/g94-/disp/dp: take max pullup value across all lanes
  drm/nouveau/bios/dp: parse lane postcursor data
  drm/nouveau/dp: fix support for dpms
  drm/nouveau: register a drm_dp_aux channel for each dp connector
  drm/g94-/disp: add method to power-off dp lanes
  drm/nouveau/disp/dp: maintain link in response to hpd signal
  drm/g94-/disp: bash and wait for something after changing lane power regs
  drm/nouveau/disp/dp: split link config/power into two steps
  drm/nv50/disp: train PIOR-attached DP from second supervisor
  drm/nouveau/disp/dp: make use of existing output data for link training
  drm/gf119/disp: start removing direct vbios parsing from supervisor
  drm/nv50/disp: start removing direct vbios parsing from supervisor
  drm/nouveau/disp/dp: maintain receiver caps in response to hpd signal
  drm/nouveau/disp/dp: create subclass for dp outputs
  ...
Diffstat (limited to 'drivers/gpu/ipu-v3')
-rw-r--r--   drivers/gpu/ipu-v3/Kconfig            7
-rw-r--r--   drivers/gpu/ipu-v3/Makefile           3
-rw-r--r--   drivers/gpu/ipu-v3/ipu-common.c    1362
-rw-r--r--   drivers/gpu/ipu-v3/ipu-dc.c         460
-rw-r--r--   drivers/gpu/ipu-v3/ipu-di.c         730
-rw-r--r--   drivers/gpu/ipu-v3/ipu-dmfc.c       436
-rw-r--r--   drivers/gpu/ipu-v3/ipu-dp.c         363
-rw-r--r--   drivers/gpu/ipu-v3/ipu-prv.h        215
-rw-r--r--   drivers/gpu/ipu-v3/ipu-smfc.c        97
9 files changed, 3673 insertions, 0 deletions
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
new file mode 100644
index 000000000000..2f228a2f2a48
--- /dev/null
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -0,0 +1,7 @@
+config IMX_IPUV3_CORE
+ tristate "IPUv3 core support"
+ depends on SOC_IMX5 || SOC_IMX6Q || SOC_IMX6SL || ARCH_MULTIPLATFORM
+ depends on RESET_CONTROLLER
+ help
+ Choose this if you have an i.MX5/6 system and want to use the Image
+ Processing Unit. This option only enables IPU base support.
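
For context, a board configuration that wants the IPU core built in would enable this option together with the dependencies named above; a minimal defconfig sketch (illustrative only, not part of this patch):

  # Assumed i.MX6 defconfig fragment enabling the IPUv3 core
  CONFIG_SOC_IMX6Q=y
  CONFIG_RESET_CONTROLLER=y
  CONFIG_IMX_IPUV3_CORE=y

Since the option is a tristate, CONFIG_IMX_IPUV3_CORE=m instead builds the code as the imx-ipu-v3 module (see the Makefile below).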
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
new file mode 100644
index 000000000000..1887972b4ac2
--- /dev/null
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
+
+imx-ipu-v3-objs := ipu-common.o ipu-dc.o ipu-di.o ipu-dp.o ipu-dmfc.o ipu-smfc.o
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
new file mode 100644
index 000000000000..04e7b2eafbdd
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -0,0 +1,1362 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_fourcc.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->cm_reg + offset);
+}
+
+static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
+{
+ writel(value, ipu->cm_reg + offset);
+}
+
+static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
+{
+ return readl(ipu->idmac_reg + offset);
+}
+
+static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
+ unsigned offset)
+{
+ writel(value, ipu->idmac_reg + offset);
+}
+
+void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
+{
+ u32 val;
+
+ val = ipu_cm_read(ipu, IPU_SRM_PRI2);
+ val |= 0x8;
+ ipu_cm_write(ipu, val, IPU_SRM_PRI2);
+}
+EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);
+
+struct ipu_ch_param __iomem *ipu_get_cpmem(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+
+ return ipu->cpmem_base + channel->num;
+}
+EXPORT_SYMBOL_GPL(ipu_get_cpmem);
+
+void ipu_cpmem_set_high_priority(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ struct ipu_ch_param __iomem *p = ipu_get_cpmem(channel);
+ u32 val;
+
+ if (ipu->ipu_type == IPUV3EX)
+ ipu_ch_param_write_field(p, IPU_FIELD_ID, 1);
+
+ val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(channel->num));
+ val |= 1 << (channel->num % 32);
+ ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(channel->num));
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
+
+void ipu_ch_param_write_field(struct ipu_ch_param __iomem *base, u32 wbs, u32 v)
+{
+ u32 bit = (wbs >> 8) % 160;
+ u32 size = wbs & 0xff;
+ u32 word = (wbs >> 8) / 160;
+ u32 i = bit / 32;
+ u32 ofs = bit % 32;
+ u32 mask = (1 << size) - 1;
+ u32 val;
+
+ pr_debug("%s %d %d %d\n", __func__, word, bit, size);
+
+ val = readl(&base->word[word].data[i]);
+ val &= ~(mask << ofs);
+ val |= v << ofs;
+ writel(val, &base->word[word].data[i]);
+
+ if ((bit + size - 1) / 32 > i) {
+ val = readl(&base->word[word].data[i + 1]);
+ val &= ~(mask >> (ofs ? (32 - ofs) : 0));
+ val |= v >> (ofs ? (32 - ofs) : 0);
+ writel(val, &base->word[word].data[i + 1]);
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_ch_param_write_field);
+
+u32 ipu_ch_param_read_field(struct ipu_ch_param __iomem *base, u32 wbs)
+{
+ u32 bit = (wbs >> 8) % 160;
+ u32 size = wbs & 0xff;
+ u32 word = (wbs >> 8) / 160;
+ u32 i = bit / 32;
+ u32 ofs = bit % 32;
+ u32 mask = (1 << size) - 1;
+ u32 val = 0;
+
+ pr_debug("%s %d %d %d\n", __func__, word, bit, size);
+
+ val = (readl(&base->word[word].data[i]) >> ofs) & mask;
+
+ if ((bit + size - 1) / 32 > i) {
+ u32 tmp;
+ tmp = readl(&base->word[word].data[i + 1]);
+ tmp &= mask >> (ofs ? (32 - ofs) : 0);
+ val |= tmp << (ofs ? (32 - ofs) : 0);
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(ipu_ch_param_read_field);
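
The IPU_FIELD_* descriptors passed to these helpers are defined in ipu-prv.h, which is not part of the hunks shown here. A plausible packing, inferred from the decode above (word = (wbs >> 8) / 160, bit = (wbs >> 8) % 160, size = wbs & 0xff), might look like this (illustrative sketch only; the name IPU_CPMEM_FIELD is assumed):

  /* Pack a CPMEM field descriptor: 160-bit word index, bit offset, width. */
  #define IPU_CPMEM_FIELD(word, ofs, size) \
          ((((word) * 160 + (ofs)) << 8) | (size))

  /*
   * Example: a 3-bit field at bit 107 of CPMEM word 0 decodes to
   *   word = 107 / 160 = 0, bit = 107 % 160 = 107,
   *   i = 107 / 32 = 3, ofs = 107 % 32 = 11, mask = (1 << 3) - 1 = 0x7,
   * so the access touches data[3] only; data[4] would also be written if
   * the field crossed a 32-bit boundary ((bit + size - 1) / 32 > i).
   */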
+
+int ipu_cpmem_set_format_rgb(struct ipu_ch_param __iomem *p,
+ const struct ipu_rgb *rgb)
+{
+ int bpp = 0, npb = 0, ro, go, bo, to;
+
+ ro = rgb->bits_per_pixel - rgb->red.length - rgb->red.offset;
+ go = rgb->bits_per_pixel - rgb->green.length - rgb->green.offset;
+ bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset;
+ to = rgb->bits_per_pixel - rgb->transp.length - rgb->transp.offset;
+
+ ipu_ch_param_write_field(p, IPU_FIELD_WID0, rgb->red.length - 1);
+ ipu_ch_param_write_field(p, IPU_FIELD_OFS0, ro);
+ ipu_ch_param_write_field(p, IPU_FIELD_WID1, rgb->green.length - 1);
+ ipu_ch_param_write_field(p, IPU_FIELD_OFS1, go);
+ ipu_ch_param_write_field(p, IPU_FIELD_WID2, rgb->blue.length - 1);
+ ipu_ch_param_write_field(p, IPU_FIELD_OFS2, bo);
+
+ if (rgb->transp.length) {
+ ipu_ch_param_write_field(p, IPU_FIELD_WID3,
+ rgb->transp.length - 1);
+ ipu_ch_param_write_field(p, IPU_FIELD_OFS3, to);
+ } else {
+ ipu_ch_param_write_field(p, IPU_FIELD_WID3, 7);
+ ipu_ch_param_write_field(p, IPU_FIELD_OFS3,
+ rgb->bits_per_pixel);
+ }
+
+ switch (rgb->bits_per_pixel) {
+ case 32:
+ bpp = 0;
+ npb = 15;
+ break;
+ case 24:
+ bpp = 1;
+ npb = 19;
+ break;
+ case 16:
+ bpp = 3;
+ npb = 31;
+ break;
+ case 8:
+ bpp = 5;
+ npb = 63;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
+ ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
+ ipu_ch_param_write_field(p, IPU_FIELD_PFS, 7); /* rgb mode */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_rgb);
+
+int ipu_cpmem_set_format_passthrough(struct ipu_ch_param __iomem *p,
+ int width)
+{
+ int bpp = 0, npb = 0;
+
+ switch (width) {
+ case 32:
+ bpp = 0;
+ npb = 15;
+ break;
+ case 24:
+ bpp = 1;
+ npb = 19;
+ break;
+ case 16:
+ bpp = 3;
+ npb = 31;
+ break;
+ case 8:
+ bpp = 5;
+ npb = 63;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ipu_ch_param_write_field(p, IPU_FIELD_BPP, bpp);
+ ipu_ch_param_write_field(p, IPU_FIELD_NPB, npb);
+ ipu_ch_param_write_field(p, IPU_FIELD_PFS, 6); /* raw mode */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_format_passthrough);
+
+void ipu_cpmem_set_yuv_interleaved(struct ipu_ch_param __iomem *p,
+ u32 pixel_format)
+{
+ switch (pixel_format) {
+ case V4L2_PIX_FMT_UYVY:
+ ipu_ch_param_write_field(p, IPU_FIELD_BPP, 3); /* bits/pixel */
+ ipu_ch_param_write_field(p, IPU_FIELD_PFS, 0xA); /* pix format */
+ ipu_ch_param_write_field(p, IPU_FIELD_NPB, 31); /* burst size */
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ ipu_ch_param_write_field(p, IPU_FIELD_BPP, 3); /* bits/pixel */
+ ipu_ch_param_write_field(p, IPU_FIELD_PFS, 0x8); /* pix format */
+ ipu_ch_param_write_field(p, IPU_FIELD_NPB, 31); /* burst size */
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
+
+void ipu_cpmem_set_yuv_planar_full(struct ipu_ch_param __iomem *p,
+ u32 pixel_format, int stride, int u_offset, int v_offset)
+{
+ switch (pixel_format) {
+ case V4L2_PIX_FMT_YUV420:
+ ipu_ch_param_write_field(p, IPU_FIELD_SLUV, (stride / 2) - 1);
+ ipu_ch_param_write_field(p, IPU_FIELD_UBO, u_offset / 8);
+ ipu_ch_param_write_field(p, IPU_FIELD_VBO, v_offset / 8);
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ ipu_ch_param_write_field(p, IPU_FIELD_SLUV, (stride / 2) - 1);
+ ipu_ch_param_write_field(p, IPU_FIELD_UBO, v_offset / 8);
+ ipu_ch_param_write_field(p, IPU_FIELD_VBO, u_offset / 8);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
+
+void ipu_cpmem_set_yuv_planar(struct ipu_ch_param __iomem *p, u32 pixel_format,
+ int stride, int height)
+{
+ int u_offset, v_offset;
+ int uv_stride = 0;
+
+ switch (pixel_format) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ uv_stride = stride / 2;
+ u_offset = stride * height;
+ v_offset = u_offset + (uv_stride * height / 2);
+ ipu_cpmem_set_yuv_planar_full(p, pixel_format, stride,
+ u_offset, v_offset);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
+
+static const struct ipu_rgb def_rgb_32 = {
+ .red = { .offset = 16, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 0, .length = 8, },
+ .transp = { .offset = 24, .length = 8, },
+ .bits_per_pixel = 32,
+};
+
+static const struct ipu_rgb def_bgr_32 = {
+ .red = { .offset = 0, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 16, .length = 8, },
+ .transp = { .offset = 24, .length = 8, },
+ .bits_per_pixel = 32,
+};
+
+static const struct ipu_rgb def_rgb_24 = {
+ .red = { .offset = 16, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 0, .length = 8, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 24,
+};
+
+static const struct ipu_rgb def_bgr_24 = {
+ .red = { .offset = 0, .length = 8, },
+ .green = { .offset = 8, .length = 8, },
+ .blue = { .offset = 16, .length = 8, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 24,
+};
+
+static const struct ipu_rgb def_rgb_16 = {
+ .red = { .offset = 11, .length = 5, },
+ .green = { .offset = 5, .length = 6, },
+ .blue = { .offset = 0, .length = 5, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 16,
+};
+
+static const struct ipu_rgb def_bgr_16 = {
+ .red = { .offset = 0, .length = 5, },
+ .green = { .offset = 5, .length = 6, },
+ .blue = { .offset = 11, .length = 5, },
+ .transp = { .offset = 0, .length = 0, },
+ .bits_per_pixel = 16,
+};
+
+#define Y_OFFSET(pix, x, y) ((x) + pix->width * (y))
+#define U_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * (y) / 4) + (x) / 2)
+#define V_OFFSET(pix, x, y) ((pix->width * pix->height) + \
+ (pix->width * pix->height / 4) + \
+ (pix->width * (y) / 4) + (x) / 2)
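
To make the planar 4:2:0 layout behind these macros concrete, here is a worked example (numbers are illustrative only) assuming a 640x480 V4L2_PIX_FMT_YUV420 buffer cropped at (x, y) = (16, 32):

  /*
   * Y_OFFSET = 16 + 640 * 32                         = 20496
   * U_OFFSET = 640 * 480 + (640 * 32) / 4 + 16 / 2   = 312328
   * V_OFFSET = U_OFFSET + (640 * 480) / 4            = 389128
   *
   * i.e. the full-resolution Y plane is followed by the quarter-size U
   * plane and then the quarter-size V plane, each subsampled 2x2.
   */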
+
+int ipu_cpmem_set_fmt(struct ipu_ch_param __iomem *cpmem, u32 drm_fourcc)
+{
+ switch (drm_fourcc) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ /* pix format */
+ ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 2);
+ /* burst size */
+ ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 63);
+ break;
+ case DRM_FORMAT_UYVY:
+ /* bits/pixel */
+ ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
+ /* pix format */
+ ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0xA);
+ /* burst size */
+ ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_YUYV:
+ /* bits/pixel */
+ ipu_ch_param_write_field(cpmem, IPU_FIELD_BPP, 3);
+ /* pix format */
+ ipu_ch_param_write_field(cpmem, IPU_FIELD_PFS, 0x8);
+ /* burst size */
+ ipu_ch_param_write_field(cpmem, IPU_FIELD_NPB, 31);
+ break;
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XBGR8888:
+ ipu_cpmem_set_format_rgb(cpmem, &def_bgr_32);
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ ipu_cpmem_set_format_rgb(cpmem, &def_rgb_32);
+ break;
+ case DRM_FORMAT_BGR888:
+ ipu_cpmem_set_format_rgb(cpmem, &def_bgr_24);
+ break;
+ case DRM_FORMAT_RGB888:
+ ipu_cpmem_set_format_rgb(cpmem, &def_rgb_24);
+ break;
+ case DRM_FORMAT_RGB565:
+ ipu_cpmem_set_format_rgb(cpmem, &def_rgb_16);
+ break;
+ case DRM_FORMAT_BGR565:
+ ipu_cpmem_set_format_rgb(cpmem, &def_bgr_16);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
+
+/*
+ * The V4L2 spec defines packed RGB formats in memory byte order, which from
+ * point of view of the IPU corresponds to little-endian words with the first
+ * component in the least significant bits.
+ * The DRM pixel formats and IPU internal representation are ordered the other
+ * way around, with the first named component ordered at the most significant
+ * bits. Further, V4L2 formats are not well defined:
+ * http://linuxtv.org/downloads/v4l-dvb-apis/packed-rgb.html
+ * We choose the interpretation which matches GStreamer behavior.
+ */
+static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_RGB565:
+ /*
+ * Here we choose the 'corrected' interpretation of RGBP, a
+ * little-endian 16-bit word with the red component at the most
+ * significant bits:
+ * g[2:0]b[4:0] r[4:0]g[5:3] <=> [16:0] R:G:B
+ */
+ return DRM_FORMAT_RGB565;
+ case V4L2_PIX_FMT_BGR24:
+ /* B G R <=> [24:0] R:G:B */
+ return DRM_FORMAT_RGB888;
+ case V4L2_PIX_FMT_RGB24:
+ /* R G B <=> [24:0] B:G:R */
+ return DRM_FORMAT_BGR888;
+ case V4L2_PIX_FMT_BGR32:
+ /* B G R A <=> [32:0] A:B:G:R */
+ return DRM_FORMAT_XRGB8888;
+ case V4L2_PIX_FMT_RGB32:
+ /* R G B A <=> [32:0] A:B:G:R */
+ return DRM_FORMAT_XBGR8888;
+ case V4L2_PIX_FMT_UYVY:
+ return DRM_FORMAT_UYVY;
+ case V4L2_PIX_FMT_YUYV:
+ return DRM_FORMAT_YUYV;
+ case V4L2_PIX_FMT_YUV420:
+ return DRM_FORMAT_YUV420;
+ case V4L2_PIX_FMT_YVU420:
+ return DRM_FORMAT_YVU420;
+ }
+
+ return -EINVAL;
+}
+
+enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
+{
+ switch (drm_fourcc) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ return IPUV3_COLORSPACE_RGB;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return IPUV3_COLORSPACE_YUV;
+ default:
+ return IPUV3_COLORSPACE_UNKNOWN;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
+
+int ipu_cpmem_set_image(struct ipu_ch_param __iomem *cpmem,
+ struct ipu_image *image)
+{
+ struct v4l2_pix_format *pix = &image->pix;
+ int y_offset, u_offset, v_offset;
+
+ pr_debug("%s: resolution: %dx%d stride: %d\n",
+ __func__, pix->width, pix->height,
+ pix->bytesperline);
+
+ ipu_cpmem_set_resolution(cpmem, image->rect.width,
+ image->rect.height);
+ ipu_cpmem_set_stride(cpmem, pix->bytesperline);
+
+ ipu_cpmem_set_fmt(cpmem, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat));
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ y_offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = U_OFFSET(pix, image->rect.left,
+ image->rect.top) - y_offset;
+ v_offset = V_OFFSET(pix, image->rect.left,
+ image->rect.top) - y_offset;
+
+ ipu_cpmem_set_yuv_planar_full(cpmem, pix->pixelformat,
+ pix->bytesperline, u_offset, v_offset);
+ ipu_cpmem_set_buffer(cpmem, 0, image->phys + y_offset);
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUYV:
+ ipu_cpmem_set_buffer(cpmem, 0, image->phys +
+ image->rect.left * 2 +
+ image->rect.top * image->pix.bytesperline);
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ ipu_cpmem_set_buffer(cpmem, 0, image->phys +
+ image->rect.left * 4 +
+ image->rect.top * image->pix.bytesperline);
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ ipu_cpmem_set_buffer(cpmem, 0, image->phys +
+ image->rect.left * 2 +
+ image->rect.top * image->pix.bytesperline);
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ ipu_cpmem_set_buffer(cpmem, 0, image->phys +
+ image->rect.left * 3 +
+ image->rect.top * image->pix.bytesperline);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
+
+enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUYV:
+ return IPUV3_COLORSPACE_YUV;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_RGB565:
+ return IPUV3_COLORSPACE_RGB;
+ default:
+ return IPUV3_COLORSPACE_UNKNOWN;
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
+
+struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
+{
+ struct ipuv3_channel *channel;
+
+ dev_dbg(ipu->dev, "%s %d\n", __func__, num);
+
+ if (num > 63)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&ipu->channel_lock);
+
+ channel = &ipu->channel[num];
+
+ if (channel->busy) {
+ channel = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ channel->busy = true;
+ channel->num = num;
+
+out:
+ mutex_unlock(&ipu->channel_lock);
+
+ return channel;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_get);
+
+void ipu_idmac_put(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+
+ dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);
+
+ mutex_lock(&ipu->channel_lock);
+
+ channel->busy = false;
+
+ mutex_unlock(&ipu->channel_lock);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_put);
+
+#define idma_mask(ch) (1 << (ch & 0x1f))
+
+void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
+ bool doublebuffer)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
+ if (doublebuffer)
+ reg |= idma_mask(channel->num);
+ else
+ reg &= ~idma_mask(channel->num);
+ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
+
+int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
+{
+ unsigned long lock_flags;
+ u32 val;
+
+ spin_lock_irqsave(&ipu->lock, lock_flags);
+
+ val = ipu_cm_read(ipu, IPU_DISP_GEN);
+
+ if (mask & IPU_CONF_DI0_EN)
+ val |= IPU_DI0_COUNTER_RELEASE;
+ if (mask & IPU_CONF_DI1_EN)
+ val |= IPU_DI1_COUNTER_RELEASE;
+
+ ipu_cm_write(ipu, val, IPU_DISP_GEN);
+
+ val = ipu_cm_read(ipu, IPU_CONF);
+ val |= mask;
+ ipu_cm_write(ipu, val, IPU_CONF);
+
+ spin_unlock_irqrestore(&ipu->lock, lock_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_module_enable);
+
+int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
+{
+ unsigned long lock_flags;
+ u32 val;
+
+ spin_lock_irqsave(&ipu->lock, lock_flags);
+
+ val = ipu_cm_read(ipu, IPU_CONF);
+ val &= ~mask;
+ ipu_cm_write(ipu, val, IPU_CONF);
+
+ val = ipu_cm_read(ipu, IPU_DISP_GEN);
+
+ if (mask & IPU_CONF_DI0_EN)
+ val &= ~IPU_DI0_COUNTER_RELEASE;
+ if (mask & IPU_CONF_DI1_EN)
+ val &= ~IPU_DI1_COUNTER_RELEASE;
+
+ ipu_cm_write(ipu, val, IPU_DISP_GEN);
+
+ spin_unlock_irqrestore(&ipu->lock, lock_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_module_disable);
+
+int ipu_csi_enable(struct ipu_soc *ipu, int csi)
+{
+ return ipu_module_enable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_enable);
+
+int ipu_csi_disable(struct ipu_soc *ipu, int csi)
+{
+ return ipu_module_disable(ipu, csi ? IPU_CONF_CSI1_EN : IPU_CONF_CSI0_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_csi_disable);
+
+int ipu_smfc_enable(struct ipu_soc *ipu)
+{
+ return ipu_module_enable(ipu, IPU_CONF_SMFC_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_enable);
+
+int ipu_smfc_disable(struct ipu_soc *ipu)
+{
+ return ipu_module_disable(ipu, IPU_CONF_SMFC_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_disable);
+
+int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned int chno = channel->num;
+
+ return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
+
+void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned int chno = channel->num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ /* Mark buffer as ready. */
+ if (buf_num == 0)
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
+ else
+ ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
+
+int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
+ val |= idma_mask(channel->num);
+ ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
+
+bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
+{
+ return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
+
+int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
+ idma_mask(channel->num)) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
+
+int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
+ while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32)) & BIT(irq % 32))) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_wait_interrupt);
+
+int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
+{
+ struct ipu_soc *ipu = channel->ipu;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipu->lock, flags);
+
+ /* Disable DMA channel(s) */
+ val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
+ val &= ~idma_mask(channel->num);
+ ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
+
+ /* Set channel buffers NOT to be ready */
+ ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
+
+ if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
+ idma_mask(channel->num)) {
+ ipu_cm_write(ipu, idma_mask(channel->num),
+ IPU_CHA_BUF0_RDY(channel->num));
+ }
+
+ if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
+ idma_mask(channel->num)) {
+ ipu_cm_write(ipu, idma_mask(channel->num),
+ IPU_CHA_BUF1_RDY(channel->num));
+ }
+
+ ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
+
+ /* Reset the double buffer */
+ val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
+ val &= ~idma_mask(channel->num);
+ ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));
+
+ spin_unlock_irqrestore(&ipu->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
+
+static int ipu_memory_reset(struct ipu_soc *ipu)
+{
+ unsigned long timeout;
+
+ ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
+ if (time_after(jiffies, timeout))
+ return -ETIME;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+struct ipu_devtype {
+ const char *name;
+ unsigned long cm_ofs;
+ unsigned long cpmem_ofs;
+ unsigned long srm_ofs;
+ unsigned long tpm_ofs;
+ unsigned long disp0_ofs;
+ unsigned long disp1_ofs;
+ unsigned long dc_tmpl_ofs;
+ unsigned long vdi_ofs;
+ enum ipuv3_type type;
+};
+
+static struct ipu_devtype ipu_type_imx51 = {
+ .name = "IPUv3EX",
+ .cm_ofs = 0x1e000000,
+ .cpmem_ofs = 0x1f000000,
+ .srm_ofs = 0x1f040000,
+ .tpm_ofs = 0x1f060000,
+ .disp0_ofs = 0x1e040000,
+ .disp1_ofs = 0x1e048000,
+ .dc_tmpl_ofs = 0x1f080000,
+ .vdi_ofs = 0x1e068000,
+ .type = IPUV3EX,
+};
+
+static struct ipu_devtype ipu_type_imx53 = {
+ .name = "IPUv3M",
+ .cm_ofs = 0x06000000,
+ .cpmem_ofs = 0x07000000,
+ .srm_ofs = 0x07040000,
+ .tpm_ofs = 0x07060000,
+ .disp0_ofs = 0x06040000,
+ .disp1_ofs = 0x06048000,
+ .dc_tmpl_ofs = 0x07080000,
+ .vdi_ofs = 0x06068000,
+ .type = IPUV3M,
+};
+
+static struct ipu_devtype ipu_type_imx6q = {
+ .name = "IPUv3H",
+ .cm_ofs = 0x00200000,
+ .cpmem_ofs = 0x00300000,
+ .srm_ofs = 0x00340000,
+ .tpm_ofs = 0x00360000,
+ .disp0_ofs = 0x00240000,
+ .disp1_ofs = 0x00248000,
+ .dc_tmpl_ofs = 0x00380000,
+ .vdi_ofs = 0x00268000,
+ .type = IPUV3H,
+};
+
+static const struct of_device_id imx_ipu_dt_ids[] = {
+ { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
+ { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
+ { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
+
+static int ipu_submodules_init(struct ipu_soc *ipu,
+ struct platform_device *pdev, unsigned long ipu_base,
+ struct clk *ipu_clk)
+{
+ char *unit;
+ int ret;
+ struct device *dev = &pdev->dev;
+ const struct ipu_devtype *devtype = ipu->devtype;
+
+ ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
+ IPU_CONF_DI0_EN, ipu_clk);
+ if (ret) {
+ unit = "di0";
+ goto err_di_0;
+ }
+
+ ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
+ IPU_CONF_DI1_EN, ipu_clk);
+ if (ret) {
+ unit = "di1";
+ goto err_di_1;
+ }
+
+ ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
+ IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
+ if (ret) {
+ unit = "dc_template";
+ goto err_dc;
+ }
+
+ ret = ipu_dmfc_init(ipu, dev, ipu_base +
+ devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
+ if (ret) {
+ unit = "dmfc";
+ goto err_dmfc;
+ }
+
+ ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
+ if (ret) {
+ unit = "dp";
+ goto err_dp;
+ }
+
+ ret = ipu_smfc_init(ipu, dev, ipu_base +
+ devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
+ if (ret) {
+ unit = "smfc";
+ goto err_smfc;
+ }
+
+ return 0;
+
+err_smfc:
+ ipu_dp_exit(ipu);
+err_dp:
+ ipu_dmfc_exit(ipu);
+err_dmfc:
+ ipu_dc_exit(ipu);
+err_dc:
+ ipu_di_exit(ipu, 1);
+err_di_1:
+ ipu_di_exit(ipu, 0);
+err_di_0:
+ dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
+ return ret;
+}
+
+static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
+{
+ unsigned long status;
+ int i, bit, irq;
+
+ for (i = 0; i < num_regs; i++) {
+
+ status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
+ status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
+
+ for_each_set_bit(bit, &status, 32) {
+ irq = irq_linear_revmap(ipu->domain,
+ regs[i] * 32 + bit);
+ if (irq)
+ generic_handle_irq(irq);
+ }
+ }
+}
+
+static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+ const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
+ struct irq_chip *chip = irq_get_chip(irq);
+
+ chained_irq_enter(chip, desc);
+
+ ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
+
+ chained_irq_exit(chip, desc);
+}
+
+static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+ const int int_reg[] = { 4, 5, 8, 9};
+ struct irq_chip *chip = irq_get_chip(irq);
+
+ chained_irq_enter(chip, desc);
+
+ ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
+
+ chained_irq_exit(chip, desc);
+}
+
+int ipu_map_irq(struct ipu_soc *ipu, int irq)
+{
+ int virq;
+
+ virq = irq_linear_revmap(ipu->domain, irq);
+ if (!virq)
+ virq = irq_create_mapping(ipu->domain, irq);
+
+ return virq;
+}
+EXPORT_SYMBOL_GPL(ipu_map_irq);
+
+int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
+ enum ipu_channel_irq irq_type)
+{
+ return ipu_map_irq(ipu, irq_type + channel->num);
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
+
+static void ipu_submodules_exit(struct ipu_soc *ipu)
+{
+ ipu_smfc_exit(ipu);
+ ipu_dp_exit(ipu);
+ ipu_dmfc_exit(ipu);
+ ipu_dc_exit(ipu);
+ ipu_di_exit(ipu, 1);
+ ipu_di_exit(ipu, 0);
+}
+
+static int platform_remove_devices_fn(struct device *dev, void *unused)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static void platform_device_unregister_children(struct platform_device *pdev)
+{
+ device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
+}
+
+struct ipu_platform_reg {
+ struct ipu_client_platformdata pdata;
+ const char *name;
+ int reg_offset;
+};
+
+static const struct ipu_platform_reg client_reg[] = {
+ {
+ .pdata = {
+ .di = 0,
+ .dc = 5,
+ .dp = IPU_DP_FLOW_SYNC_BG,
+ .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
+ .dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
+ },
+ .name = "imx-ipuv3-crtc",
+ }, {
+ .pdata = {
+ .di = 1,
+ .dc = 1,
+ .dp = -EINVAL,
+ .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
+ .dma[1] = -EINVAL,
+ },
+ .name = "imx-ipuv3-crtc",
+ }, {
+ .pdata = {
+ .csi = 0,
+ .dma[0] = IPUV3_CHANNEL_CSI0,
+ .dma[1] = -EINVAL,
+ },
+ .reg_offset = IPU_CM_CSI0_REG_OFS,
+ .name = "imx-ipuv3-camera",
+ }, {
+ .pdata = {
+ .csi = 1,
+ .dma[0] = IPUV3_CHANNEL_CSI1,
+ .dma[1] = -EINVAL,
+ },
+ .reg_offset = IPU_CM_CSI1_REG_OFS,
+ .name = "imx-ipuv3-camera",
+ },
+};
+
+static DEFINE_MUTEX(ipu_client_id_mutex);
+static int ipu_client_id;
+
+static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+{
+ struct device *dev = ipu->dev;
+ unsigned i;
+ int id, ret;
+
+ mutex_lock(&ipu_client_id_mutex);
+ id = ipu_client_id;
+ ipu_client_id += ARRAY_SIZE(client_reg);
+ mutex_unlock(&ipu_client_id_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
+ const struct ipu_platform_reg *reg = &client_reg[i];
+ struct platform_device *pdev;
+ struct resource res;
+
+ if (reg->reg_offset) {
+ memset(&res, 0, sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset;
+ res.end = res.start + PAGE_SIZE - 1;
+ pdev = platform_device_register_resndata(dev, reg->name,
+ id++, &res, 1, &reg->pdata, sizeof(reg->pdata));
+ } else {
+ pdev = platform_device_register_data(dev, reg->name,
+ id++, &reg->pdata, sizeof(reg->pdata));
+ }
+
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto err_register;
+ }
+ }
+
+ return 0;
+
+err_register:
+ platform_device_unregister_children(to_platform_device(dev));
+
+ return ret;
+}
+
+
+static int ipu_irq_init(struct ipu_soc *ipu)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned long unused[IPU_NUM_IRQS / 32] = {
+ 0x400100d0, 0xffe000fd,
+ 0x400100d0, 0xffe000fd,
+ 0x400100d0, 0xffe000fd,
+ 0x4077ffff, 0xffe7e1fd,
+ 0x23fffffe, 0x8880fff0,
+ 0xf98fe7d0, 0xfff81fff,
+ 0x400100d0, 0xffe000fd,
+ 0x00000000,
+ };
+ int ret, i;
+
+ ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
+ &irq_generic_chip_ops, ipu);
+ if (!ipu->domain) {
+ dev_err(ipu->dev, "failed to add irq domain\n");
+ return -ENODEV;
+ }
+
+ ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
+ handle_level_irq, 0,
+ IRQF_VALID, 0);
+ if (ret < 0) {
+ dev_err(ipu->dev, "failed to alloc generic irq chips\n");
+ irq_domain_remove(ipu->domain);
+ return ret;
+ }
+
+ for (i = 0; i < IPU_NUM_IRQS; i += 32) {
+ gc = irq_get_domain_generic_chip(ipu->domain, i);
+ gc->reg_base = ipu->cm_reg;
+ gc->unused = unused[i / 32];
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.ack = IPU_INT_STAT(i / 32);
+ ct->regs.mask = IPU_INT_CTRL(i / 32);
+ }
+
+ irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
+ irq_set_handler_data(ipu->irq_sync, ipu);
+ irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler);
+ irq_set_handler_data(ipu->irq_err, ipu);
+
+ return 0;
+}
+
+static void ipu_irq_exit(struct ipu_soc *ipu)
+{
+ int i, irq;
+
+ irq_set_chained_handler(ipu->irq_err, NULL);
+ irq_set_handler_data(ipu->irq_err, NULL);
+ irq_set_chained_handler(ipu->irq_sync, NULL);
+ irq_set_handler_data(ipu->irq_sync, NULL);
+
+ /* TODO: remove irq_domain_generic_chips */
+
+ for (i = 0; i < IPU_NUM_IRQS; i++) {
+ irq = irq_linear_revmap(ipu->domain, i);
+ if (irq)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(ipu->domain);
+}
+
+static int ipu_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(imx_ipu_dt_ids, &pdev->dev);
+ struct ipu_soc *ipu;
+ struct resource *res;
+ unsigned long ipu_base;
+ int i, ret, irq_sync, irq_err;
+ const struct ipu_devtype *devtype;
+
+ devtype = of_id->data;
+
+ irq_sync = platform_get_irq(pdev, 0);
+ irq_err = platform_get_irq(pdev, 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
+ irq_sync, irq_err);
+
+ if (!res || irq_sync < 0 || irq_err < 0)
+ return -ENODEV;
+
+ ipu_base = res->start;
+
+ ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
+ if (!ipu)
+ return -ENODEV;
+
+ for (i = 0; i < 64; i++)
+ ipu->channel[i].ipu = ipu;
+ ipu->devtype = devtype;
+ ipu->ipu_type = devtype->type;
+
+ spin_lock_init(&ipu->lock);
+ mutex_init(&ipu->channel_lock);
+
+ dev_dbg(&pdev->dev, "cm_reg: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs);
+ dev_dbg(&pdev->dev, "idmac: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
+ dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
+ ipu_base + devtype->cpmem_ofs);
+ dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
+ ipu_base + devtype->disp0_ofs);
+ dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
+ ipu_base + devtype->disp1_ofs);
+ dev_dbg(&pdev->dev, "srm: 0x%08lx\n",
+ ipu_base + devtype->srm_ofs);
+ dev_dbg(&pdev->dev, "tpm: 0x%08lx\n",
+ ipu_base + devtype->tpm_ofs);
+ dev_dbg(&pdev->dev, "dc: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
+ dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
+ dev_dbg(&pdev->dev, "dmfc: 0x%08lx\n",
+ ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
+ dev_dbg(&pdev->dev, "vdi: 0x%08lx\n",
+ ipu_base + devtype->vdi_ofs);
+
+ ipu->cm_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->cm_ofs, PAGE_SIZE);
+ ipu->idmac_reg = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
+ PAGE_SIZE);
+ ipu->cpmem_base = devm_ioremap(&pdev->dev,
+ ipu_base + devtype->cpmem_ofs, PAGE_SIZE);
+
+ if (!ipu->cm_reg || !ipu->idmac_reg || !ipu->cpmem_base)
+ return -ENOMEM;
+
+ ipu->clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(ipu->clk)) {
+ ret = PTR_ERR(ipu->clk);
+ dev_err(&pdev->dev, "clk_get failed with %d", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ipu);
+
+ ret = clk_prepare_enable(ipu->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ipu->dev = &pdev->dev;
+ ipu->irq_sync = irq_sync;
+ ipu->irq_err = irq_err;
+
+ ret = ipu_irq_init(ipu);
+ if (ret)
+ goto out_failed_irq;
+
+ ret = device_reset(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
+ goto out_failed_reset;
+ }
+ ret = ipu_memory_reset(ipu);
+ if (ret)
+ goto out_failed_reset;
+
+ /* Set MCU_T to divide MCU access window into 2 */
+ ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
+ IPU_DISP_GEN);
+
+ ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
+ if (ret)
+ goto failed_submodules_init;
+
+ ret = ipu_add_client_devices(ipu, ipu_base);
+ if (ret) {
+ dev_err(&pdev->dev, "adding client devices failed with %d\n",
+ ret);
+ goto failed_add_clients;
+ }
+
+ dev_info(&pdev->dev, "%s probed\n", devtype->name);
+
+ return 0;
+
+failed_add_clients:
+ ipu_submodules_exit(ipu);
+failed_submodules_init:
+out_failed_reset:
+ ipu_irq_exit(ipu);
+out_failed_irq:
+ clk_disable_unprepare(ipu->clk);
+ return ret;
+}
+
+static int ipu_remove(struct platform_device *pdev)
+{
+ struct ipu_soc *ipu = platform_get_drvdata(pdev);
+
+ platform_device_unregister_children(pdev);
+ ipu_submodules_exit(ipu);
+ ipu_irq_exit(ipu);
+
+ clk_disable_unprepare(ipu->clk);
+
+ return 0;
+}
+
+static struct platform_driver imx_ipu_driver = {
+ .driver = {
+ .name = "imx-ipuv3",
+ .of_match_table = imx_ipu_dt_ids,
+ },
+ .probe = ipu_probe,
+ .remove = ipu_remove,
+};
+
+module_platform_driver(imx_ipu_driver);
+
+MODULE_ALIAS("platform:imx-ipuv3");
+MODULE_DESCRIPTION("i.MX IPU v3 driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
new file mode 100644
index 000000000000..2326c752d89b
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+#define DC_MAP_CONF_PTR(n) (0x108 + ((n) & ~0x1) * 2)
+#define DC_MAP_CONF_VAL(n) (0x144 + ((n) & ~0x1) * 2)
+
+#define DC_EVT_NF 0
+#define DC_EVT_NL 1
+#define DC_EVT_EOF 2
+#define DC_EVT_NFIELD 3
+#define DC_EVT_EOL 4
+#define DC_EVT_EOFIELD 5
+#define DC_EVT_NEW_ADDR 6
+#define DC_EVT_NEW_CHAN 7
+#define DC_EVT_NEW_DATA 8
+
+#define DC_EVT_NEW_ADDR_W_0 0
+#define DC_EVT_NEW_ADDR_W_1 1
+#define DC_EVT_NEW_CHAN_W_0 2
+#define DC_EVT_NEW_CHAN_W_1 3
+#define DC_EVT_NEW_DATA_W_0 4
+#define DC_EVT_NEW_DATA_W_1 5
+#define DC_EVT_NEW_ADDR_R_0 6
+#define DC_EVT_NEW_ADDR_R_1 7
+#define DC_EVT_NEW_CHAN_R_0 8
+#define DC_EVT_NEW_CHAN_R_1 9
+#define DC_EVT_NEW_DATA_R_0 10
+#define DC_EVT_NEW_DATA_R_1 11
+
+#define DC_WR_CH_CONF 0x0
+#define DC_WR_CH_ADDR 0x4
+#define DC_RL_CH(evt) (8 + ((evt) & ~0x1) * 2)
+
+#define DC_GEN 0xd4
+#define DC_DISP_CONF1(disp) (0xd8 + (disp) * 4)
+#define DC_DISP_CONF2(disp) (0xe8 + (disp) * 4)
+#define DC_STAT 0x1c8
+
+#define WROD(lf) (0x18 | ((lf) << 1))
+#define WRG 0x01
+#define WCLK 0xc9
+
+#define SYNC_WAVE 0
+#define NULL_WAVE (-1)
+
+#define DC_GEN_SYNC_1_6_SYNC (2 << 1)
+#define DC_GEN_SYNC_PRIORITY_1 (1 << 7)
+
+#define DC_WR_CH_CONF_WORD_SIZE_8 (0 << 0)
+#define DC_WR_CH_CONF_WORD_SIZE_16 (1 << 0)
+#define DC_WR_CH_CONF_WORD_SIZE_24 (2 << 0)
+#define DC_WR_CH_CONF_WORD_SIZE_32 (3 << 0)
+#define DC_WR_CH_CONF_DISP_ID_PARALLEL(i) (((i) & 0x1) << 3)
+#define DC_WR_CH_CONF_DISP_ID_SERIAL (2 << 3)
+#define DC_WR_CH_CONF_DISP_ID_ASYNC (3 << 4)
+#define DC_WR_CH_CONF_FIELD_MODE (1 << 9)
+#define DC_WR_CH_CONF_PROG_TYPE_NORMAL (4 << 5)
+#define DC_WR_CH_CONF_PROG_TYPE_MASK (7 << 5)
+#define DC_WR_CH_CONF_PROG_DI_ID (1 << 2)
+#define DC_WR_CH_CONF_PROG_DISP_ID(i) (((i) & 0x1) << 3)
+
+#define IPU_DC_NUM_CHANNELS 10
+
+struct ipu_dc_priv;
+
+enum ipu_dc_map {
+ IPU_DC_MAP_RGB24,
+ IPU_DC_MAP_RGB565,
+ IPU_DC_MAP_GBR24, /* TVEv2 */
+ IPU_DC_MAP_BGR666,
+ IPU_DC_MAP_LVDS666,
+ IPU_DC_MAP_BGR24,
+};
+
+struct ipu_dc {
+ /* The display interface number assigned to this dc channel */
+ unsigned int di;
+ void __iomem *base;
+ struct ipu_dc_priv *priv;
+ int chno;
+ bool in_use;
+};
+
+struct ipu_dc_priv {
+ void __iomem *dc_reg;
+ void __iomem *dc_tmpl_reg;
+ struct ipu_soc *ipu;
+ struct device *dev;
+ struct ipu_dc channels[IPU_DC_NUM_CHANNELS];
+ struct mutex mutex;
+ struct completion comp;
+ int dc_irq;
+ int dp_irq;
+};
+
+static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
+{
+ u32 reg;
+
+ reg = readl(dc->base + DC_RL_CH(event));
+ reg &= ~(0xffff << (16 * (event & 0x1)));
+ reg |= ((addr << 8) | priority) << (16 * (event & 0x1));
+ writel(reg, dc->base + DC_RL_CH(event));
+}
+
+static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
+ int map, int wave, int glue, int sync, int stop)
+{
+ struct ipu_dc_priv *priv = dc->priv;
+ u32 reg1, reg2;
+
+ if (opcode == WCLK) {
+ reg1 = (operand << 20) & 0xfff00000;
+ reg2 = operand >> 12 | opcode << 1 | stop << 9;
+ } else if (opcode == WRG) {
+ reg1 = sync | glue << 4 | ++wave << 11 | ((operand << 15) & 0xffff8000);
+ reg2 = operand >> 17 | opcode << 7 | stop << 9;
+ } else {
+ reg1 = sync | glue << 4 | ++wave << 11 | ++map << 15 | ((operand << 20) & 0xfff00000);
+ reg2 = operand >> 12 | opcode << 4 | stop << 9;
+ }
+ writel(reg1, priv->dc_tmpl_reg + word * 8);
+ writel(reg2, priv->dc_tmpl_reg + word * 8 + 4);
+}
+
+static int ipu_pixfmt_to_map(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_RGB24:
+ return IPU_DC_MAP_RGB24;
+ case V4L2_PIX_FMT_RGB565:
+ return IPU_DC_MAP_RGB565;
+ case IPU_PIX_FMT_GBR24:
+ return IPU_DC_MAP_GBR24;
+ case V4L2_PIX_FMT_BGR666:
+ return IPU_DC_MAP_BGR666;
+ case v4l2_fourcc('L', 'V', 'D', '6'):
+ return IPU_DC_MAP_LVDS666;
+ case V4L2_PIX_FMT_BGR24:
+ return IPU_DC_MAP_BGR24;
+ default:
+ return -EINVAL;
+ }
+}
+
+int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
+ u32 pixel_fmt, u32 width)
+{
+ struct ipu_dc_priv *priv = dc->priv;
+ u32 reg = 0;
+ int map;
+
+ dc->di = ipu_di_get_num(di);
+
+ map = ipu_pixfmt_to_map(pixel_fmt);
+ if (map < 0) {
+ dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
+ return map;
+ }
+
+ if (interlaced) {
+ dc_link_event(dc, DC_EVT_NL, 0, 3);
+ dc_link_event(dc, DC_EVT_EOL, 0, 2);
+ dc_link_event(dc, DC_EVT_NEW_DATA, 0, 1);
+
+ /* Init template microcode */
+ dc_write_tmpl(dc, 0, WROD(0), 0, map, SYNC_WAVE, 0, 8, 1);
+ } else {
+ if (dc->di) {
+ dc_link_event(dc, DC_EVT_NL, 2, 3);
+ dc_link_event(dc, DC_EVT_EOL, 3, 2);
+ dc_link_event(dc, DC_EVT_NEW_DATA, 1, 1);
+ /* Init template microcode */
+ dc_write_tmpl(dc, 2, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
+ dc_write_tmpl(dc, 3, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
+ dc_write_tmpl(dc, 4, WRG, 0, map, NULL_WAVE, 0, 0, 1);
+ dc_write_tmpl(dc, 1, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
+ } else {
+ dc_link_event(dc, DC_EVT_NL, 5, 3);
+ dc_link_event(dc, DC_EVT_EOL, 6, 2);
+ dc_link_event(dc, DC_EVT_NEW_DATA, 8, 1);
+ /* Init template microcode */
+ dc_write_tmpl(dc, 5, WROD(0), 0, map, SYNC_WAVE, 8, 5, 1);
+ dc_write_tmpl(dc, 6, WROD(0), 0, map, SYNC_WAVE, 4, 5, 0);
+ dc_write_tmpl(dc, 7, WRG, 0, map, NULL_WAVE, 0, 0, 1);
+ dc_write_tmpl(dc, 8, WROD(0), 0, map, SYNC_WAVE, 0, 5, 1);
+ }
+ }
+ dc_link_event(dc, DC_EVT_NF, 0, 0);
+ dc_link_event(dc, DC_EVT_NFIELD, 0, 0);
+ dc_link_event(dc, DC_EVT_EOF, 0, 0);
+ dc_link_event(dc, DC_EVT_EOFIELD, 0, 0);
+ dc_link_event(dc, DC_EVT_NEW_CHAN, 0, 0);
+ dc_link_event(dc, DC_EVT_NEW_ADDR, 0, 0);
+
+ reg = readl(dc->base + DC_WR_CH_CONF);
+ if (interlaced)
+ reg |= DC_WR_CH_CONF_FIELD_MODE;
+ else
+ reg &= ~DC_WR_CH_CONF_FIELD_MODE;
+ writel(reg, dc->base + DC_WR_CH_CONF);
+
+ writel(0x0, dc->base + DC_WR_CH_ADDR);
+ writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dc_init_sync);
+
+void ipu_dc_enable(struct ipu_soc *ipu)
+{
+ ipu_module_enable(ipu, IPU_CONF_DC_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_enable);
+
+void ipu_dc_enable_channel(struct ipu_dc *dc)
+{
+ int di;
+ u32 reg;
+
+ di = dc->di;
+
+ reg = readl(dc->base + DC_WR_CH_CONF);
+ reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
+ writel(reg, dc->base + DC_WR_CH_CONF);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_enable_channel);
+
+static irqreturn_t dc_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_dc *dc = dev_id;
+ u32 reg;
+
+ reg = readl(dc->base + DC_WR_CH_CONF);
+ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+ writel(reg, dc->base + DC_WR_CH_CONF);
+
+ /* The Freescale BSP kernel clears DIx_COUNTER_RELEASE here */
+
+ complete(&dc->priv->comp);
+ return IRQ_HANDLED;
+}
+
+void ipu_dc_disable_channel(struct ipu_dc *dc)
+{
+ struct ipu_dc_priv *priv = dc->priv;
+ int irq, ret;
+ u32 val;
+
+ /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */
+ if (dc->chno == 1)
+ irq = priv->dc_irq;
+ else if (dc->chno == 5)
+ irq = priv->dp_irq;
+ else
+ return;
+
+ init_completion(&priv->comp);
+ enable_irq(irq);
+ ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50));
+ disable_irq(irq);
+ if (ret <= 0) {
+ dev_warn(priv->dev, "DC stop timeout after 50 ms\n");
+
+ val = readl(dc->base + DC_WR_CH_CONF);
+ val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+ writel(val, dc->base + DC_WR_CH_CONF);
+ }
+}
+EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
+
+void ipu_dc_disable(struct ipu_soc *ipu)
+{
+ ipu_module_disable(ipu, IPU_CONF_DC_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_disable);
+
+static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map,
+ int byte_num, int offset, int mask)
+{
+ int ptr = map * 3 + byte_num;
+ u32 reg;
+
+ reg = readl(priv->dc_reg + DC_MAP_CONF_VAL(ptr));
+ reg &= ~(0xffff << (16 * (ptr & 0x1)));
+ reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1));
+ writel(reg, priv->dc_reg + DC_MAP_CONF_VAL(ptr));
+
+ reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
+ reg &= ~(0x1f << ((16 * (map & 0x1)) + (5 * byte_num)));
+ reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
+ writel(reg, priv->dc_reg + DC_MAP_CONF_PTR(map));
+}
+
+static void ipu_dc_map_clear(struct ipu_dc_priv *priv, int map)
+{
+ u32 reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map));
+
+ writel(reg & ~(0xffff << (16 * (map & 0x1))),
+ priv->dc_reg + DC_MAP_CONF_PTR(map));
+}
+
+struct ipu_dc *ipu_dc_get(struct ipu_soc *ipu, int channel)
+{
+ struct ipu_dc_priv *priv = ipu->dc_priv;
+ struct ipu_dc *dc;
+
+ if (channel >= IPU_DC_NUM_CHANNELS)
+ return ERR_PTR(-ENODEV);
+
+ dc = &priv->channels[channel];
+
+ mutex_lock(&priv->mutex);
+
+ if (dc->in_use) {
+ mutex_unlock(&priv->mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ dc->in_use = true;
+
+ mutex_unlock(&priv->mutex);
+
+ return dc;
+}
+EXPORT_SYMBOL_GPL(ipu_dc_get);
+
+void ipu_dc_put(struct ipu_dc *dc)
+{
+ struct ipu_dc_priv *priv = dc->priv;
+
+ mutex_lock(&priv->mutex);
+ dc->in_use = false;
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_put);
+
+int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base, unsigned long template_base)
+{
+ struct ipu_dc_priv *priv;
+ static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c,
+ 0x78, 0, 0x94, 0xb4};
+ int i, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->mutex);
+
+ priv->dev = dev;
+ priv->ipu = ipu;
+ priv->dc_reg = devm_ioremap(dev, base, PAGE_SIZE);
+ priv->dc_tmpl_reg = devm_ioremap(dev, template_base, PAGE_SIZE);
+ if (!priv->dc_reg || !priv->dc_tmpl_reg)
+ return -ENOMEM;
+
+ for (i = 0; i < IPU_DC_NUM_CHANNELS; i++) {
+ priv->channels[i].chno = i;
+ priv->channels[i].priv = priv;
+ priv->channels[i].base = priv->dc_reg + channel_offsets[i];
+ }
+
+ priv->dc_irq = ipu_map_irq(ipu, IPU_IRQ_DC_FC_1);
+ if (!priv->dc_irq)
+ return -EINVAL;
+ ret = devm_request_irq(dev, priv->dc_irq, dc_irq_handler, 0, NULL,
+ &priv->channels[1]);
+ if (ret < 0)
+ return ret;
+ disable_irq(priv->dc_irq);
+ priv->dp_irq = ipu_map_irq(ipu, IPU_IRQ_DP_SF_END);
+ if (!priv->dp_irq)
+ return -EINVAL;
+ ret = devm_request_irq(dev, priv->dp_irq, dc_irq_handler, 0, NULL,
+ &priv->channels[5]);
+ if (ret < 0)
+ return ret;
+ disable_irq(priv->dp_irq);
+
+ writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) |
+ DC_WR_CH_CONF_PROG_DI_ID,
+ priv->channels[1].base + DC_WR_CH_CONF);
+ writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(0),
+ priv->channels[5].base + DC_WR_CH_CONF);
+
+ writel(DC_GEN_SYNC_1_6_SYNC | DC_GEN_SYNC_PRIORITY_1,
+ priv->dc_reg + DC_GEN);
+
+ ipu->dc_priv = priv;
+
+ dev_dbg(dev, "DC base: 0x%08lx template base: 0x%08lx\n",
+ base, template_base);
+
+ /* rgb24 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_RGB24);
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 0, 7, 0xff); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 1, 15, 0xff); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB24, 2, 23, 0xff); /* red */
+
+ /* rgb565 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_RGB565);
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 0, 4, 0xf8); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 1, 10, 0xfc); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_RGB565, 2, 15, 0xf8); /* red */
+
+ /* gbr24 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_GBR24);
+ ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 2, 15, 0xff); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 1, 7, 0xff); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_GBR24, 0, 23, 0xff); /* red */
+
+ /* bgr666 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_BGR666);
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 0, 5, 0xfc); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 1, 11, 0xfc); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 2, 17, 0xfc); /* red */
+
+ /* lvds666 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_LVDS666);
+ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 0, 5, 0xfc); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 1, 13, 0xfc); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 2, 21, 0xfc); /* red */
+
+ /* bgr24 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_BGR24);
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 2, 7, 0xff); /* red */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */
+
+ return 0;
+}
+
+void ipu_dc_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
new file mode 100644
index 000000000000..c490ba4384fc
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+struct ipu_di {
+ void __iomem *base;
+ int id;
+ u32 module;
+ struct clk *clk_di; /* display input clock */
+ struct clk *clk_ipu; /* IPU bus clock */
+ struct clk *clk_di_pixel; /* resulting pixel clock */
+ bool inuse;
+ struct ipu_soc *ipu;
+};
+
+static DEFINE_MUTEX(di_mutex);
+
+struct di_sync_config {
+ int run_count;
+ int run_src;
+ int offset_count;
+ int offset_src;
+ int repeat_count;
+ int cnt_clr_src;
+ int cnt_polarity_gen_en;
+ int cnt_polarity_clr_src;
+ int cnt_polarity_trigger_src;
+ int cnt_up;
+ int cnt_down;
+};
+
+enum di_pins {
+ DI_PIN11 = 0,
+ DI_PIN12 = 1,
+ DI_PIN13 = 2,
+ DI_PIN14 = 3,
+ DI_PIN15 = 4,
+ DI_PIN16 = 5,
+ DI_PIN17 = 6,
+ DI_PIN_CS = 7,
+
+ DI_PIN_SER_CLK = 0,
+ DI_PIN_SER_RS = 1,
+};
+
+enum di_sync_wave {
+ DI_SYNC_NONE = 0,
+ DI_SYNC_CLK = 1,
+ DI_SYNC_INT_HSYNC = 2,
+ DI_SYNC_HSYNC = 3,
+ DI_SYNC_VSYNC = 4,
+ DI_SYNC_DE = 6,
+};
+
+#define SYNC_WAVE 0
+
+#define DI_GENERAL 0x0000
+#define DI_BS_CLKGEN0 0x0004
+#define DI_BS_CLKGEN1 0x0008
+#define DI_SW_GEN0(gen) (0x000c + 4 * ((gen) - 1))
+#define DI_SW_GEN1(gen) (0x0030 + 4 * ((gen) - 1))
+#define DI_STP_REP(gen) (0x0148 + 4 * (((gen) - 1)/2))
+#define DI_SYNC_AS_GEN 0x0054
+#define DI_DW_GEN(gen) (0x0058 + 4 * (gen))
+#define DI_DW_SET(gen, set) (0x0088 + 4 * ((gen) + 0xc * (set)))
+#define DI_SER_CONF 0x015c
+#define DI_SSC 0x0160
+#define DI_POL 0x0164
+#define DI_AW0 0x0168
+#define DI_AW1 0x016c
+#define DI_SCR_CONF 0x0170
+#define DI_STAT 0x0174
+
+#define DI_SW_GEN0_RUN_COUNT(x) ((x) << 19)
+#define DI_SW_GEN0_RUN_SRC(x) ((x) << 16)
+#define DI_SW_GEN0_OFFSET_COUNT(x) ((x) << 3)
+#define DI_SW_GEN0_OFFSET_SRC(x) ((x) << 0)
+
+#define DI_SW_GEN1_CNT_POL_GEN_EN(x) ((x) << 29)
+#define DI_SW_GEN1_CNT_CLR_SRC(x) ((x) << 25)
+#define DI_SW_GEN1_CNT_POL_TRIGGER_SRC(x) ((x) << 12)
+#define DI_SW_GEN1_CNT_POL_CLR_SRC(x) ((x) << 9)
+#define DI_SW_GEN1_CNT_DOWN(x) ((x) << 16)
+#define DI_SW_GEN1_CNT_UP(x) (x)
+#define DI_SW_GEN1_AUTO_RELOAD (0x10000000)
+
+#define DI_DW_GEN_ACCESS_SIZE_OFFSET 24
+#define DI_DW_GEN_COMPONENT_SIZE_OFFSET 16
+
+#define DI_GEN_POLARITY_1 (1 << 0)
+#define DI_GEN_POLARITY_2 (1 << 1)
+#define DI_GEN_POLARITY_3 (1 << 2)
+#define DI_GEN_POLARITY_4 (1 << 3)
+#define DI_GEN_POLARITY_5 (1 << 4)
+#define DI_GEN_POLARITY_6 (1 << 5)
+#define DI_GEN_POLARITY_7 (1 << 6)
+#define DI_GEN_POLARITY_8 (1 << 7)
+#define DI_GEN_POLARITY_DISP_CLK (1 << 17)
+#define DI_GEN_DI_CLK_EXT (1 << 20)
+#define DI_GEN_DI_VSYNC_EXT (1 << 21)
+
+#define DI_POL_DRDY_DATA_POLARITY (1 << 7)
+#define DI_POL_DRDY_POLARITY_15 (1 << 4)
+
+#define DI_VSYNC_SEL_OFFSET 13
+
+static inline u32 ipu_di_read(struct ipu_di *di, unsigned offset)
+{
+ return readl(di->base + offset);
+}
+
+static inline void ipu_di_write(struct ipu_di *di, u32 value, unsigned offset)
+{
+ writel(value, di->base + offset);
+}
+
+static void ipu_di_data_wave_config(struct ipu_di *di,
+ int wave_gen,
+ int access_size, int component_size)
+{
+ u32 reg;
+ reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) |
+ (component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET);
+ ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
+}
+
+static void ipu_di_data_pin_config(struct ipu_di *di, int wave_gen, int di_pin,
+ int set, int up, int down)
+{
+ u32 reg;
+
+ reg = ipu_di_read(di, DI_DW_GEN(wave_gen));
+ reg &= ~(0x3 << (di_pin * 2));
+ reg |= set << (di_pin * 2);
+ ipu_di_write(di, reg, DI_DW_GEN(wave_gen));
+
+ ipu_di_write(di, (down << 16) | up, DI_DW_SET(wave_gen, set));
+}
+
+static void ipu_di_sync_config(struct ipu_di *di, struct di_sync_config *config,
+ int start, int count)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct di_sync_config *c = &config[i];
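+ /* the sync waveform generators are numbered from 1 in the register map */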
+ int wave_gen = start + i + 1;
+
+ if ((c->run_count >= 0x1000) || (c->offset_count >= 0x1000) ||
+ (c->repeat_count >= 0x1000) ||
+ (c->cnt_up >= 0x400) ||
+ (c->cnt_down >= 0x400)) {
+ dev_err(di->ipu->dev, "DI%d counters out of range.\n",
+ di->id);
+ return;
+ }
+
+ reg = DI_SW_GEN0_RUN_COUNT(c->run_count) |
+ DI_SW_GEN0_RUN_SRC(c->run_src) |
+ DI_SW_GEN0_OFFSET_COUNT(c->offset_count) |
+ DI_SW_GEN0_OFFSET_SRC(c->offset_src);
+ ipu_di_write(di, reg, DI_SW_GEN0(wave_gen));
+
+ reg = DI_SW_GEN1_CNT_POL_GEN_EN(c->cnt_polarity_gen_en) |
+ DI_SW_GEN1_CNT_CLR_SRC(c->cnt_clr_src) |
+ DI_SW_GEN1_CNT_POL_TRIGGER_SRC(
+ c->cnt_polarity_trigger_src) |
+ DI_SW_GEN1_CNT_POL_CLR_SRC(c->cnt_polarity_clr_src) |
+ DI_SW_GEN1_CNT_DOWN(c->cnt_down) |
+ DI_SW_GEN1_CNT_UP(c->cnt_up);
+
+ /* Enable auto reload */
+ if (c->repeat_count == 0)
+ reg |= DI_SW_GEN1_AUTO_RELOAD;
+
+ ipu_di_write(di, reg, DI_SW_GEN1(wave_gen));
+
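+ /* each DI_STP_REP register holds the 16-bit repeat counts of two consecutive counters */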
+ reg = ipu_di_read(di, DI_STP_REP(wave_gen));
+ reg &= ~(0xffff << (16 * ((wave_gen - 1) & 0x1)));
+ reg |= c->repeat_count << (16 * ((wave_gen - 1) & 0x1));
+ ipu_di_write(di, reg, DI_STP_REP(wave_gen));
+ }
+}
+
+static void ipu_di_sync_config_interlaced(struct ipu_di *di,
+ struct ipu_di_signal_cfg *sig)
+{
+ u32 h_total = sig->width + sig->h_sync_width +
+ sig->h_start_width + sig->h_end_width;
+ u32 v_total = sig->height + sig->v_sync_width +
+ sig->v_start_width + sig->v_end_width;
+ u32 reg;
+ struct di_sync_config cfg[] = {
+ {
+ .run_count = h_total / 2 - 1,
+ .run_src = DI_SYNC_CLK,
+ }, {
+ .run_count = h_total - 11,
+ .run_src = DI_SYNC_CLK,
+ .cnt_down = 4,
+ }, {
+ .run_count = v_total * 2 - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = 1,
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .cnt_down = 4,
+ }, {
+ .run_count = v_total / 2 - 1,
+ .run_src = DI_SYNC_HSYNC,
+ .offset_count = sig->v_start_width,
+ .offset_src = DI_SYNC_HSYNC,
+ .repeat_count = 2,
+ .cnt_clr_src = DI_SYNC_VSYNC,
+ }, {
+ .run_src = DI_SYNC_HSYNC,
+ .repeat_count = sig->height / 2,
+ .cnt_clr_src = 4,
+ }, {
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_HSYNC,
+ }, {
+ .run_count = v_total / 2 - 1,
+ .run_src = DI_SYNC_HSYNC,
+ .offset_count = 9,
+ .offset_src = DI_SYNC_HSYNC,
+ .repeat_count = 2,
+ .cnt_clr_src = DI_SYNC_VSYNC,
+ }, {
+ .run_src = DI_SYNC_CLK,
+ .offset_count = sig->h_start_width,
+ .offset_src = DI_SYNC_CLK,
+ .repeat_count = sig->width,
+ .cnt_clr_src = 5,
+ }, {
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = v_total / 2,
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .cnt_clr_src = DI_SYNC_HSYNC,
+ .cnt_down = 4,
+ }
+ };
+
+ ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
+
+ /* set gentime select and tag sel */
+ reg = ipu_di_read(di, DI_SW_GEN1(9));
+ reg &= 0x1FFFFFFF;
+ reg |= (3 - 1) << 29 | 0x00008000;
+ ipu_di_write(di, reg, DI_SW_GEN1(9));
+
+ ipu_di_write(di, v_total / 2 - 1, DI_SCR_CONF);
+}
+
+static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
+ struct ipu_di_signal_cfg *sig, int div)
+{
+ u32 h_total = sig->width + sig->h_sync_width + sig->h_start_width +
+ sig->h_end_width;
+ u32 v_total = sig->height + sig->v_sync_width + sig->v_start_width +
+ sig->v_end_width;
+ struct di_sync_config cfg[] = {
+ {
+ /* 1: INT_HSYNC */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ } , {
+ /* PIN2: HSYNC */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ .offset_count = div * sig->v_to_h_sync,
+ .offset_src = DI_SYNC_CLK,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_CLK,
+ .cnt_down = sig->h_sync_width * 2,
+ } , {
+ /* PIN3: VSYNC */
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
+ .cnt_down = sig->v_sync_width * 2,
+ } , {
+ /* 4: Line Active */
+ .run_src = DI_SYNC_HSYNC,
+ .offset_count = sig->v_sync_width + sig->v_start_width,
+ .offset_src = DI_SYNC_HSYNC,
+ .repeat_count = sig->height,
+ .cnt_clr_src = DI_SYNC_VSYNC,
+ } , {
+ /* 5: Pixel Active, referenced by DC */
+ .run_src = DI_SYNC_CLK,
+ .offset_count = sig->h_sync_width + sig->h_start_width,
+ .offset_src = DI_SYNC_CLK,
+ .repeat_count = sig->width,
+ .cnt_clr_src = 5, /* Line Active */
+ } , {
+ /* unused */
+ } , {
+ /* unused */
+ } , {
+ /* unused */
+ } , {
+ /* unused */
+ },
+ };
+ /* can't use #7 and #8 for line active and pixel active counters */
+ struct di_sync_config cfg_vga[] = {
+ {
+ /* 1: INT_HSYNC */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ } , {
+ /* 2: VSYNC */
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ } , {
+ /* 3: Line Active */
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = sig->v_sync_width + sig->v_start_width,
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .repeat_count = sig->height,
+ .cnt_clr_src = 3 /* VSYNC */,
+ } , {
+ /* PIN4: HSYNC for VGA via TVEv2 on TQ MBa53 */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
+ .offset_src = DI_SYNC_CLK,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_CLK,
+ .cnt_down = sig->h_sync_width * 2,
+ } , {
+ /* 5: Pixel Active signal to DC */
+ .run_src = DI_SYNC_CLK,
+ .offset_count = sig->h_sync_width + sig->h_start_width,
+ .offset_src = DI_SYNC_CLK,
+ .repeat_count = sig->width,
+ .cnt_clr_src = 4, /* Line Active */
+ } , {
+ /* PIN6: VSYNC for VGA via TVEv2 on TQ MBa53 */
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = 1, /* magic value from Freescale TVE driver */
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
+ .cnt_down = sig->v_sync_width * 2,
+ } , {
+ /* PIN4: HSYNC for VGA via TVEv2 on i.MX53-QSB */
+ .run_count = h_total - 1,
+ .run_src = DI_SYNC_CLK,
+ .offset_count = div * sig->v_to_h_sync + 18, /* magic value from Freescale TVE driver */
+ .offset_src = DI_SYNC_CLK,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_CLK,
+ .cnt_down = sig->h_sync_width * 2,
+ } , {
+ /* PIN6: VSYNC for VGA via TVEv2 on i.MX53-QSB */
+ .run_count = v_total - 1,
+ .run_src = DI_SYNC_INT_HSYNC,
+ .offset_count = 1, /* magic value from Freescale TVE driver */
+ .offset_src = DI_SYNC_INT_HSYNC,
+ .cnt_polarity_gen_en = 1,
+ .cnt_polarity_trigger_src = DI_SYNC_INT_HSYNC,
+ .cnt_down = sig->v_sync_width * 2,
+ } , {
+ /* unused */
+ },
+ };
+
+ ipu_di_write(di, v_total - 1, DI_SCR_CONF);
+ if (sig->hsync_pin == 2 && sig->vsync_pin == 3)
+ ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
+ else
+ ipu_di_sync_config(di, cfg_vga, 0, ARRAY_SIZE(cfg_vga));
+}
+
+static void ipu_di_config_clock(struct ipu_di *di,
+ const struct ipu_di_signal_cfg *sig)
+{
+ struct clk *clk;
+ unsigned clkgen0;
+ uint32_t val;
+
+ if (sig->clkflags & IPU_DI_CLKMODE_EXT) {
+ /*
+ * CLKMODE_EXT means we must use the DI clock: this is
+ * needed for things like LVDS, which needs to feed the
+ * DI and LDB with the same pixel clock.
+ */
+ clk = di->clk_di;
+
+ if (sig->clkflags & IPU_DI_CLKMODE_SYNC) {
+ /*
+ * CLKMODE_SYNC means that we want the DI to be
+ * clocked at the same rate as the parent clock.
+ * This is needed (e.g.) for the LDB, which needs to be
+ * fed with the same pixel clock. We assume that
+ * the LDB clock has already been set correctly.
+ */
+ clkgen0 = 1 << 4;
+ } else {
+ /*
+ * We can use the divider. We should really have
+ * a flag here indicating whether the bridge can
+ * cope with a fractional divider or not. For the
+ * time being, let's go for simplicity and
+ * reliability.
+ */
+ unsigned long in_rate;
+ unsigned div;
+
+ clk_set_rate(clk, sig->pixelclock);
+
+ in_rate = clk_get_rate(clk);
+ div = (in_rate + sig->pixelclock / 2) / sig->pixelclock;
+ if (div == 0)
+ div = 1;
+
+ clkgen0 = div << 4;
+ }
+ } else {
+ /*
+ * For other interfaces, we can arbitrarily select between
+ * the DI specific clock and the internal IPU clock. See
+ * DI_GENERAL bit 20. We select the IPU clock if it can
+ * give us a clock rate within 1% of the requested frequency,
+ * otherwise we use the DI clock.
+ */
+ unsigned long rate, clkrate;
+ unsigned div, error;
+
+ clkrate = clk_get_rate(di->clk_ipu);
+ div = (clkrate + sig->pixelclock / 2) / sig->pixelclock;
+ rate = clkrate / div;
+
+ error = rate / (sig->pixelclock / 1000);
+
+ dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n",
+ rate, div, (signed)(error - 1000) / 10, error % 10);
+
+ /* Allow a 1% error */
+ if (error < 1010 && error >= 990) {
+ clk = di->clk_ipu;
+
+ clkgen0 = div << 4;
+ } else {
+ unsigned long in_rate;
+ unsigned div;
+
+ clk = di->clk_di;
+
+ clk_set_rate(clk, sig->pixelclock);
+
+ in_rate = clk_get_rate(clk);
+ div = (in_rate + sig->pixelclock / 2) / sig->pixelclock;
+ if (div == 0)
+ div = 1;
+
+ clkgen0 = div << 4;
+ }
+ }
+
+ di->clk_di_pixel = clk;
+
+ /* Set the divider */
+ ipu_di_write(di, clkgen0, DI_BS_CLKGEN0);
+
+ /*
+ * Set the high/low periods. Bits 24:16 give us the falling edge,
+ * and bits 8:0 give the rising edge. LSB is fraction, and is
+ * based on the divider above. We want a 50% duty cycle, so set
+ * the falling edge to be half the divider.
+ */
+ ipu_di_write(di, (clkgen0 >> 4) << 16, DI_BS_CLKGEN1);
+
+ /* Finally select the input clock */
+ val = ipu_di_read(di, DI_GENERAL) & ~DI_GEN_DI_CLK_EXT;
+ if (clk == di->clk_di)
+ val |= DI_GEN_DI_CLK_EXT;
+ ipu_di_write(di, val, DI_GENERAL);
+
+ dev_dbg(di->ipu->dev, "Want %luHz IPU %luHz DI %luHz using %s, %luHz\n",
+ sig->pixelclock,
+ clk_get_rate(di->clk_ipu),
+ clk_get_rate(di->clk_di),
+ clk == di->clk_di ? "DI" : "IPU",
+ clk_get_rate(di->clk_di_pixel) / (clkgen0 >> 4));
+}
+
+int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
+{
+ u32 reg;
+ u32 di_gen, vsync_cnt;
+ u32 div;
+ u32 h_total, v_total;
+
+ dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
+ di->id, sig->width, sig->height);
+
+ if ((sig->v_sync_width == 0) || (sig->h_sync_width == 0))
+ return -EINVAL;
+
+ h_total = sig->width + sig->h_sync_width + sig->h_start_width +
+ sig->h_end_width;
+ v_total = sig->height + sig->v_sync_width + sig->v_start_width +
+ sig->v_end_width;
+
+ dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
+ clk_get_rate(di->clk_ipu),
+ clk_get_rate(di->clk_di),
+ sig->pixelclock);
+
+ mutex_lock(&di_mutex);
+
+ ipu_di_config_clock(di, sig);
+
+ div = ipu_di_read(di, DI_BS_CLKGEN0) & 0xfff;
+ div = div / 16; /* Now divider is integer portion */
+
+ /* Setup pixel clock timing */
+ /* Down time is half of period */
+ ipu_di_write(di, (div << 16), DI_BS_CLKGEN1);
+
+ ipu_di_data_wave_config(di, SYNC_WAVE, div - 1, div - 1);
+ ipu_di_data_pin_config(di, SYNC_WAVE, DI_PIN15, 3, 0, div * 2);
+
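+ /* keep only the clock source bit chosen by ipu_di_config_clock */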
+ di_gen = ipu_di_read(di, DI_GENERAL) & DI_GEN_DI_CLK_EXT;
+ di_gen |= DI_GEN_DI_VSYNC_EXT;
+
+ if (sig->interlaced) {
+ ipu_di_sync_config_interlaced(di, sig);
+
+ /* set y_sel = 1 */
+ di_gen |= 0x10000000;
+ di_gen |= DI_GEN_POLARITY_5;
+ di_gen |= DI_GEN_POLARITY_8;
+
+ vsync_cnt = 7;
+
+ if (sig->Hsync_pol)
+ di_gen |= DI_GEN_POLARITY_3;
+ if (sig->Vsync_pol)
+ di_gen |= DI_GEN_POLARITY_2;
+ } else {
+ ipu_di_sync_config_noninterlaced(di, sig, div);
+
+ vsync_cnt = 3;
+ /*
+ * TODO: change only for TVEv2, parallel display
+ * uses pin 2 / 3
+ */
+ if (di->id == 1 &&
+ !(sig->hsync_pin == 2 && sig->vsync_pin == 3))
+ vsync_cnt = 6;
+
+ if (sig->Hsync_pol) {
+ if (sig->hsync_pin == 2)
+ di_gen |= DI_GEN_POLARITY_2;
+ else if (sig->hsync_pin == 4)
+ di_gen |= DI_GEN_POLARITY_4;
+ else if (sig->hsync_pin == 7)
+ di_gen |= DI_GEN_POLARITY_7;
+ }
+ if (sig->Vsync_pol) {
+ if (sig->vsync_pin == 3)
+ di_gen |= DI_GEN_POLARITY_3;
+ else if (sig->vsync_pin == 6)
+ di_gen |= DI_GEN_POLARITY_6;
+ else if (sig->vsync_pin == 8)
+ di_gen |= DI_GEN_POLARITY_8;
+ }
+ }
+
+ if (sig->clk_pol)
+ di_gen |= DI_GEN_POLARITY_DISP_CLK;
+
+ ipu_di_write(di, di_gen, DI_GENERAL);
+
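+ /* the VSYNC_SEL field takes the zero-based counter number, hence the decrement */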
+ ipu_di_write(di, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) | 0x00000002,
+ DI_SYNC_AS_GEN);
+
+ reg = ipu_di_read(di, DI_POL);
+ reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15);
+
+ if (sig->enable_pol)
+ reg |= DI_POL_DRDY_POLARITY_15;
+ if (sig->data_pol)
+ reg |= DI_POL_DRDY_DATA_POLARITY;
+
+ ipu_di_write(di, reg, DI_POL);
+
+ mutex_unlock(&di_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel);
+
+int ipu_di_enable(struct ipu_di *di)
+{
+ int ret;
+
+ WARN_ON(IS_ERR(di->clk_di_pixel));
+
+ ret = clk_prepare_enable(di->clk_di_pixel);
+ if (ret)
+ return ret;
+
+ ipu_module_enable(di->ipu, di->module);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_di_enable);
+
+int ipu_di_disable(struct ipu_di *di)
+{
+ WARN_ON(IS_ERR(di->clk_di_pixel));
+
+ ipu_module_disable(di->ipu, di->module);
+
+ clk_disable_unprepare(di->clk_di_pixel);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_di_disable);
+
+int ipu_di_get_num(struct ipu_di *di)
+{
+ return di->id;
+}
+EXPORT_SYMBOL_GPL(ipu_di_get_num);
+
+static DEFINE_MUTEX(ipu_di_lock);
+
+struct ipu_di *ipu_di_get(struct ipu_soc *ipu, int disp)
+{
+ struct ipu_di *di;
+
+ if (disp > 1)
+ return ERR_PTR(-EINVAL);
+
+ di = ipu->di_priv[disp];
+
+ mutex_lock(&ipu_di_lock);
+
+ if (di->inuse) {
+ di = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ di->inuse = true;
+out:
+ mutex_unlock(&ipu_di_lock);
+
+ return di;
+}
+EXPORT_SYMBOL_GPL(ipu_di_get);
+
+void ipu_di_put(struct ipu_di *di)
+{
+ mutex_lock(&ipu_di_lock);
+
+ di->inuse = false;
+
+ mutex_unlock(&ipu_di_lock);
+}
+EXPORT_SYMBOL_GPL(ipu_di_put);
+
+int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
+ unsigned long base,
+ u32 module, struct clk *clk_ipu)
+{
+ struct ipu_di *di;
+
+ if (id > 1)
+ return -ENODEV;
+
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
+ return -ENOMEM;
+
+ ipu->di_priv[id] = di;
+
+ di->clk_di = devm_clk_get(dev, id ? "di1" : "di0");
+ if (IS_ERR(di->clk_di))
+ return PTR_ERR(di->clk_di);
+
+ di->module = module;
+ di->id = id;
+ di->clk_ipu = clk_ipu;
+ di->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!di->base)
+ return -ENOMEM;
+
+ ipu_di_write(di, 0x10, DI_BS_CLKGEN0);
+
+ dev_dbg(dev, "DI%d base: 0x%08lx remapped to %p\n",
+ id, base, di->base);
+ di->inuse = false;
+ di->ipu = ipu;
+
+ return 0;
+}
+
+void ipu_di_exit(struct ipu_soc *ipu, int id)
+{
+}
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
new file mode 100644
index 000000000000..042c3958e2a0
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+#define DMFC_RD_CHAN 0x0000
+#define DMFC_WR_CHAN 0x0004
+#define DMFC_WR_CHAN_DEF 0x0008
+#define DMFC_DP_CHAN 0x000c
+#define DMFC_DP_CHAN_DEF 0x0010
+#define DMFC_GENERAL1 0x0014
+#define DMFC_GENERAL2 0x0018
+#define DMFC_IC_CTRL 0x001c
+#define DMFC_WR_CHAN_ALT 0x0020
+#define DMFC_WR_CHAN_DEF_ALT 0x0024
+#define DMFC_DP_CHAN_ALT 0x0028
+#define DMFC_DP_CHAN_DEF_ALT 0x002c
+#define DMFC_GENERAL1_ALT 0x0030
+#define DMFC_STAT 0x0034
+
+#define DMFC_WR_CHAN_1_28 0
+#define DMFC_WR_CHAN_2_41 8
+#define DMFC_WR_CHAN_1C_42 16
+#define DMFC_WR_CHAN_2C_43 24
+
+#define DMFC_DP_CHAN_5B_23 0
+#define DMFC_DP_CHAN_5F_27 8
+#define DMFC_DP_CHAN_6B_24 16
+#define DMFC_DP_CHAN_6F_29 24
+
+#define DMFC_FIFO_SIZE_64 (3 << 3)
+#define DMFC_FIFO_SIZE_128 (2 << 3)
+#define DMFC_FIFO_SIZE_256 (1 << 3)
+#define DMFC_FIFO_SIZE_512 (0 << 3)
+
+#define DMFC_SEGMENT(x) (((x) & 0x7) << 0)
+#define DMFC_BURSTSIZE_128 (0 << 6)
+#define DMFC_BURSTSIZE_64 (1 << 6)
+#define DMFC_BURSTSIZE_32 (2 << 6)
+#define DMFC_BURSTSIZE_16 (3 << 6)
+
+struct dmfc_channel_data {
+ int ipu_channel;
+ unsigned long channel_reg;
+ unsigned long shift;
+ unsigned eot_shift;
+ unsigned max_fifo_lines;
+};
+
+static const struct dmfc_channel_data dmfcdata[] = {
+ {
+ .ipu_channel = IPUV3_CHANNEL_MEM_BG_SYNC,
+ .channel_reg = DMFC_DP_CHAN,
+ .shift = DMFC_DP_CHAN_5B_23,
+ .eot_shift = 20,
+ .max_fifo_lines = 3,
+ }, {
+ .ipu_channel = 24,
+ .channel_reg = DMFC_DP_CHAN,
+ .shift = DMFC_DP_CHAN_6B_24,
+ .eot_shift = 22,
+ .max_fifo_lines = 1,
+ }, {
+ .ipu_channel = IPUV3_CHANNEL_MEM_FG_SYNC,
+ .channel_reg = DMFC_DP_CHAN,
+ .shift = DMFC_DP_CHAN_5F_27,
+ .eot_shift = 21,
+ .max_fifo_lines = 2,
+ }, {
+ .ipu_channel = IPUV3_CHANNEL_MEM_DC_SYNC,
+ .channel_reg = DMFC_WR_CHAN,
+ .shift = DMFC_WR_CHAN_1_28,
+ .eot_shift = 16,
+ .max_fifo_lines = 2,
+ }, {
+ .ipu_channel = 29,
+ .channel_reg = DMFC_DP_CHAN,
+ .shift = DMFC_DP_CHAN_6F_29,
+ .eot_shift = 23,
+ .max_fifo_lines = 1,
+ },
+};
+
+#define DMFC_NUM_CHANNELS ARRAY_SIZE(dmfcdata)
+
+struct ipu_dmfc_priv;
+
+struct dmfc_channel {
+ unsigned slots;
+ unsigned slotmask;
+ unsigned segment;
+ int burstsize;
+ struct ipu_soc *ipu;
+ struct ipu_dmfc_priv *priv;
+ const struct dmfc_channel_data *data;
+};
+
+struct ipu_dmfc_priv {
+ struct ipu_soc *ipu;
+ struct device *dev;
+ struct dmfc_channel channels[DMFC_NUM_CHANNELS];
+ struct mutex mutex;
+ unsigned long bandwidth_per_slot;
+ void __iomem *base;
+ int use_count;
+};
+
+int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+
+ mutex_lock(&priv->mutex);
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, IPU_CONF_DMFC_EN);
+
+ priv->use_count++;
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
+
+static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(priv->dev,
+ "Timeout waiting for DMFC FIFOs to clear\n");
+ break;
+ }
+ cpu_relax();
+ }
+}
+
+void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+
+ mutex_lock(&priv->mutex);
+
+ priv->use_count--;
+
+ if (!priv->use_count) {
+ ipu_dmfc_wait_fifos(priv);
+ ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
+ }
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
+
+static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
+ int segment, int burstsize)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+ u32 val, field;
+
+ dev_dbg(priv->dev,
+ "dmfc: using %d slots starting from segment %d for IPU channel %d\n",
+ slots, segment, dmfc->data->ipu_channel);
+
+ switch (slots) {
+ case 1:
+ field = DMFC_FIFO_SIZE_64;
+ break;
+ case 2:
+ field = DMFC_FIFO_SIZE_128;
+ break;
+ case 4:
+ field = DMFC_FIFO_SIZE_256;
+ break;
+ case 8:
+ field = DMFC_FIFO_SIZE_512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (burstsize) {
+ case 16:
+ field |= DMFC_BURSTSIZE_16;
+ break;
+ case 32:
+ field |= DMFC_BURSTSIZE_32;
+ break;
+ case 64:
+ field |= DMFC_BURSTSIZE_64;
+ break;
+ case 128:
+ field |= DMFC_BURSTSIZE_128;
+ break;
+ }
+
+ field |= DMFC_SEGMENT(segment);
+
+ val = readl(priv->base + dmfc->data->channel_reg);
+
+ val &= ~(0xff << dmfc->data->shift);
+ val |= field << dmfc->data->shift;
+
+ writel(val, priv->base + dmfc->data->channel_reg);
+
+ dmfc->slots = slots;
+ dmfc->segment = segment;
+ dmfc->burstsize = burstsize;
+ dmfc->slotmask = ((1 << slots) - 1) << segment;
+
+ return 0;
+}
+
+static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv,
+ unsigned long bandwidth)
+{
+ int slots = 1;
+
+ while (slots * priv->bandwidth_per_slot < bandwidth)
+ slots *= 2;
+
+ return slots;
+}
+
+static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots)
+{
+ unsigned slotmask_need, slotmask_used = 0;
+ int i, segment = 0;
+
+ slotmask_need = (1 << slots) - 1;
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++)
+ slotmask_used |= priv->channels[i].slotmask;
+
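+ /* slide the required slot mask across the eight FIFO segments until it fits a free gap */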
+ while (slotmask_need <= 0xff) {
+ if (!(slotmask_used & slotmask_need))
+ return segment;
+
+ slotmask_need <<= 1;
+ segment++;
+ }
+
+ return -EBUSY;
+}
+
+void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+ int i;
+
+ dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n",
+ dmfc->slots, dmfc->segment);
+
+ mutex_lock(&priv->mutex);
+
+ if (!dmfc->slots)
+ goto out;
+
+ dmfc->slotmask = 0;
+ dmfc->slots = 0;
+ dmfc->segment = 0;
+
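+ /* drop all current allocations, then re-pack the remaining channels from segment 0 */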
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++)
+ priv->channels[i].slotmask = 0;
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
+ if (priv->channels[i].slots > 0) {
+ priv->channels[i].segment =
+ dmfc_find_slots(priv, priv->channels[i].slots);
+ priv->channels[i].slotmask =
+ ((1 << priv->channels[i].slots) - 1) <<
+ priv->channels[i].segment;
+ }
+ }
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
+ if (priv->channels[i].slots > 0)
+ ipu_dmfc_setup_channel(&priv->channels[i],
+ priv->channels[i].slots,
+ priv->channels[i].segment,
+ priv->channels[i].burstsize);
+ }
+out:
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth);
+
+int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
+ unsigned long bandwidth_pixel_per_second, int burstsize)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+ int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
+ int segment = -1, ret = 0;
+
+ dev_dbg(priv->dev, "dmfc: trying to allocate %luMpixel/s for IPU channel %d\n",
+ bandwidth_pixel_per_second / 1000000,
+ dmfc->data->ipu_channel);
+
+ ipu_dmfc_free_bandwidth(dmfc);
+
+ mutex_lock(&priv->mutex);
+
+ if (slots > 8) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* For the MEM_BG channel, first try to allocate twice the slots */
+ if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
+ segment = dmfc_find_slots(priv, slots * 2);
+ else if (slots < 2)
+ /* Always allocate at least 128*4 bytes (2 slots) */
+ slots = 2;
+
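+ /* if the double-sized allocation succeeded, keep it; otherwise fall back to the requested size */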
+ if (segment >= 0)
+ slots *= 2;
+ else
+ segment = dmfc_find_slots(priv, slots);
+ if (segment < 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize);
+
+out:
+ mutex_unlock(&priv->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
+
+int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
+{
+ struct ipu_dmfc_priv *priv = dmfc->priv;
+ u32 dmfc_gen1;
+
+ dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
+
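+ /* program the channel's EOT bit depending on how many lines fit into its FIFO */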
+ if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
+ dmfc_gen1 |= 1 << dmfc->data->eot_shift;
+ else
+ dmfc_gen1 &= ~(1 << dmfc->data->eot_shift);
+
+ writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel);
+
+struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
+{
+ struct ipu_dmfc_priv *priv = ipu->dmfc_priv;
+ int i;
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++)
+ if (dmfcdata[i].ipu_channel == ipu_channel)
+ return &priv->channels[i];
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_get);
+
+void ipu_dmfc_put(struct dmfc_channel *dmfc)
+{
+ ipu_dmfc_free_bandwidth(dmfc);
+}
+EXPORT_SYMBOL_GPL(ipu_dmfc_put);
+
+int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
+ struct clk *ipu_clk)
+{
+ struct ipu_dmfc_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!priv->base)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->ipu = ipu;
+ mutex_init(&priv->mutex);
+
+ ipu->dmfc_priv = priv;
+
+ for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
+ priv->channels[i].priv = priv;
+ priv->channels[i].ipu = ipu;
+ priv->channels[i].data = &dmfcdata[i];
+ }
+
+ writel(0x0, priv->base + DMFC_WR_CHAN);
+ writel(0x0, priv->base + DMFC_DP_CHAN);
+
+ /*
+ * We have a total bandwidth of clkrate * 4 pixels per second,
+ * divided into 8 slots.
+ */
+ priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8;
+
+ dev_dbg(dev, "dmfc: 8 slots with %luMpixel/s bandwidth each\n",
+ priv->bandwidth_per_slot / 1000000);
+
+ writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
+ writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
+ writel(0x00000003, priv->base + DMFC_GENERAL1);
+
+ return 0;
+}
+
+void ipu_dmfc_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
new file mode 100644
index 000000000000..98686edbcdbb
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <video/imx-ipu-v3.h>
+#include "ipu-prv.h"
+
+#define DP_SYNC 0
+#define DP_ASYNC0 0x60
+#define DP_ASYNC1 0xBC
+
+#define DP_COM_CONF 0x0
+#define DP_GRAPH_WIND_CTRL 0x0004
+#define DP_FG_POS 0x0008
+#define DP_CSC_A_0 0x0044
+#define DP_CSC_A_1 0x0048
+#define DP_CSC_A_2 0x004C
+#define DP_CSC_A_3 0x0050
+#define DP_CSC_0 0x0054
+#define DP_CSC_1 0x0058
+
+#define DP_COM_CONF_FG_EN (1 << 0)
+#define DP_COM_CONF_GWSEL (1 << 1)
+#define DP_COM_CONF_GWAM (1 << 2)
+#define DP_COM_CONF_GWCKE (1 << 3)
+#define DP_COM_CONF_CSC_DEF_MASK (3 << 8)
+#define DP_COM_CONF_CSC_DEF_OFFSET 8
+#define DP_COM_CONF_CSC_DEF_FG (3 << 8)
+#define DP_COM_CONF_CSC_DEF_BG (2 << 8)
+#define DP_COM_CONF_CSC_DEF_BOTH (1 << 8)
+
+#define IPUV3_NUM_FLOWS 3
+
+struct ipu_dp_priv;
+
+struct ipu_dp {
+ u32 flow;
+ bool in_use;
+ bool foreground;
+ enum ipu_color_space in_cs;
+};
+
+struct ipu_flow {
+ struct ipu_dp foreground;
+ struct ipu_dp background;
+ enum ipu_color_space out_cs;
+ void __iomem *base;
+ struct ipu_dp_priv *priv;
+};
+
+struct ipu_dp_priv {
+ struct ipu_soc *ipu;
+ struct device *dev;
+ void __iomem *base;
+ struct ipu_flow flow[IPUV3_NUM_FLOWS];
+ struct mutex mutex;
+ int use_count;
+};
+
+static u32 ipu_dp_flow_base[] = {DP_SYNC, DP_ASYNC0, DP_ASYNC1};
+
+static inline struct ipu_flow *to_flow(struct ipu_dp *dp)
+{
+ if (dp->foreground)
+ return container_of(dp, struct ipu_flow, foreground);
+ else
+ return container_of(dp, struct ipu_flow, background);
+}
+
+int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable,
+ u8 alpha, bool bg_chan)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+ u32 reg;
+
+ mutex_lock(&priv->mutex);
+
+ reg = readl(flow->base + DP_COM_CONF);
+ if (bg_chan)
+ reg &= ~DP_COM_CONF_GWSEL;
+ else
+ reg |= DP_COM_CONF_GWSEL;
+ writel(reg, flow->base + DP_COM_CONF);
+
+ if (enable) {
+ reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL;
+ writel(reg | ((u32) alpha << 24),
+ flow->base + DP_GRAPH_WIND_CTRL);
+
+ reg = readl(flow->base + DP_COM_CONF);
+ writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
+ } else {
+ reg = readl(flow->base + DP_COM_CONF);
+ writel(reg & ~DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
+ }
+
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_set_global_alpha);
+
+int ipu_dp_set_window_pos(struct ipu_dp *dp, u16 x_pos, u16 y_pos)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+
+ writel((x_pos << 16) | y_pos, flow->base + DP_FG_POS);
+
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_set_window_pos);
+
+static void ipu_dp_csc_init(struct ipu_flow *flow,
+ enum ipu_color_space in,
+ enum ipu_color_space out,
+ u32 place)
+{
+ u32 reg;
+
+ reg = readl(flow->base + DP_COM_CONF);
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+
+ if (in == out) {
+ writel(reg, flow->base + DP_COM_CONF);
+ return;
+ }
+
+ if (in == IPUV3_COLORSPACE_RGB && out == IPUV3_COLORSPACE_YUV) {
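+ /* RGB -> YUV coefficient set */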
+ writel(0x099 | (0x12d << 16), flow->base + DP_CSC_A_0);
+ writel(0x03a | (0x3a9 << 16), flow->base + DP_CSC_A_1);
+ writel(0x356 | (0x100 << 16), flow->base + DP_CSC_A_2);
+ writel(0x100 | (0x329 << 16), flow->base + DP_CSC_A_3);
+ writel(0x3d6 | (0x0000 << 16) | (2 << 30),
+ flow->base + DP_CSC_0);
+ writel(0x200 | (2 << 14) | (0x200 << 16) | (2 << 30),
+ flow->base + DP_CSC_1);
+ } else {
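+ /* YUV -> RGB coefficient set */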
+ writel(0x095 | (0x000 << 16), flow->base + DP_CSC_A_0);
+ writel(0x0cc | (0x095 << 16), flow->base + DP_CSC_A_1);
+ writel(0x3ce | (0x398 << 16), flow->base + DP_CSC_A_2);
+ writel(0x095 | (0x0ff << 16), flow->base + DP_CSC_A_3);
+ writel(0x000 | (0x3e42 << 16) | (1 << 30),
+ flow->base + DP_CSC_0);
+ writel(0x10a | (1 << 14) | (0x3dd6 << 16) | (1 << 30),
+ flow->base + DP_CSC_1);
+ }
+
+ reg |= place;
+
+ writel(reg, flow->base + DP_COM_CONF);
+}
+
+int ipu_dp_setup_channel(struct ipu_dp *dp,
+ enum ipu_color_space in,
+ enum ipu_color_space out)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+
+ mutex_lock(&priv->mutex);
+
+ dp->in_cs = in;
+
+ if (!dp->foreground)
+ flow->out_cs = out;
+
+ if (flow->foreground.in_cs == flow->background.in_cs) {
+ /*
+ * foreground and background are in the same colorspace, put
+ * the colorspace converter after the combining unit.
+ */
+ ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
+ DP_COM_CONF_CSC_DEF_BOTH);
+ } else {
+ if (flow->foreground.in_cs == flow->out_cs)
+ /*
+ * foreground identical to output, apply color
+ * conversion on background
+ */
+ ipu_dp_csc_init(flow, flow->background.in_cs,
+ flow->out_cs, DP_COM_CONF_CSC_DEF_BG);
+ else
+ ipu_dp_csc_init(flow, flow->foreground.in_cs,
+ flow->out_cs, DP_COM_CONF_CSC_DEF_FG);
+ }
+
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_setup_channel);
+
+int ipu_dp_enable(struct ipu_soc *ipu)
+{
+ struct ipu_dp_priv *priv = ipu->dp_priv;
+
+ mutex_lock(&priv->mutex);
+
+ if (!priv->use_count)
+ ipu_module_enable(priv->ipu, IPU_CONF_DP_EN);
+
+ priv->use_count++;
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_enable);
+
+int ipu_dp_enable_channel(struct ipu_dp *dp)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+ u32 reg;
+
+ if (!dp->foreground)
+ return 0;
+
+ mutex_lock(&priv->mutex);
+
+ reg = readl(flow->base + DP_COM_CONF);
+ reg |= DP_COM_CONF_FG_EN;
+ writel(reg, flow->base + DP_COM_CONF);
+
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_enable_channel);
+
+void ipu_dp_disable_channel(struct ipu_dp *dp)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+ u32 reg, csc;
+
+ if (!dp->foreground)
+ return;
+
+ mutex_lock(&priv->mutex);
+
+ reg = readl(flow->base + DP_COM_CONF);
+ csc = reg & DP_COM_CONF_CSC_DEF_MASK;
+ if (csc == DP_COM_CONF_CSC_DEF_FG)
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+
+ reg &= ~DP_COM_CONF_FG_EN;
+ writel(reg, flow->base + DP_COM_CONF);
+
+ writel(0, flow->base + DP_FG_POS);
+ ipu_srm_dp_sync_update(priv->ipu);
+
+ if (ipu_idmac_channel_busy(priv->ipu, IPUV3_CHANNEL_MEM_BG_SYNC))
+ ipu_wait_interrupt(priv->ipu, IPU_IRQ_DP_SF_END, 50);
+
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
+
+void ipu_dp_disable(struct ipu_soc *ipu)
+{
+ struct ipu_dp_priv *priv = ipu->dp_priv;
+
+ mutex_lock(&priv->mutex);
+
+ priv->use_count--;
+
+ if (!priv->use_count)
+ ipu_module_disable(priv->ipu, IPU_CONF_DP_EN);
+
+ if (priv->use_count < 0)
+ priv->use_count = 0;
+
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dp_disable);
+
+struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
+{
+ struct ipu_dp_priv *priv = ipu->dp_priv;
+ struct ipu_dp *dp;
+
+ if ((flow >> 1) >= IPUV3_NUM_FLOWS)
+ return ERR_PTR(-EINVAL);
+
+ if (flow & 1)
+ dp = &priv->flow[flow >> 1].foreground;
+ else
+ dp = &priv->flow[flow >> 1].background;
+
+ if (dp->in_use)
+ return ERR_PTR(-EBUSY);
+
+ dp->in_use = true;
+
+ return dp;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_get);
+
+void ipu_dp_put(struct ipu_dp *dp)
+{
+ dp->in_use = false;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_put);
+
+int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
+{
+ struct ipu_dp_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+ priv->ipu = ipu;
+
+ ipu->dp_priv = priv;
+
+ priv->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!priv->base)
+ return -ENOMEM;
+
+ mutex_init(&priv->mutex);
+
+ for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+ priv->flow[i].foreground.foreground = true;
+ priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
+ priv->flow[i].priv = priv;
+ }
+
+ return 0;
+}
+
+void ipu_dp_exit(struct ipu_soc *ipu)
+{
+}
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
new file mode 100644
index 000000000000..c93f50ec04f7
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef __IPU_PRV_H__
+#define __IPU_PRV_H__
+
+struct ipu_soc;
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <video/imx-ipu-v3.h>
+
+#define IPUV3_CHANNEL_CSI0 0
+#define IPUV3_CHANNEL_CSI1 1
+#define IPUV3_CHANNEL_CSI2 2
+#define IPUV3_CHANNEL_CSI3 3
+#define IPUV3_CHANNEL_MEM_BG_SYNC 23
+#define IPUV3_CHANNEL_MEM_FG_SYNC 27
+#define IPUV3_CHANNEL_MEM_DC_SYNC 28
+#define IPUV3_CHANNEL_MEM_FG_SYNC_ALPHA 31
+#define IPUV3_CHANNEL_MEM_DC_ASYNC 41
+#define IPUV3_CHANNEL_ROT_ENC_MEM 45
+#define IPUV3_CHANNEL_ROT_VF_MEM 46
+#define IPUV3_CHANNEL_ROT_PP_MEM 47
+#define IPUV3_CHANNEL_ROT_ENC_MEM_OUT 48
+#define IPUV3_CHANNEL_ROT_VF_MEM_OUT 49
+#define IPUV3_CHANNEL_ROT_PP_MEM_OUT 50
+#define IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA 51
+
+#define IPU_MCU_T_DEFAULT 8
+#define IPU_CM_IDMAC_REG_OFS 0x00008000
+#define IPU_CM_IC_REG_OFS 0x00020000
+#define IPU_CM_IRT_REG_OFS 0x00028000
+#define IPU_CM_CSI0_REG_OFS 0x00030000
+#define IPU_CM_CSI1_REG_OFS 0x00038000
+#define IPU_CM_SMFC_REG_OFS 0x00050000
+#define IPU_CM_DC_REG_OFS 0x00058000
+#define IPU_CM_DMFC_REG_OFS 0x00060000
+
+/* Register addresses */
+/* IPU Common registers */
+#define IPU_CM_REG(offset) (offset)
+
+#define IPU_CONF IPU_CM_REG(0)
+
+#define IPU_SRM_PRI1 IPU_CM_REG(0x00a0)
+#define IPU_SRM_PRI2 IPU_CM_REG(0x00a4)
+#define IPU_FS_PROC_FLOW1 IPU_CM_REG(0x00a8)
+#define IPU_FS_PROC_FLOW2 IPU_CM_REG(0x00ac)
+#define IPU_FS_PROC_FLOW3 IPU_CM_REG(0x00b0)
+#define IPU_FS_DISP_FLOW1 IPU_CM_REG(0x00b4)
+#define IPU_FS_DISP_FLOW2 IPU_CM_REG(0x00b8)
+#define IPU_SKIP IPU_CM_REG(0x00bc)
+#define IPU_DISP_ALT_CONF IPU_CM_REG(0x00c0)
+#define IPU_DISP_GEN IPU_CM_REG(0x00c4)
+#define IPU_DISP_ALT1 IPU_CM_REG(0x00c8)
+#define IPU_DISP_ALT2 IPU_CM_REG(0x00cc)
+#define IPU_DISP_ALT3 IPU_CM_REG(0x00d0)
+#define IPU_DISP_ALT4 IPU_CM_REG(0x00d4)
+#define IPU_SNOOP IPU_CM_REG(0x00d8)
+#define IPU_MEM_RST IPU_CM_REG(0x00dc)
+#define IPU_PM IPU_CM_REG(0x00e0)
+#define IPU_GPR IPU_CM_REG(0x00e4)
+#define IPU_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0150 + 4 * ((ch) / 32))
+#define IPU_ALT_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0168 + 4 * ((ch) / 32))
+#define IPU_CHA_CUR_BUF(ch) IPU_CM_REG(0x023C + 4 * ((ch) / 32))
+#define IPU_ALT_CUR_BUF0 IPU_CM_REG(0x0244)
+#define IPU_ALT_CUR_BUF1 IPU_CM_REG(0x0248)
+#define IPU_SRM_STAT IPU_CM_REG(0x024C)
+#define IPU_PROC_TASK_STAT IPU_CM_REG(0x0250)
+#define IPU_DISP_TASK_STAT IPU_CM_REG(0x0254)
+#define IPU_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0268 + 4 * ((ch) / 32))
+#define IPU_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0270 + 4 * ((ch) / 32))
+#define IPU_ALT_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0278 + 4 * ((ch) / 32))
+#define IPU_ALT_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0280 + 4 * ((ch) / 32))
+
+#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * (n))
+#define IPU_INT_STAT(n) IPU_CM_REG(0x0200 + 4 * (n))
+
+#define IPU_DI0_COUNTER_RELEASE (1 << 24)
+#define IPU_DI1_COUNTER_RELEASE (1 << 25)
+
+#define IPU_IDMAC_REG(offset) (offset)
+
+#define IDMAC_CONF IPU_IDMAC_REG(0x0000)
+#define IDMAC_CHA_EN(ch) IPU_IDMAC_REG(0x0004 + 4 * ((ch) / 32))
+#define IDMAC_SEP_ALPHA IPU_IDMAC_REG(0x000c)
+#define IDMAC_ALT_SEP_ALPHA IPU_IDMAC_REG(0x0010)
+#define IDMAC_CHA_PRI(ch) IPU_IDMAC_REG(0x0014 + 4 * ((ch) / 32))
+#define IDMAC_WM_EN(ch) IPU_IDMAC_REG(0x001c + 4 * ((ch) / 32))
+#define IDMAC_CH_LOCK_EN_1 IPU_IDMAC_REG(0x0024)
+#define IDMAC_CH_LOCK_EN_2 IPU_IDMAC_REG(0x0028)
+#define IDMAC_SUB_ADDR_0 IPU_IDMAC_REG(0x002c)
+#define IDMAC_SUB_ADDR_1 IPU_IDMAC_REG(0x0030)
+#define IDMAC_SUB_ADDR_2 IPU_IDMAC_REG(0x0034)
+#define IDMAC_BAND_EN(ch) IPU_IDMAC_REG(0x0040 + 4 * ((ch) / 32))
+#define IDMAC_CHA_BUSY(ch) IPU_IDMAC_REG(0x0100 + 4 * ((ch) / 32))
+
+#define IPU_NUM_IRQS (32 * 15)
+
+enum ipu_modules {
+ IPU_CONF_CSI0_EN = (1 << 0),
+ IPU_CONF_CSI1_EN = (1 << 1),
+ IPU_CONF_IC_EN = (1 << 2),
+ IPU_CONF_ROT_EN = (1 << 3),
+ IPU_CONF_ISP_EN = (1 << 4),
+ IPU_CONF_DP_EN = (1 << 5),
+ IPU_CONF_DI0_EN = (1 << 6),
+ IPU_CONF_DI1_EN = (1 << 7),
+ IPU_CONF_SMFC_EN = (1 << 8),
+ IPU_CONF_DC_EN = (1 << 9),
+ IPU_CONF_DMFC_EN = (1 << 10),
+
+ IPU_CONF_VDI_EN = (1 << 12),
+
+ IPU_CONF_IDMAC_DIS = (1 << 22),
+
+ IPU_CONF_IC_DMFC_SEL = (1 << 25),
+ IPU_CONF_IC_DMFC_SYNC = (1 << 26),
+ IPU_CONF_VDI_DMFC_SYNC = (1 << 27),
+
+ IPU_CONF_CSI0_DATA_SOURCE = (1 << 28),
+ IPU_CONF_CSI1_DATA_SOURCE = (1 << 29),
+ IPU_CONF_IC_INPUT = (1 << 30),
+ IPU_CONF_CSI_SEL = (1 << 31),
+};
+
+struct ipuv3_channel {
+ unsigned int num;
+
+ bool enabled;
+ bool busy;
+
+ struct ipu_soc *ipu;
+};
+
+struct ipu_dc_priv;
+struct ipu_dmfc_priv;
+struct ipu_di;
+struct ipu_smfc_priv;
+
+struct ipu_devtype;
+
+struct ipu_soc {
+ struct device *dev;
+ const struct ipu_devtype *devtype;
+ enum ipuv3_type ipu_type;
+ spinlock_t lock;
+ struct mutex channel_lock;
+
+ void __iomem *cm_reg;
+ void __iomem *idmac_reg;
+ struct ipu_ch_param __iomem *cpmem_base;
+
+ int usecount;
+
+ struct clk *clk;
+
+ struct ipuv3_channel channel[64];
+
+ int irq_sync;
+ int irq_err;
+ struct irq_domain *domain;
+
+ struct ipu_dc_priv *dc_priv;
+ struct ipu_dp_priv *dp_priv;
+ struct ipu_dmfc_priv *dmfc_priv;
+ struct ipu_di *di_priv[2];
+ struct ipu_smfc_priv *smfc_priv;
+};
+
+void ipu_srm_dp_sync_update(struct ipu_soc *ipu);
+
+int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
+int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
+
+bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
+int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms);
+
+int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
+ unsigned long base, u32 module, struct clk *ipu_clk);
+void ipu_di_exit(struct ipu_soc *ipu, int id);
+
+int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
+ struct clk *ipu_clk);
+void ipu_dmfc_exit(struct ipu_soc *ipu);
+
+int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+void ipu_dp_exit(struct ipu_soc *ipu);
+
+int ipu_dc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
+ unsigned long template_base);
+void ipu_dc_exit(struct ipu_soc *ipu);
+
+int ipu_cpmem_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+void ipu_cpmem_exit(struct ipu_soc *ipu);
+
+int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
+void ipu_smfc_exit(struct ipu_soc *ipu);
+
+#endif /* __IPU_PRV_H__ */
diff --git a/drivers/gpu/ipu-v3/ipu-smfc.c b/drivers/gpu/ipu-v3/ipu-smfc.c
new file mode 100644
index 000000000000..e4f85ad286fc
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-smfc.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#define DEBUG
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <video/imx-ipu-v3.h>
+
+#include "ipu-prv.h"
+
+struct ipu_smfc_priv {
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+/* SMFC Registers */
+#define SMFC_MAP 0x0000
+#define SMFC_WMC 0x0004
+#define SMFC_BS 0x0008
+
+int ipu_smfc_set_burstsize(struct ipu_soc *ipu, int channel, int burstsize)
+{
+ struct ipu_smfc_priv *smfc = ipu->smfc_priv;
+ unsigned long flags;
+ u32 val, shift;
+
+ spin_lock_irqsave(&smfc->lock, flags);
+
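+ /* each channel has a 4-bit burst size field in SMFC_BS */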
+ shift = channel * 4;
+ val = readl(smfc->base + SMFC_BS);
+ val &= ~(0xf << shift);
+ val |= burstsize << shift;
+ writel(val, smfc->base + SMFC_BS);
+
+ spin_unlock_irqrestore(&smfc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_set_burstsize);
+
+int ipu_smfc_map_channel(struct ipu_soc *ipu, int channel, int csi_id, int mipi_id)
+{
+ struct ipu_smfc_priv *smfc = ipu->smfc_priv;
+ unsigned long flags;
+ u32 val, shift;
+
+ spin_lock_irqsave(&smfc->lock, flags);
+
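+ /* 3-bit map field per channel: CSI id in bit 2, MIPI id in bits 1:0 */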
+ shift = channel * 3;
+ val = readl(smfc->base + SMFC_MAP);
+ val &= ~(0x7 << shift);
+ val |= ((csi_id << 2) | mipi_id) << shift;
+ writel(val, smfc->base + SMFC_MAP);
+
+ spin_unlock_irqrestore(&smfc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_smfc_map_channel);
+
+int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev,
+ unsigned long base)
+{
+ struct ipu_smfc_priv *smfc;
+
+ smfc = devm_kzalloc(dev, sizeof(*smfc), GFP_KERNEL);
+ if (!smfc)
+ return -ENOMEM;
+
+ ipu->smfc_priv = smfc;
+ spin_lock_init(&smfc->lock);
+
+ smfc->base = devm_ioremap(dev, base, PAGE_SIZE);
+ if (!smfc->base)
+ return -ENOMEM;
+
+ pr_debug("%s: ioremap 0x%08lx -> %p\n", __func__, base, smfc->base);
+
+ return 0;
+}
+
+void ipu_smfc_exit(struct ipu_soc *ipu)
+{
+}